Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/Kconfig  44
-rw-r--r--  drivers/scsi/Makefile  3
-rw-r--r--  drivers/scsi/NCR_D700.c  2
-rw-r--r--  drivers/scsi/aha1740.c  2
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c  10
-rw-r--r--  drivers/scsi/bnx2i/57xx_iscsi_constants.h  155
-rw-r--r--  drivers/scsi/bnx2i/57xx_iscsi_hsi.h  1509
-rw-r--r--  drivers/scsi/bnx2i/Kconfig  10
-rw-r--r--  drivers/scsi/bnx2i/Makefile  3
-rw-r--r--  drivers/scsi/bnx2i/bnx2i.h  771
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c  2405
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_init.c  438
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c  2064
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_sysfs.c  142
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i.h  1
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_ddp.c  90
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_ddp.h  2
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_iscsi.c  26
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_offload.c  23
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_offload.h  3
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c  6
-rw-r--r--  drivers/scsi/dpt/osd_util.h  2
-rw-r--r--  drivers/scsi/eata.c  24
-rw-r--r--  drivers/scsi/fcoe/fcoe.c  398
-rw-r--r--  drivers/scsi/fcoe/fcoe.h  25
-rw-r--r--  drivers/scsi/fcoe/libfcoe.c  125
-rw-r--r--  drivers/scsi/fnic/fnic_main.c  1
-rw-r--r--  drivers/scsi/gdth_proc.c  5
-rw-r--r--  drivers/scsi/hosts.c  8
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c  466
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h  41
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c  469
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.h  4
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvstgt.c  2
-rw-r--r--  drivers/scsi/ibmvscsi/viosrp.h  68
-rw-r--r--  drivers/scsi/ipr.c  143
-rw-r--r--  drivers/scsi/ipr.h  10
-rw-r--r--  drivers/scsi/iscsi_tcp.c  14
-rw-r--r--  drivers/scsi/libfc/fc_disc.c  83
-rw-r--r--  drivers/scsi/libfc/fc_exch.c  62
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c  99
-rw-r--r--  drivers/scsi/libfc/fc_lport.c  156
-rw-r--r--  drivers/scsi/libfc/fc_rport.c  126
-rw-r--r--  drivers/scsi/libiscsi.c  623
-rw-r--r--  drivers/scsi/libiscsi_tcp.c  24
-rw-r--r--  drivers/scsi/libsas/sas_expander.c  16
-rw-r--r--  drivers/scsi/libsas/sas_host_smp.c  49
-rw-r--r--  drivers/scsi/libsrp.c  2
-rw-r--r--  drivers/scsi/lpfc/lpfc.h  119
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c  256
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h  63
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c  19
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c  24
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h  1
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c  302
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c  1388
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h  141
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h  2141
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c  5626
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h  54
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c  677
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c  206
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c  51
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c  1329
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h  2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c  6811
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h  30
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h  467
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h  2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c  64
-rw-r--r--  drivers/scsi/megaraid.h  2
-rw-r--r--  drivers/scsi/megaraid/mbox_defs.h  2
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.h  5
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_ctl.c  32
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c  363
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_transport.c  59
-rw-r--r--  drivers/scsi/mvsas.c  3222
-rw-r--r--  drivers/scsi/mvsas/Kconfig  42
-rw-r--r--  drivers/scsi/mvsas/Makefile  32
-rw-r--r--  drivers/scsi/mvsas/mv_64xx.c  793
-rw-r--r--  drivers/scsi/mvsas/mv_64xx.h  151
-rw-r--r--  drivers/scsi/mvsas/mv_94xx.c  672
-rw-r--r--  drivers/scsi/mvsas/mv_94xx.h  222
-rw-r--r--  drivers/scsi/mvsas/mv_chips.h  280
-rw-r--r--  drivers/scsi/mvsas/mv_defs.h  502
-rw-r--r--  drivers/scsi/mvsas/mv_init.c  703
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c  2154
-rw-r--r--  drivers/scsi/mvsas/mv_sas.h  406
-rw-r--r--  drivers/scsi/ncr53c8xx.c  2
-rw-r--r--  drivers/scsi/osd/Kbuild  25
-rwxr-xr-x  drivers/scsi/osd/Makefile  37
-rw-r--r--  drivers/scsi/osd/osd_initiator.c  155
-rw-r--r--  drivers/scsi/osd/osd_uld.c  66
-rw-r--r--  drivers/scsi/pcmcia/nsp_cs.c  6
-rw-r--r--  drivers/scsi/ps3rom.c  10
-rw-r--r--  drivers/scsi/qla1280.c  387
-rw-r--r--  drivers/scsi/qla1280.h  3
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c  227
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c  15
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h  45
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h  6
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h  43
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c  5
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c  208
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c  55
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c  255
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c  266
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c  118
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c  294
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c  54
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h  2
-rw-r--r--  drivers/scsi/scsi.c  4
-rw-r--r--  drivers/scsi/scsi_debug.c  32
-rw-r--r--  drivers/scsi/scsi_devinfo.c  248
-rw-r--r--  drivers/scsi/scsi_error.c  21
-rw-r--r--  drivers/scsi/scsi_lib.c  102
-rw-r--r--  drivers/scsi/scsi_priv.h  17
-rw-r--r--  drivers/scsi/scsi_scan.c  4
-rw-r--r--  drivers/scsi/scsi_sysfs.c  17
-rw-r--r--  drivers/scsi/scsi_tgt_lib.c  2
-rw-r--r--  drivers/scsi/scsi_transport_fc.c  618
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c  174
-rw-r--r--  drivers/scsi/scsi_transport_sas.c  8
-rw-r--r--  drivers/scsi/scsi_transport_spi.c  65
-rw-r--r--  drivers/scsi/sd.c  145
-rw-r--r--  drivers/scsi/sd.h  1
-rw-r--r--  drivers/scsi/sd_dif.c  2
-rw-r--r--  drivers/scsi/sg.c  18
-rw-r--r--  drivers/scsi/sr.c  18
-rw-r--r--  drivers/scsi/st.c  8
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c  66
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_hipd.c  54
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_hipd.h  2
-rw-r--r--  drivers/scsi/u14-34f.c  22
134 files changed, 36491 insertions, 7610 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index fb2740789b68..9c23122f755f 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -191,20 +191,19 @@ config SCSI_ENCLOSURE
 	  it has an enclosure device. Selecting this option will just allow
 	  certain enclosure conditions to be reported and is not required.
 
-comment "Some SCSI devices (e.g. CD jukebox) support multiple LUNs"
-	depends on SCSI
-
 config SCSI_MULTI_LUN
 	bool "Probe all LUNs on each SCSI device"
 	depends on SCSI
 	help
-	  If you have a SCSI device that supports more than one LUN (Logical
-	  Unit Number), e.g. a CD jukebox, and only one LUN is detected, you
-	  can say Y here to force the SCSI driver to probe for multiple LUNs.
-	  A SCSI device with multiple LUNs acts logically like multiple SCSI
-	  devices. The vast majority of SCSI devices have only one LUN, and
-	  so most people can say N here. The max_luns boot/module parameter
-	  allows to override this setting.
+	  Some devices support more than one LUN (Logical Unit Number) in order
+	  to allow access to several media, e.g. CD jukebox, USB card reader,
+	  mobile phone in mass storage mode. This option forces the kernel to
+	  probe for all LUNs by default. This setting can be overriden by
+	  max_luns boot/module parameter. Note that this option does not affect
+	  devices conforming to SCSI-3 or higher as they can explicitely report
+	  their number of LUNs. It is safe to say Y here unless you have one of
+	  those rare devices which reacts in an unexpected way when probed for
+	  multiple LUNs.
 
 config SCSI_CONSTANTS
 	bool "Verbose SCSI error reporting (kernel size +=12K)"
@@ -259,10 +258,21 @@ config SCSI_SCAN_ASYNC
 	  or async on the kernel's command line.
 
 config SCSI_WAIT_SCAN
-	tristate
+	tristate # No prompt here, this is an invisible symbol.
 	default m
 	depends on SCSI
 	depends on MODULES
+# scsi_wait_scan is a loadable module which waits until all the async scans are
+# complete. The idea is to use it in initrd/ initramfs scripts. You modprobe
+# it after all the modprobes of the root SCSI drivers and it will wait until
+# they have all finished scanning their buses before allowing the boot to
+# proceed. (This method is not applicable if targets boot independently in
+# parallel with the initiator, or with transports with non-deterministic target
+# discovery schemes, or if a transport driver does not support scsi_wait_scan.)
+#
+# This symbol is not exposed as a prompt because little is to be gained by
+# disabling it, whereas people who accidentally switch it off may wonder why
+# their mkinitrd gets into trouble.
 
 menu "SCSI Transports"
 	depends on SCSI
@@ -355,6 +365,7 @@ config ISCSI_TCP
 	  http://open-iscsi.org
 
 source "drivers/scsi/cxgb3i/Kconfig"
+source "drivers/scsi/bnx2i/Kconfig"
 
 config SGIWD93_SCSI
 	tristate "SGI WD93C93 SCSI Driver"
@@ -508,6 +519,7 @@ config SCSI_AIC7XXX_OLD
 
 source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
 source "drivers/scsi/aic94xx/Kconfig"
+source "drivers/scsi/mvsas/Kconfig"
 
 config SCSI_DPT_I2O
 	tristate "Adaptec I2O RAID support "
@@ -1050,16 +1062,6 @@ config SCSI_IZIP_SLOW_CTR
 
 	  Generally, saying N is fine.
 
-config SCSI_MVSAS
-	tristate "Marvell 88SE6440 SAS/SATA support"
-	depends on PCI && SCSI
-	select SCSI_SAS_LIBSAS
-	help
-	  This driver supports Marvell SAS/SATA PCI devices.
-
-	  To compiler this driver as a module, choose M here: the module
-	  will be called mvsas.
-
 config SCSI_NCR53C406A
 	tristate "NCR53c406a SCSI support"
 	depends on ISA && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index a5049cfb40ed..25429ea63d0a 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -126,9 +126,10 @@ obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/
 obj-$(CONFIG_SCSI_IBMVFC)	+= ibmvscsi/
 obj-$(CONFIG_SCSI_HPTIOP)	+= hptiop.o
 obj-$(CONFIG_SCSI_STEX)		+= stex.o
-obj-$(CONFIG_SCSI_MVSAS)	+= mvsas.o
+obj-$(CONFIG_SCSI_MVSAS)	+= mvsas/
 obj-$(CONFIG_PS3_ROM)		+= ps3rom.o
 obj-$(CONFIG_SCSI_CXGB3_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgb3i/
+obj-$(CONFIG_SCSI_BNX2_ISCSI)	+= libiscsi.o bnx2i/
 
 obj-$(CONFIG_ARM)		+= arm/
 
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
index c889d8458684..1cdf09a4779a 100644
--- a/drivers/scsi/NCR_D700.c
+++ b/drivers/scsi/NCR_D700.c
@@ -224,7 +224,7 @@ NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
 	return ret;
 }
 
-static int
+static irqreturn_t
 NCR_D700_intr(int irq, void *data)
 {
 	struct NCR_D700_private *p = (struct NCR_D700_private *)data;
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index ed0e3e55652a..538135783aab 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -646,7 +646,7 @@ static int aha1740_probe (struct device *dev)
 
 static __devexit int aha1740_remove (struct device *dev)
 {
-	struct Scsi_Host *shpnt = dev->driver_data;
+	struct Scsi_Host *shpnt = dev_get_drvdata(dev);
 	struct aha1740_hostdata *host = HOSTDATA (shpnt);
 
 	scsi_remove_host(shpnt);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 0f829b3b8ab7..75b23317bd26 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -627,19 +627,15 @@ ahd_linux_target_alloc(struct scsi_target *starget)
 					    starget->id, &tstate);
 
 	if ((flags & CFPACKETIZED) == 0) {
-		/* Do not negotiate packetized transfers */
-		spi_rd_strm(starget) = 0;
-		spi_pcomp_en(starget) = 0;
-		spi_rti(starget) = 0;
-		spi_wr_flow(starget) = 0;
-		spi_hold_mcs(starget) = 0;
+		/* don't negotiate packetized (IU) transfers */
+		spi_max_iu(starget) = 0;
 	} else {
 		if ((ahd->features & AHD_RTI) == 0)
 			spi_rti(starget) = 0;
 	}
 
 	if ((flags & CFQAS) == 0)
-		spi_qas(starget) = 0;
+		spi_max_qas(starget) = 0;
 
 	/* Transinfo values have been set to BIOS settings */
 	spi_max_width(starget) = (flags & CFWIDEB) ? 1 : 0;
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
new file mode 100644
index 000000000000..2fceb19eb27b
--- /dev/null
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -0,0 +1,155 @@
1/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
10 */
11#ifndef __57XX_ISCSI_CONSTANTS_H_
12#define __57XX_ISCSI_CONSTANTS_H_
13
14/**
15* This file defines HSI constants for the iSCSI flows
16*/
17
18/* iSCSI request op codes */
19#define ISCSI_OPCODE_CLEANUP_REQUEST (7)
20
21/* iSCSI response/messages op codes */
22#define ISCSI_OPCODE_CLEANUP_RESPONSE (0x27)
23#define ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION (0)
24
25/* iSCSI task types */
26#define ISCSI_TASK_TYPE_READ (0)
27#define ISCSI_TASK_TYPE_WRITE (1)
28#define ISCSI_TASK_TYPE_MPATH (2)
29
30/* initial CQ sequence numbers */
31#define ISCSI_INITIAL_SN (1)
32
33/* KWQ (kernel work queue) layer codes */
34#define ISCSI_KWQE_LAYER_CODE (6)
35
36/* KWQ (kernel work queue) request op codes */
37#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN1 (0)
38#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN2 (1)
39#define ISCSI_KWQE_OPCODE_UPDATE_CONN (2)
40#define ISCSI_KWQE_OPCODE_DESTROY_CONN (3)
41#define ISCSI_KWQE_OPCODE_INIT1 (4)
42#define ISCSI_KWQE_OPCODE_INIT2 (5)
43
44/* KCQ (kernel completion queue) response op codes */
45#define ISCSI_KCQE_OPCODE_OFFLOAD_CONN (0x10)
46#define ISCSI_KCQE_OPCODE_UPDATE_CONN (0x12)
47#define ISCSI_KCQE_OPCODE_DESTROY_CONN (0x13)
48#define ISCSI_KCQE_OPCODE_INIT (0x14)
49#define ISCSI_KCQE_OPCODE_FW_CLEAN_TASK (0x15)
50#define ISCSI_KCQE_OPCODE_TCP_RESET (0x16)
51#define ISCSI_KCQE_OPCODE_TCP_SYN (0x17)
52#define ISCSI_KCQE_OPCODE_TCP_FIN (0X18)
53#define ISCSI_KCQE_OPCODE_TCP_ERROR (0x19)
54#define ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
55#define ISCSI_KCQE_OPCODE_ISCSI_ERROR (0x21)
56
57/* KCQ (kernel completion queue) completion status */
58#define ISCSI_KCQE_COMPLETION_STATUS_SUCCESS (0x0)
59#define ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x1)
60#define ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x2)
61#define ISCSI_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x3)
62#define ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR (0x4)
63
64#define ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR (0x5)
65#define ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR (0x6)
66
67#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_UNEXPECTED_OPCODE (0xa)
68#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE (0xb)
69#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN (0xc)
70#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT (0xd)
71#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN (0xe)
72
73/* Response */
74#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN (0xf)
75#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T (0x10)
76#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_IS_ZERO (0x2c)
77#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG (0x2d)
78#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0 (0x11)
79#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1 (0x12)
80#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2 (0x13)
81#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3 (0x14)
82#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4 (0x15)
83#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5 (0x16)
84#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6 (0x17)
85
86/* Data-In */
87#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN (0x18)
88#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN (0x19)
89#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO (0x1a)
90#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV (0x1b)
91#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN (0x1c)
92#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN (0x1d)
93
94/* R2T */
95#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF (0x1f)
96#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN (0x20)
97#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN (0x21)
98#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 (0x22)
99#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 (0x23)
100#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED (0x24)
101#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV (0x25)
102#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN (0x26)
103#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO (0x27)
104
105/* TMF */
106#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN (0x28)
107#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN (0x29)
108#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN (0x2a)
109#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP (0x2b)
110
111/* IP/TCP processing errors: */
112#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT (0x40)
113#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS (0x41)
114#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG (0x42)
115#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_MAX_RTRANS (0x43)
116
117/* iSCSI licensing errors */
118/* general iSCSI license not installed */
119#define ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED (0x50)
120/* additional LOM specific iSCSI license not installed */
121#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED (0x51)
122
123/* SQ/RQ/CQ DB structure sizes */
124#define ISCSI_SQ_DB_SIZE (16)
125#define ISCSI_RQ_DB_SIZE (16)
126#define ISCSI_CQ_DB_SIZE (80)
127
128#define ISCSI_SQN_TO_NOTIFY_NOT_VALID 0xFFFF
129
130/* Page size codes (for flags field in connection offload request) */
131#define ISCSI_PAGE_SIZE_256 (0)
132#define ISCSI_PAGE_SIZE_512 (1)
133#define ISCSI_PAGE_SIZE_1K (2)
134#define ISCSI_PAGE_SIZE_2K (3)
135#define ISCSI_PAGE_SIZE_4K (4)
136#define ISCSI_PAGE_SIZE_8K (5)
137#define ISCSI_PAGE_SIZE_16K (6)
138#define ISCSI_PAGE_SIZE_32K (7)
139#define ISCSI_PAGE_SIZE_64K (8)
140#define ISCSI_PAGE_SIZE_128K (9)
141#define ISCSI_PAGE_SIZE_256K (10)
142#define ISCSI_PAGE_SIZE_512K (11)
143#define ISCSI_PAGE_SIZE_1M (12)
144#define ISCSI_PAGE_SIZE_2M (13)
145#define ISCSI_PAGE_SIZE_4M (14)
146#define ISCSI_PAGE_SIZE_8M (15)
147
148/* Iscsi PDU related defines */
149#define ISCSI_HEADER_SIZE (48)
150#define ISCSI_DIGEST_SHIFT (2)
151#define ISCSI_DIGEST_SIZE (4)
152
153#define B577XX_ISCSI_CONNECTION_TYPE 3
154
155#endif /*__57XX_ISCSI_CONSTANTS_H_ */
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
new file mode 100644
index 000000000000..36af1afef9b6
--- /dev/null
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -0,0 +1,1509 @@
1/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
10 */
11#ifndef __57XX_ISCSI_HSI_LINUX_LE__
12#define __57XX_ISCSI_HSI_LINUX_LE__
13
14/*
15 * iSCSI Async CQE
16 */
17struct bnx2i_async_msg {
18#if defined(__BIG_ENDIAN)
19 u8 op_code;
20 u8 reserved1;
21 u16 reserved0;
22#elif defined(__LITTLE_ENDIAN)
23 u16 reserved0;
24 u8 reserved1;
25 u8 op_code;
26#endif
27 u32 reserved2;
28 u32 exp_cmd_sn;
29 u32 max_cmd_sn;
30 u32 reserved3[2];
31#if defined(__BIG_ENDIAN)
32 u16 reserved5;
33 u8 err_code;
34 u8 reserved4;
35#elif defined(__LITTLE_ENDIAN)
36 u8 reserved4;
37 u8 err_code;
38 u16 reserved5;
39#endif
40 u32 reserved6;
41 u32 lun[2];
42#if defined(__BIG_ENDIAN)
43 u8 async_event;
44 u8 async_vcode;
45 u16 param1;
46#elif defined(__LITTLE_ENDIAN)
47 u16 param1;
48 u8 async_vcode;
49 u8 async_event;
50#endif
51#if defined(__BIG_ENDIAN)
52 u16 param2;
53 u16 param3;
54#elif defined(__LITTLE_ENDIAN)
55 u16 param3;
56 u16 param2;
57#endif
58 u32 reserved7[3];
59 u32 cq_req_sn;
60};
61
62
63/*
64 * iSCSI Buffer Descriptor (BD)
65 */
66struct iscsi_bd {
67 u32 buffer_addr_hi;
68 u32 buffer_addr_lo;
69#if defined(__BIG_ENDIAN)
70 u16 reserved0;
71 u16 buffer_length;
72#elif defined(__LITTLE_ENDIAN)
73 u16 buffer_length;
74 u16 reserved0;
75#endif
76#if defined(__BIG_ENDIAN)
77 u16 reserved3;
78 u16 flags;
79#define ISCSI_BD_RESERVED1 (0x3F<<0)
80#define ISCSI_BD_RESERVED1_SHIFT 0
81#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
82#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
83#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
84#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
85#define ISCSI_BD_RESERVED2 (0xFF<<8)
86#define ISCSI_BD_RESERVED2_SHIFT 8
87#elif defined(__LITTLE_ENDIAN)
88 u16 flags;
89#define ISCSI_BD_RESERVED1 (0x3F<<0)
90#define ISCSI_BD_RESERVED1_SHIFT 0
91#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
92#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
93#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
94#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
95#define ISCSI_BD_RESERVED2 (0xFF<<8)
96#define ISCSI_BD_RESERVED2_SHIFT 8
97 u16 reserved3;
98#endif
99};
100
101
102/*
103 * iSCSI Cleanup SQ WQE
104 */
105struct bnx2i_cleanup_request {
106#if defined(__BIG_ENDIAN)
107 u8 op_code;
108 u8 reserved1;
109 u16 reserved0;
110#elif defined(__LITTLE_ENDIAN)
111 u16 reserved0;
112 u8 reserved1;
113 u8 op_code;
114#endif
115 u32 reserved2[3];
116#if defined(__BIG_ENDIAN)
117 u16 reserved3;
118 u16 itt;
119#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
120#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
121#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
122#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
123#elif defined(__LITTLE_ENDIAN)
124 u16 itt;
125#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
126#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
127#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
128#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
129 u16 reserved3;
130#endif
131 u32 reserved4[10];
132#if defined(__BIG_ENDIAN)
133 u8 cq_index;
134 u8 reserved6;
135 u16 reserved5;
136#elif defined(__LITTLE_ENDIAN)
137 u16 reserved5;
138 u8 reserved6;
139 u8 cq_index;
140#endif
141};
142
143
144/*
145 * iSCSI Cleanup CQE
146 */
147struct bnx2i_cleanup_response {
148#if defined(__BIG_ENDIAN)
149 u8 op_code;
150 u8 status;
151 u16 reserved0;
152#elif defined(__LITTLE_ENDIAN)
153 u16 reserved0;
154 u8 status;
155 u8 op_code;
156#endif
157 u32 reserved1[3];
158 u32 reserved2[2];
159#if defined(__BIG_ENDIAN)
160 u16 reserved4;
161 u8 err_code;
162 u8 reserved3;
163#elif defined(__LITTLE_ENDIAN)
164 u8 reserved3;
165 u8 err_code;
166 u16 reserved4;
167#endif
168 u32 reserved5[7];
169#if defined(__BIG_ENDIAN)
170 u16 reserved6;
171 u16 itt;
172#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
173#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
174#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
175#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
176#elif defined(__LITTLE_ENDIAN)
177 u16 itt;
178#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
179#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
180#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
181#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
182 u16 reserved6;
183#endif
184 u32 cq_req_sn;
185};
186
187
188/*
189 * SCSI read/write SQ WQE
190 */
191struct bnx2i_cmd_request {
192#if defined(__BIG_ENDIAN)
193 u8 op_code;
194 u8 op_attr;
195#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
196#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
197#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
198#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
199#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
200#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
201#define ISCSI_CMD_REQUEST_READ (0x1<<6)
202#define ISCSI_CMD_REQUEST_READ_SHIFT 6
203#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
204#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
205 u16 reserved0;
206#elif defined(__LITTLE_ENDIAN)
207 u16 reserved0;
208 u8 op_attr;
209#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
210#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
211#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
212#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
213#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
214#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
215#define ISCSI_CMD_REQUEST_READ (0x1<<6)
216#define ISCSI_CMD_REQUEST_READ_SHIFT 6
217#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
218#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
219 u8 op_code;
220#endif
221#if defined(__BIG_ENDIAN)
222 u16 ud_buffer_offset;
223 u16 sd_buffer_offset;
224#elif defined(__LITTLE_ENDIAN)
225 u16 sd_buffer_offset;
226 u16 ud_buffer_offset;
227#endif
228 u32 lun[2];
229#if defined(__BIG_ENDIAN)
230 u16 reserved2;
231 u16 itt;
232#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
233#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
234#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
235#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
236#elif defined(__LITTLE_ENDIAN)
237 u16 itt;
238#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
239#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
240#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
241#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
242 u16 reserved2;
243#endif
244 u32 total_data_transfer_length;
245 u32 cmd_sn;
246 u32 reserved3;
247 u32 cdb[4];
248 u32 zero_fill;
249 u32 bd_list_addr_lo;
250 u32 bd_list_addr_hi;
251#if defined(__BIG_ENDIAN)
252 u8 cq_index;
253 u8 sd_start_bd_index;
254 u8 ud_start_bd_index;
255 u8 num_bds;
256#elif defined(__LITTLE_ENDIAN)
257 u8 num_bds;
258 u8 ud_start_bd_index;
259 u8 sd_start_bd_index;
260 u8 cq_index;
261#endif
262};
263
264
265/*
266 * task statistics for write response
267 */
268struct bnx2i_write_resp_task_stat {
269 u32 num_data_ins;
270};
271
272/*
273 * task statistics for read response
274 */
275struct bnx2i_read_resp_task_stat {
276#if defined(__BIG_ENDIAN)
277 u16 num_data_outs;
278 u16 num_r2ts;
279#elif defined(__LITTLE_ENDIAN)
280 u16 num_r2ts;
281 u16 num_data_outs;
282#endif
283};
284
285/*
286 * task statistics for iSCSI cmd response
287 */
288union bnx2i_cmd_resp_task_stat {
289 struct bnx2i_write_resp_task_stat write_stat;
290 struct bnx2i_read_resp_task_stat read_stat;
291};
292
293/*
294 * SCSI Command CQE
295 */
296struct bnx2i_cmd_response {
297#if defined(__BIG_ENDIAN)
298 u8 op_code;
299 u8 response_flags;
300#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
301#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
302#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
303#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
304#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
305#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
306#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
307#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
308#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
309#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
310#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
311#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
312 u8 response;
313 u8 status;
314#elif defined(__LITTLE_ENDIAN)
315 u8 status;
316 u8 response;
317 u8 response_flags;
318#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
319#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
320#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
321#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
322#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
323#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
324#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
325#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
326#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
327#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
328#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
329#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
330 u8 op_code;
331#endif
332 u32 data_length;
333 u32 exp_cmd_sn;
334 u32 max_cmd_sn;
335 u32 reserved2;
336 u32 residual_count;
337#if defined(__BIG_ENDIAN)
338 u16 reserved4;
339 u8 err_code;
340 u8 reserved3;
341#elif defined(__LITTLE_ENDIAN)
342 u8 reserved3;
343 u8 err_code;
344 u16 reserved4;
345#endif
346 u32 reserved5[5];
347 union bnx2i_cmd_resp_task_stat task_stat;
348 u32 reserved6;
349#if defined(__BIG_ENDIAN)
350 u16 reserved7;
351 u16 itt;
352#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
353#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
354#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
355#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
356#elif defined(__LITTLE_ENDIAN)
357 u16 itt;
358#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
359#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
360#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
361#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
362 u16 reserved7;
363#endif
364 u32 cq_req_sn;
365};
366
367
368
369/*
370 * firmware middle-path request SQ WQE
371 */
372struct bnx2i_fw_mp_request {
373#if defined(__BIG_ENDIAN)
374 u8 op_code;
375 u8 op_attr;
376 u16 hdr_opaque1;
377#elif defined(__LITTLE_ENDIAN)
378 u16 hdr_opaque1;
379 u8 op_attr;
380 u8 op_code;
381#endif
382 u32 data_length;
383 u32 hdr_opaque2[2];
384#if defined(__BIG_ENDIAN)
385 u16 reserved0;
386 u16 itt;
387#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
388#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
389#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
390#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
391#elif defined(__LITTLE_ENDIAN)
392 u16 itt;
393#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
394#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
395#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
396#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
397 u16 reserved0;
398#endif
399 u32 hdr_opaque3[4];
400 u32 resp_bd_list_addr_lo;
401 u32 resp_bd_list_addr_hi;
402 u32 resp_buffer;
403#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
404#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
405#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS (0xFF<<24)
406#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS_SHIFT 24
407#if defined(__BIG_ENDIAN)
408 u16 reserved4;
409 u8 reserved3;
410 u8 flags;
411#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
412#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
413#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
414#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
415#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
416#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
417#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
418#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
419#elif defined(__LITTLE_ENDIAN)
420 u8 flags;
421#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
422#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
423#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
424#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
425#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
426#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
427#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
428#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
429 u8 reserved3;
430 u16 reserved4;
431#endif
432 u32 bd_list_addr_lo;
433 u32 bd_list_addr_hi;
434#if defined(__BIG_ENDIAN)
435 u8 cq_index;
436 u8 reserved6;
437 u8 reserved5;
438 u8 num_bds;
439#elif defined(__LITTLE_ENDIAN)
440 u8 num_bds;
441 u8 reserved5;
442 u8 reserved6;
443 u8 cq_index;
444#endif
445};
446
447
448/*
449 * firmware response - CQE: used only by firmware
450 */
451struct bnx2i_fw_response {
452 u32 hdr_dword1[2];
453 u32 hdr_exp_cmd_sn;
454 u32 hdr_max_cmd_sn;
455 u32 hdr_ttt;
456 u32 hdr_res_cnt;
457 u32 cqe_flags;
458#define ISCSI_FW_RESPONSE_RESERVED2 (0xFF<<0)
459#define ISCSI_FW_RESPONSE_RESERVED2_SHIFT 0
460#define ISCSI_FW_RESPONSE_ERR_CODE (0xFF<<8)
461#define ISCSI_FW_RESPONSE_ERR_CODE_SHIFT 8
462#define ISCSI_FW_RESPONSE_RESERVED3 (0xFFFF<<16)
463#define ISCSI_FW_RESPONSE_RESERVED3_SHIFT 16
464 u32 stat_sn;
465 u32 hdr_dword2[2];
466 u32 hdr_dword3[2];
467 u32 task_stat;
468 u32 reserved0;
469 u32 hdr_itt;
470 u32 cq_req_sn;
471};
472
473
474/*
475 * iSCSI KCQ CQE parameters
476 */
477union iscsi_kcqe_params {
478 u32 reserved0[4];
479};
480
481/*
482 * iSCSI KCQ CQE
483 */
484struct iscsi_kcqe {
485 u32 iscsi_conn_id;
486 u32 completion_status;
487 u32 iscsi_conn_context_id;
488 union iscsi_kcqe_params params;
489#if defined(__BIG_ENDIAN)
490 u8 flags;
491#define ISCSI_KCQE_RESERVED0 (0xF<<0)
492#define ISCSI_KCQE_RESERVED0_SHIFT 0
493#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
494#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
495#define ISCSI_KCQE_RESERVED1 (0x1<<7)
496#define ISCSI_KCQE_RESERVED1_SHIFT 7
497 u8 op_code;
498 u16 qe_self_seq;
499#elif defined(__LITTLE_ENDIAN)
500 u16 qe_self_seq;
501 u8 op_code;
502 u8 flags;
503#define ISCSI_KCQE_RESERVED0 (0xF<<0)
504#define ISCSI_KCQE_RESERVED0_SHIFT 0
505#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
506#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
507#define ISCSI_KCQE_RESERVED1 (0x1<<7)
508#define ISCSI_KCQE_RESERVED1_SHIFT 7
509#endif
510};
511
512
513
514/*
515 * iSCSI KWQE header
516 */
517struct iscsi_kwqe_header {
518#if defined(__BIG_ENDIAN)
519 u8 flags;
520#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
521#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
522#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
523#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
524#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
525#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
526 u8 op_code;
527#elif defined(__LITTLE_ENDIAN)
528 u8 op_code;
529 u8 flags;
530#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
531#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
532#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
533#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
534#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
535#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
536#endif
537};
538
539/*
540 * iSCSI firmware init request 1
541 */
542struct iscsi_kwqe_init1 {
543#if defined(__BIG_ENDIAN)
544 struct iscsi_kwqe_header hdr;
545 u8 reserved0;
546 u8 num_cqs;
547#elif defined(__LITTLE_ENDIAN)
548 u8 num_cqs;
549 u8 reserved0;
550 struct iscsi_kwqe_header hdr;
551#endif
552 u32 dummy_buffer_addr_lo;
553 u32 dummy_buffer_addr_hi;
554#if defined(__BIG_ENDIAN)
555 u16 num_ccells_per_conn;
556 u16 num_tasks_per_conn;
557#elif defined(__LITTLE_ENDIAN)
558 u16 num_tasks_per_conn;
559 u16 num_ccells_per_conn;
560#endif
561#if defined(__BIG_ENDIAN)
562 u16 sq_wqes_per_page;
563 u16 sq_num_wqes;
564#elif defined(__LITTLE_ENDIAN)
565 u16 sq_num_wqes;
566 u16 sq_wqes_per_page;
567#endif
568#if defined(__BIG_ENDIAN)
569 u8 cq_log_wqes_per_page;
570 u8 flags;
571#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
572#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
573#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
574#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
575#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
576#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
577#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
578#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
579 u16 cq_num_wqes;
580#elif defined(__LITTLE_ENDIAN)
581 u16 cq_num_wqes;
582 u8 flags;
583#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
584#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
585#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
586#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
587#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
588#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
589#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
590#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
591 u8 cq_log_wqes_per_page;
592#endif
593#if defined(__BIG_ENDIAN)
594 u16 cq_num_pages;
595 u16 sq_num_pages;
596#elif defined(__LITTLE_ENDIAN)
597 u16 sq_num_pages;
598 u16 cq_num_pages;
599#endif
600#if defined(__BIG_ENDIAN)
601 u16 rq_buffer_size;
602 u16 rq_num_wqes;
603#elif defined(__LITTLE_ENDIAN)
604 u16 rq_num_wqes;
605 u16 rq_buffer_size;
606#endif
607};
608
609/*
610 * iSCSI firmware init request 2
611 */
612struct iscsi_kwqe_init2 {
613#if defined(__BIG_ENDIAN)
614 struct iscsi_kwqe_header hdr;
615 u16 max_cq_sqn;
616#elif defined(__LITTLE_ENDIAN)
617 u16 max_cq_sqn;
618 struct iscsi_kwqe_header hdr;
619#endif
620 u32 error_bit_map[2];
621 u32 reserved1[5];
622};
623
624/*
625 * Initial iSCSI connection offload request 1
626 */
627struct iscsi_kwqe_conn_offload1 {
628#if defined(__BIG_ENDIAN)
629 struct iscsi_kwqe_header hdr;
630 u16 iscsi_conn_id;
631#elif defined(__LITTLE_ENDIAN)
632 u16 iscsi_conn_id;
633 struct iscsi_kwqe_header hdr;
634#endif
635 u32 sq_page_table_addr_lo;
636 u32 sq_page_table_addr_hi;
637 u32 cq_page_table_addr_lo;
638 u32 cq_page_table_addr_hi;
639 u32 reserved0[3];
640};
641
642/*
643 * iSCSI Page Table Entry (PTE)
644 */
645struct iscsi_pte {
646 u32 hi;
647 u32 lo;
648};
649
650/*
651 * Initial iSCSI connection offload request 2
652 */
653struct iscsi_kwqe_conn_offload2 {
654#if defined(__BIG_ENDIAN)
655 struct iscsi_kwqe_header hdr;
656 u16 reserved0;
657#elif defined(__LITTLE_ENDIAN)
658 u16 reserved0;
659 struct iscsi_kwqe_header hdr;
660#endif
661 u32 rq_page_table_addr_lo;
662 u32 rq_page_table_addr_hi;
663 struct iscsi_pte sq_first_pte;
664 struct iscsi_pte cq_first_pte;
665 u32 num_additional_wqes;
666};
667
668
669/*
670 * Initial iSCSI connection offload request 3
671 */
672struct iscsi_kwqe_conn_offload3 {
673#if defined(__BIG_ENDIAN)
674 struct iscsi_kwqe_header hdr;
675 u16 reserved0;
676#elif defined(__LITTLE_ENDIAN)
677 u16 reserved0;
678 struct iscsi_kwqe_header hdr;
679#endif
680 u32 reserved1;
681 struct iscsi_pte qp_first_pte[3];
682};
683
684
685/*
686 * iSCSI connection update request
687 */
688struct iscsi_kwqe_conn_update {
689#if defined(__BIG_ENDIAN)
690 struct iscsi_kwqe_header hdr;
691 u16 reserved0;
692#elif defined(__LITTLE_ENDIAN)
693 u16 reserved0;
694 struct iscsi_kwqe_header hdr;
695#endif
696#if defined(__BIG_ENDIAN)
697 u8 session_error_recovery_level;
698 u8 max_outstanding_r2ts;
699 u8 reserved2;
700 u8 conn_flags;
701#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
702#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
703#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
704#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
705#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
706#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
707#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
708#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
709#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
710#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
711#elif defined(__LITTLE_ENDIAN)
712 u8 conn_flags;
713#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
714#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
715#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
716#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
717#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
718#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
719#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
720#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
721#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
722#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
723 u8 reserved2;
724 u8 max_outstanding_r2ts;
725 u8 session_error_recovery_level;
726#endif
727 u32 context_id;
728 u32 max_send_pdu_length;
729 u32 max_recv_pdu_length;
730 u32 first_burst_length;
731 u32 max_burst_length;
732 u32 exp_stat_sn;
733};
734
735/*
736 * iSCSI destroy connection request
737 */
738struct iscsi_kwqe_conn_destroy {
739#if defined(__BIG_ENDIAN)
740 struct iscsi_kwqe_header hdr;
741 u16 reserved0;
742#elif defined(__LITTLE_ENDIAN)
743 u16 reserved0;
744 struct iscsi_kwqe_header hdr;
745#endif
746 u32 context_id;
747 u32 reserved1[6];
748};
749
750/*
751 * iSCSI KWQ WQE
752 */
753union iscsi_kwqe {
754 struct iscsi_kwqe_init1 init1;
755 struct iscsi_kwqe_init2 init2;
756 struct iscsi_kwqe_conn_offload1 conn_offload1;
757 struct iscsi_kwqe_conn_offload2 conn_offload2;
758 struct iscsi_kwqe_conn_update conn_update;
759 struct iscsi_kwqe_conn_destroy conn_destroy;
760};
761
762/*
763 * iSCSI Login SQ WQE
764 */
765struct bnx2i_login_request {
766#if defined(__BIG_ENDIAN)
767 u8 op_code;
768 u8 op_attr;
769#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
770#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
771#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
772#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
773#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
774#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
775#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
776#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
777#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
778#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
779 u8 version_max;
780 u8 version_min;
781#elif defined(__LITTLE_ENDIAN)
782 u8 version_min;
783 u8 version_max;
784 u8 op_attr;
785#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
786#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
787#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
788#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
789#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
790#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
791#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
792#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
793#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
794#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
795 u8 op_code;
796#endif
797 u32 data_length;
798 u32 isid_lo;
799#if defined(__BIG_ENDIAN)
800 u16 isid_hi;
801 u16 tsih;
802#elif defined(__LITTLE_ENDIAN)
803 u16 tsih;
804 u16 isid_hi;
805#endif
806#if defined(__BIG_ENDIAN)
807 u16 reserved2;
808 u16 itt;
809#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
810#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
811#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
812#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
813#elif defined(__LITTLE_ENDIAN)
814 u16 itt;
815#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
816#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
817#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
818#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
819 u16 reserved2;
820#endif
821#if defined(__BIG_ENDIAN)
822 u16 cid;
823 u16 reserved3;
824#elif defined(__LITTLE_ENDIAN)
825 u16 reserved3;
826 u16 cid;
827#endif
828 u32 cmd_sn;
829 u32 exp_stat_sn;
830 u32 reserved4;
831 u32 resp_bd_list_addr_lo;
832 u32 resp_bd_list_addr_hi;
833 u32 resp_buffer;
834#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
835#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
836#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS (0xFF<<24)
837#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT 24
838#if defined(__BIG_ENDIAN)
839 u16 reserved8;
840 u8 reserved7;
841 u8 flags;
842#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
843#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
844#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
845#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
846#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
847#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
848#elif defined(__LITTLE_ENDIAN)
849 u8 flags;
850#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
851#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
852#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
853#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
854#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
855#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
856 u8 reserved7;
857 u16 reserved8;
858#endif
859 u32 bd_list_addr_lo;
860 u32 bd_list_addr_hi;
861#if defined(__BIG_ENDIAN)
862 u8 cq_index;
863 u8 reserved10;
864 u8 reserved9;
865 u8 num_bds;
866#elif defined(__LITTLE_ENDIAN)
867 u8 num_bds;
868 u8 reserved9;
869 u8 reserved10;
870 u8 cq_index;
871#endif
872};
873
874
875/*
876 * iSCSI Login CQE
877 */
878struct bnx2i_login_response {
879#if defined(__BIG_ENDIAN)
880 u8 op_code;
881 u8 response_flags;
882#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
883#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
884#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
885#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
886#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
887#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
888#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
889#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
890#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
891#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
892 u8 version_max;
893 u8 version_active;
894#elif defined(__LITTLE_ENDIAN)
895 u8 version_active;
896 u8 version_max;
897 u8 response_flags;
898#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
899#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
900#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
901#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
902#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
903#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
904#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
905#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
906#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
907#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
908 u8 op_code;
909#endif
910 u32 data_length;
911 u32 exp_cmd_sn;
912 u32 max_cmd_sn;
913 u32 reserved1[2];
914#if defined(__BIG_ENDIAN)
915 u16 reserved3;
916 u8 err_code;
917 u8 reserved2;
918#elif defined(__LITTLE_ENDIAN)
919 u8 reserved2;
920 u8 err_code;
921 u16 reserved3;
922#endif
923 u32 stat_sn;
924 u32 isid_lo;
925#if defined(__BIG_ENDIAN)
926 u16 isid_hi;
927 u16 tsih;
928#elif defined(__LITTLE_ENDIAN)
929 u16 tsih;
930 u16 isid_hi;
931#endif
932#if defined(__BIG_ENDIAN)
933 u8 status_class;
934 u8 status_detail;
935 u16 reserved4;
936#elif defined(__LITTLE_ENDIAN)
937 u16 reserved4;
938 u8 status_detail;
939 u8 status_class;
940#endif
941 u32 reserved5[3];
942#if defined(__BIG_ENDIAN)
943 u16 reserved6;
944 u16 itt;
945#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
946#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
947#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
948#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
949#elif defined(__LITTLE_ENDIAN)
950 u16 itt;
951#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
952#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
953#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
954#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
955 u16 reserved6;
956#endif
957 u32 cq_req_sn;
958};
959
960
961/*
962 * iSCSI Logout SQ WQE
963 */
964struct bnx2i_logout_request {
965#if defined(__BIG_ENDIAN)
966 u8 op_code;
967 u8 op_attr;
968#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
969#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
970#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
971#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
972 u16 reserved0;
973#elif defined(__LITTLE_ENDIAN)
974 u16 reserved0;
975 u8 op_attr;
976#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
977#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
978#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
979#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
980 u8 op_code;
981#endif
982 u32 data_length;
983 u32 reserved1[2];
984#if defined(__BIG_ENDIAN)
985 u16 reserved2;
986 u16 itt;
987#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
988#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
989#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
990#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
991#elif defined(__LITTLE_ENDIAN)
992 u16 itt;
993#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
994#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
995#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
996#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
997 u16 reserved2;
998#endif
999#if defined(__BIG_ENDIAN)
1000 u16 cid;
1001 u16 reserved3;
1002#elif defined(__LITTLE_ENDIAN)
1003 u16 reserved3;
1004 u16 cid;
1005#endif
1006 u32 cmd_sn;
1007 u32 reserved4[5];
1008 u32 zero_fill;
1009 u32 bd_list_addr_lo;
1010 u32 bd_list_addr_hi;
1011#if defined(__BIG_ENDIAN)
1012 u8 cq_index;
1013 u8 reserved6;
1014 u8 reserved5;
1015 u8 num_bds;
1016#elif defined(__LITTLE_ENDIAN)
1017 u8 num_bds;
1018 u8 reserved5;
1019 u8 reserved6;
1020 u8 cq_index;
1021#endif
1022};
1023
1024
1025/*
1026 * iSCSI Logout CQE
1027 */
1028struct bnx2i_logout_response {
1029#if defined(__BIG_ENDIAN)
1030 u8 op_code;
1031 u8 reserved1;
1032 u8 response;
1033 u8 reserved0;
1034#elif defined(__LITTLE_ENDIAN)
1035 u8 reserved0;
1036 u8 response;
1037 u8 reserved1;
1038 u8 op_code;
1039#endif
1040 u32 reserved2;
1041 u32 exp_cmd_sn;
1042 u32 max_cmd_sn;
1043 u32 reserved3[2];
1044#if defined(__BIG_ENDIAN)
1045 u16 reserved5;
1046 u8 err_code;
1047 u8 reserved4;
1048#elif defined(__LITTLE_ENDIAN)
1049 u8 reserved4;
1050 u8 err_code;
1051 u16 reserved5;
1052#endif
1053 u32 reserved6[3];
1054#if defined(__BIG_ENDIAN)
1055 u16 time_to_wait;
1056 u16 time_to_retain;
1057#elif defined(__LITTLE_ENDIAN)
1058 u16 time_to_retain;
1059 u16 time_to_wait;
1060#endif
1061 u32 reserved7[3];
1062#if defined(__BIG_ENDIAN)
1063 u16 reserved8;
1064 u16 itt;
1065#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
1066#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
1067#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
1068#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
1069#elif defined(__LITTLE_ENDIAN)
1070 u16 itt;
1071#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
1072#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
1073#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
1074#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
1075 u16 reserved8;
1076#endif
1077 u32 cq_req_sn;
1078};
1079
1080
1081/*
1082 * iSCSI Nop-In CQE
1083 */
1084struct bnx2i_nop_in_msg {
1085#if defined(__BIG_ENDIAN)
1086 u8 op_code;
1087 u8 reserved1;
1088 u16 reserved0;
1089#elif defined(__LITTLE_ENDIAN)
1090 u16 reserved0;
1091 u8 reserved1;
1092 u8 op_code;
1093#endif
1094 u32 data_length;
1095 u32 exp_cmd_sn;
1096 u32 max_cmd_sn;
1097 u32 ttt;
1098 u32 reserved2;
1099#if defined(__BIG_ENDIAN)
1100 u16 reserved4;
1101 u8 err_code;
1102 u8 reserved3;
1103#elif defined(__LITTLE_ENDIAN)
1104 u8 reserved3;
1105 u8 err_code;
1106 u16 reserved4;
1107#endif
1108 u32 reserved5;
1109 u32 lun[2];
1110 u32 reserved6[4];
1111#if defined(__BIG_ENDIAN)
1112 u16 reserved7;
1113 u16 itt;
1114#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
1115#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
1116#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
1117#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
1118#elif defined(__LITTLE_ENDIAN)
1119 u16 itt;
1120#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
1121#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
1122#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
1123#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
1124 u16 reserved7;
1125#endif
1126 u32 cq_req_sn;
1127};
1128
1129
1130/*
1131 * iSCSI NOP-OUT SQ WQE
1132 */
1133struct bnx2i_nop_out_request {
1134#if defined(__BIG_ENDIAN)
1135 u8 op_code;
1136 u8 op_attr;
1137#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
1138#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
1139#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
1140#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
1141 u16 reserved0;
1142#elif defined(__LITTLE_ENDIAN)
1143 u16 reserved0;
1144 u8 op_attr;
1145#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
1146#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
1147#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
1148#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
1149 u8 op_code;
1150#endif
1151 u32 data_length;
1152 u32 lun[2];
1153#if defined(__BIG_ENDIAN)
1154 u16 reserved2;
1155 u16 itt;
1156#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
1157#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
1158#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
1159#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
1160#elif defined(__LITTLE_ENDIAN)
1161 u16 itt;
1162#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
1163#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
1164#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
1165#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
1166 u16 reserved2;
1167#endif
1168 u32 ttt;
1169 u32 cmd_sn;
1170 u32 reserved3[2];
1171 u32 resp_bd_list_addr_lo;
1172 u32 resp_bd_list_addr_hi;
1173 u32 resp_buffer;
1174#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
1175#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
1176#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS (0xFF<<24)
1177#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS_SHIFT 24
1178#if defined(__BIG_ENDIAN)
1179 u16 reserved7;
1180 u8 reserved6;
1181 u8 flags;
1182#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
1183#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
1184#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
1185#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
1186#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
1187#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
1188#elif defined(__LITTLE_ENDIAN)
1189 u8 flags;
1190#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
1191#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
1192#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
1193#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
1194#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
1195#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
1196 u8 reserved6;
1197 u16 reserved7;
1198#endif
1199 u32 bd_list_addr_lo;
1200 u32 bd_list_addr_hi;
1201#if defined(__BIG_ENDIAN)
1202 u8 cq_index;
1203 u8 reserved9;
1204 u8 reserved8;
1205 u8 num_bds;
1206#elif defined(__LITTLE_ENDIAN)
1207 u8 num_bds;
1208 u8 reserved8;
1209 u8 reserved9;
1210 u8 cq_index;
1211#endif
1212};
1213
1214/*
1215 * iSCSI Reject CQE
1216 */
1217struct bnx2i_reject_msg {
1218#if defined(__BIG_ENDIAN)
1219 u8 op_code;
1220 u8 reserved1;
1221 u8 reason;
1222 u8 reserved0;
1223#elif defined(__LITTLE_ENDIAN)
1224 u8 reserved0;
1225 u8 reason;
1226 u8 reserved1;
1227 u8 op_code;
1228#endif
1229 u32 data_length;
1230 u32 exp_cmd_sn;
1231 u32 max_cmd_sn;
1232 u32 reserved2[2];
1233#if defined(__BIG_ENDIAN)
1234 u16 reserved4;
1235 u8 err_code;
1236 u8 reserved3;
1237#elif defined(__LITTLE_ENDIAN)
1238 u8 reserved3;
1239 u8 err_code;
1240 u16 reserved4;
1241#endif
1242 u32 reserved5[8];
1243 u32 cq_req_sn;
1244};
1245
1246/*
1247 * bnx2i iSCSI TMF SQ WQE
1248 */
1249struct bnx2i_tmf_request {
1250#if defined(__BIG_ENDIAN)
1251 u8 op_code;
1252 u8 op_attr;
1253#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
1254#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
1255#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
1256#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
1257 u16 reserved0;
1258#elif defined(__LITTLE_ENDIAN)
1259 u16 reserved0;
1260 u8 op_attr;
1261#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
1262#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
1263#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
1264#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
1265 u8 op_code;
1266#endif
1267 u32 data_length;
1268 u32 lun[2];
1269#if defined(__BIG_ENDIAN)
1270 u16 reserved1;
1271 u16 itt;
1272#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
1273#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
1274#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
1275#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
1276#elif defined(__LITTLE_ENDIAN)
1277 u16 itt;
1278#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
1279#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
1280#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
1281#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
1282 u16 reserved1;
1283#endif
1284 u32 ref_itt;
1285 u32 cmd_sn;
1286 u32 reserved2;
1287 u32 ref_cmd_sn;
1288 u32 reserved3[3];
1289 u32 zero_fill;
1290 u32 bd_list_addr_lo;
1291 u32 bd_list_addr_hi;
1292#if defined(__BIG_ENDIAN)
1293 u8 cq_index;
1294 u8 reserved5;
1295 u8 reserved4;
1296 u8 num_bds;
1297#elif defined(__LITTLE_ENDIAN)
1298 u8 num_bds;
1299 u8 reserved4;
1300 u8 reserved5;
1301 u8 cq_index;
1302#endif
1303};
1304
1305/*
1306 * iSCSI Text SQ WQE
1307 */
1308struct bnx2i_text_request {
1309#if defined(__BIG_ENDIAN)
1310 u8 op_code;
1311 u8 op_attr;
1312#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
1313#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
1314#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
1315#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
1316#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
1317#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
1318 u16 reserved0;
1319#elif defined(__LITTLE_ENDIAN)
1320 u16 reserved0;
1321 u8 op_attr;
1322#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
1323#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
1324#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
1325#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
1326#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
1327#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
1328 u8 op_code;
1329#endif
1330 u32 data_length;
1331 u32 lun[2];
1332#if defined(__BIG_ENDIAN)
1333 u16 reserved3;
1334 u16 itt;
1335#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
1336#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
1337#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
1338#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
1339#elif defined(__LITTLE_ENDIAN)
1340 u16 itt;
1341#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
1342#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
1343#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
1344#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
1345 u16 reserved3;
1346#endif
1347 u32 ttt;
1348 u32 cmd_sn;
1349 u32 reserved4[2];
1350 u32 resp_bd_list_addr_lo;
1351 u32 resp_bd_list_addr_hi;
1352 u32 resp_buffer;
1353#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
1354#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
1355#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS (0xFF<<24)
1356#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT 24
1357 u32 zero_fill;
1358 u32 bd_list_addr_lo;
1359 u32 bd_list_addr_hi;
1360#if defined(__BIG_ENDIAN)
1361 u8 cq_index;
1362 u8 reserved7;
1363 u8 reserved6;
1364 u8 num_bds;
1365#elif defined(__LITTLE_ENDIAN)
1366 u8 num_bds;
1367 u8 reserved6;
1368 u8 reserved7;
1369 u8 cq_index;
1370#endif
1371};
1372
1373/*
1374 * iSCSI SQ WQE
1375 */
1376union iscsi_request {
1377 struct bnx2i_cmd_request cmd;
1378 struct bnx2i_tmf_request tmf;
1379 struct bnx2i_nop_out_request nop_out;
1380 struct bnx2i_login_request login_req;
1381 struct bnx2i_text_request text;
1382 struct bnx2i_logout_request logout_req;
1383 struct bnx2i_cleanup_request cleanup;
1384};
1385
1386
1387/*
1388 * iSCSI TMF CQE
1389 */
1390struct bnx2i_tmf_response {
1391#if defined(__BIG_ENDIAN)
1392 u8 op_code;
1393 u8 reserved1;
1394 u8 response;
1395 u8 reserved0;
1396#elif defined(__LITTLE_ENDIAN)
1397 u8 reserved0;
1398 u8 response;
1399 u8 reserved1;
1400 u8 op_code;
1401#endif
1402 u32 reserved2;
1403 u32 exp_cmd_sn;
1404 u32 max_cmd_sn;
1405 u32 reserved3[2];
1406#if defined(__BIG_ENDIAN)
1407 u16 reserved5;
1408 u8 err_code;
1409 u8 reserved4;
1410#elif defined(__LITTLE_ENDIAN)
1411 u8 reserved4;
1412 u8 err_code;
1413 u16 reserved5;
1414#endif
1415 u32 reserved6[7];
1416#if defined(__BIG_ENDIAN)
1417 u16 reserved7;
1418 u16 itt;
1419#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
1420#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
1421#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
1422#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
1423#elif defined(__LITTLE_ENDIAN)
1424 u16 itt;
1425#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
1426#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
1427#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
1428#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
1429 u16 reserved7;
1430#endif
1431 u32 cq_req_sn;
1432};
1433
1434/*
1435 * iSCSI Text CQE
1436 */
1437struct bnx2i_text_response {
1438#if defined(__BIG_ENDIAN)
1439 u8 op_code;
1440 u8 response_flags;
1441#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
1442#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
1443#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
1444#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
1445#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
1446#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
1447 u16 reserved0;
1448#elif defined(__LITTLE_ENDIAN)
1449 u16 reserved0;
1450 u8 response_flags;
1451#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
1452#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
1453#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
1454#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
1455#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
1456#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
1457 u8 op_code;
1458#endif
1459 u32 data_length;
1460 u32 exp_cmd_sn;
1461 u32 max_cmd_sn;
1462 u32 ttt;
1463 u32 reserved2;
1464#if defined(__BIG_ENDIAN)
1465 u16 reserved4;
1466 u8 err_code;
1467 u8 reserved3;
1468#elif defined(__LITTLE_ENDIAN)
1469 u8 reserved3;
1470 u8 err_code;
1471 u16 reserved4;
1472#endif
1473 u32 reserved5;
1474 u32 lun[2];
1475 u32 reserved6[4];
1476#if defined(__BIG_ENDIAN)
1477 u16 reserved7;
1478 u16 itt;
1479#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
1480#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
1481#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
1482#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
1483#elif defined(__LITTLE_ENDIAN)
1484 u16 itt;
1485#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
1486#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
1487#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
1488#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
1489 u16 reserved7;
1490#endif
1491 u32 cq_req_sn;
1492};
1493
1494/*
1495 * iSCSI CQE
1496 */
1497union iscsi_response {
1498 struct bnx2i_cmd_response cmd;
1499 struct bnx2i_tmf_response tmf;
1500 struct bnx2i_login_response login_resp;
1501 struct bnx2i_text_response text;
1502 struct bnx2i_logout_response logout_resp;
1503 struct bnx2i_cleanup_response cleanup;
1504 struct bnx2i_reject_msg reject;
1505 struct bnx2i_async_msg async;
1506 struct bnx2i_nop_in_msg nop_in;
1507};
1508
1509#endif /* __57XX_ISCSI_HSI_LINUX_LE__ */
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
new file mode 100644
index 000000000000..1e9f7141102b
--- /dev/null
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -0,0 +1,10 @@
1config SCSI_BNX2_ISCSI
2 tristate "Broadcom NetXtreme II iSCSI support"
3 select SCSI_ISCSI_ATTRS
4 select NETDEVICES
5 select NETDEV_1000
6 select CNIC
7 depends on PCI
8 ---help---
9 This driver supports iSCSI offload for the Broadcom NetXtreme II
10 devices.
diff --git a/drivers/scsi/bnx2i/Makefile b/drivers/scsi/bnx2i/Makefile
new file mode 100644
index 000000000000..b5802bd2e76a
--- /dev/null
+++ b/drivers/scsi/bnx2i/Makefile
@@ -0,0 +1,3 @@
1bnx2i-y := bnx2i_init.o bnx2i_hwi.o bnx2i_iscsi.o bnx2i_sysfs.o
2
3obj-$(CONFIG_SCSI_BNX2_ISCSI) += bnx2i.o
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
new file mode 100644
index 000000000000..d7576f28c6e9
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -0,0 +1,771 @@
1/* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
12 */
13
14#ifndef _BNX2I_H_
15#define _BNX2I_H_
16
17#include <linux/module.h>
18#include <linux/moduleparam.h>
19
20#include <linux/errno.h>
21#include <linux/pci.h>
22#include <linux/spinlock.h>
23#include <linux/interrupt.h>
24#include <linux/sched.h>
25#include <linux/in.h>
26#include <linux/kfifo.h>
27#include <linux/netdevice.h>
28#include <linux/completion.h>
29
30#include <scsi/scsi_cmnd.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_eh.h>
33#include <scsi/scsi_host.h>
34#include <scsi/scsi.h>
35#include <scsi/iscsi_proto.h>
36#include <scsi/libiscsi.h>
37#include <scsi/scsi_transport_iscsi.h>
38
39#include "../../net/cnic_if.h"
40#include "57xx_iscsi_hsi.h"
41#include "57xx_iscsi_constants.h"
42
43#define BNX2_ISCSI_DRIVER_NAME "bnx2i"
44
45#define BNX2I_MAX_ADAPTERS 8
46
47#define ISCSI_MAX_CONNS_PER_HBA 128
48#define ISCSI_MAX_SESS_PER_HBA ISCSI_MAX_CONNS_PER_HBA
49#define ISCSI_MAX_CMDS_PER_SESS 128
50
51/* Total active commands across all connections supported by devices */
52#define ISCSI_MAX_CMDS_PER_HBA_5708 (28 * (ISCSI_MAX_CMDS_PER_SESS - 1))
53#define ISCSI_MAX_CMDS_PER_HBA_5709 (128 * (ISCSI_MAX_CMDS_PER_SESS - 1))
54#define ISCSI_MAX_CMDS_PER_HBA_57710 (256 * (ISCSI_MAX_CMDS_PER_SESS - 1))
55
56#define ISCSI_MAX_BDS_PER_CMD 32
57
58#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8
59#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4
60
61/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */
62#define MAX_BD_LENGTH 65535
63#define BD_SPLIT_SIZE 32768
64
 65/* min, max & default values for SQ/RQ/CQ size, configurable via modparam */
66#define BNX2I_SQ_WQES_MIN 16
67#define BNX2I_570X_SQ_WQES_MAX 128
68#define BNX2I_5770X_SQ_WQES_MAX 512
69#define BNX2I_570X_SQ_WQES_DEFAULT 128
70#define BNX2I_5770X_SQ_WQES_DEFAULT 256
71
72#define BNX2I_570X_CQ_WQES_MAX 128
73#define BNX2I_5770X_CQ_WQES_MAX 512
74
75#define BNX2I_RQ_WQES_MIN 16
76#define BNX2I_RQ_WQES_MAX 32
77#define BNX2I_RQ_WQES_DEFAULT 16
78
79/* CCELLs per conn */
80#define BNX2I_CCELLS_MIN 16
81#define BNX2I_CCELLS_MAX 96
82#define BNX2I_CCELLS_DEFAULT 64
83
84#define ITT_INVALID_SIGNATURE 0xFFFF
85
86#define ISCSI_CMD_CLEANUP_TIMEOUT 100
87
88#define BNX2I_CONN_CTX_BUF_SIZE 16384
89
90#define BNX2I_SQ_WQE_SIZE 64
91#define BNX2I_RQ_WQE_SIZE 256
92#define BNX2I_CQE_SIZE 64
93
94#define MB_KERNEL_CTX_SHIFT 8
95#define MB_KERNEL_CTX_SIZE (1 << MB_KERNEL_CTX_SHIFT)
96
97#define CTX_SHIFT 7
98#define GET_CID_NUM(cid_addr) ((cid_addr) >> CTX_SHIFT)
99
100#define CTX_OFFSET 0x10000
101#define MAX_CID_CNT 0x4000
102
103/* 5709 context registers */
104#define BNX2_MQ_CONFIG2 0x00003d00
105#define BNX2_MQ_CONFIG2_CONT_SZ (0x7L<<4)
106#define BNX2_MQ_CONFIG2_FIRST_L4L5 (0x1fL<<8)
107
108/* 57710's BAR2 is mapped to doorbell registers */
109#define BNX2X_DOORBELL_PCI_BAR 2
110#define BNX2X_MAX_CQS 8
111
112#define CNIC_ARM_CQE 1
113#define CNIC_DISARM_CQE 0
114
115#define REG_RD(__hba, offset) \
116 readl(__hba->regview + offset)
117#define REG_WR(__hba, offset, val) \
118 writel(val, __hba->regview + offset)
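/*
 * Illustrative use of the register accessors (assumes hba->regview has been
 * ioremap'd by the init code; the register name is one defined below):
 *	u32 cfg = REG_RD(hba, BNX2_MQ_CONFIG2);
 *	REG_WR(hba, BNX2_MQ_CONFIG2, cfg);
 */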
119
120
121/**
122 * struct generic_pdu_resc - login pdu resource structure
123 *
124 * @req_buf: driver buffer used to stage payload associated with
125 * the login request
126 * @req_dma_addr: dma address for iscsi login request payload buffer
127 * @req_buf_size: actual login request payload length
128 * @req_wr_ptr: pointer into login request buffer when next data is
129 * to be written
130 * @resp_hdr: iscsi header where iscsi login response header is to
131 * be recreated
132 * @resp_buf: buffer to stage login response payload
133 * @resp_dma_addr: login response payload buffer dma address
 134 * @resp_buf_size:     login response payload length
135 * @resp_wr_ptr: pointer into login response buffer when next data is
136 * to be written
137 * @req_bd_tbl: iscsi login request payload BD table
138 * @req_bd_dma: login request BD table dma address
139 * @resp_bd_tbl: iscsi login response payload BD table
 140 * @resp_bd_dma:       login response BD table dma address
141 *
 142 * The following structure defines buffer info for generic PDUs such as
 143 * iSCSI Login, Logout and NOP
144 */
145struct generic_pdu_resc {
146 char *req_buf;
147 dma_addr_t req_dma_addr;
148 u32 req_buf_size;
149 char *req_wr_ptr;
150 struct iscsi_hdr resp_hdr;
151 char *resp_buf;
152 dma_addr_t resp_dma_addr;
153 u32 resp_buf_size;
154 char *resp_wr_ptr;
155 char *req_bd_tbl;
156 dma_addr_t req_bd_dma;
157 char *resp_bd_tbl;
158 dma_addr_t resp_bd_dma;
159};
160
161
162/**
163 * struct bd_resc_page - tracks DMA'able memory allocated for BD tables
164 *
165 * @link: list head to link elements
 166 * @max_ptrs:           maximum number of pointers that can be stored in this page
 167 * @num_valid:          number of valid pointers in this page
 168 * @page:               base address of the page pointer array
169 *
170 * structure to track DMA'able memory allocated for command BD tables
171 */
172struct bd_resc_page {
173 struct list_head link;
174 u32 max_ptrs;
175 u32 num_valid;
176 void *page[1];
177};
178
179
180/**
 181 * struct io_bdt - I/O buffer descriptor table
182 *
183 * @bd_tbl: BD table's virtual address
184 * @bd_tbl_dma: BD table's dma address
185 * @bd_valid: num valid BD entries
186 *
187 * IO BD table
188 */
189struct io_bdt {
190 struct iscsi_bd *bd_tbl;
191 dma_addr_t bd_tbl_dma;
192 u16 bd_valid;
193};
194
195
196/**
197 * bnx2i_cmd - iscsi command structure
198 *
199 * @scsi_cmd: SCSI-ML task pointer corresponding to this iscsi cmd
200 * @sg: SG list
201 * @io_tbl: buffer descriptor (BD) table
202 * @bd_tbl_dma: buffer descriptor (BD) table's dma address
203 */
204struct bnx2i_cmd {
205 struct iscsi_hdr hdr;
206 struct bnx2i_conn *conn;
207 struct scsi_cmnd *scsi_cmd;
208 struct scatterlist *sg;
209 struct io_bdt io_tbl;
210 dma_addr_t bd_tbl_dma;
211 struct bnx2i_cmd_request req;
212};
213
214
215/**
216 * struct bnx2i_conn - iscsi connection structure
217 *
218 * @cls_conn: pointer to iscsi cls conn
219 * @hba: adapter structure pointer
220 * @iscsi_conn_cid: iscsi conn id
221 * @fw_cid: firmware iscsi context id
222 * @ep: endpoint structure pointer
223 * @gen_pdu: login/nopout/logout pdu resources
224 * @violation_notified: bit mask used to track iscsi error/warning messages
225 * already printed out
226 *
227 * iSCSI connection structure
228 */
229struct bnx2i_conn {
230 struct iscsi_cls_conn *cls_conn;
231 struct bnx2i_hba *hba;
232 struct completion cmd_cleanup_cmpl;
233 int is_bound;
234
235 u32 iscsi_conn_cid;
236#define BNX2I_CID_RESERVED 0x5AFF
237 u32 fw_cid;
238
239 struct timer_list poll_timer;
240 /*
241 * Queue Pair (QP) related structure elements.
242 */
243 struct bnx2i_endpoint *ep;
244
245 /*
246 * Buffer for login negotiation process
247 */
248 struct generic_pdu_resc gen_pdu;
249 u64 violation_notified;
250};
251
252
253
254/**
255 * struct iscsi_cid_queue - Per adapter iscsi cid queue
256 *
257 * @cid_que_base: queue base memory
258 * @cid_que: queue memory pointer
 259 * @cid_q_prod_idx:     producer index
 260 * @cid_q_cons_idx:     consumer index
 261 * @cid_q_max_idx:      max index, used to detect wrap-around condition
262 * @cid_free_cnt: queue size
263 * @conn_cid_tbl: iscsi cid to conn structure mapping table
264 *
265 * Per adapter iSCSI CID Queue
266 */
267struct iscsi_cid_queue {
268 void *cid_que_base;
269 u32 *cid_que;
270 u32 cid_q_prod_idx;
271 u32 cid_q_cons_idx;
272 u32 cid_q_max_idx;
273 u32 cid_free_cnt;
274 struct bnx2i_conn **conn_cid_tbl;
275};
276
277/**
278 * struct bnx2i_hba - bnx2i adapter structure
279 *
280 * @link: list head to link elements
281 * @cnic: pointer to cnic device
282 * @pcidev: pointer to pci dev
283 * @netdev: pointer to netdev structure
284 * @regview: mapped PCI register space
285 * @age: age, incremented by every recovery
286 * @cnic_dev_type: cnic device type, 5706/5708/5709/57710
287 * @mail_queue_access: mailbox queue access mode, applicable to 5709 only
 288 * @reg_with_cnic:      indicates whether the device is registered with CNIC
289 * @adapter_state: adapter state, UP, GOING_DOWN, LINK_DOWN
290 * @mtu_supported: Ethernet MTU supported
291 * @shost: scsi host pointer
292 * @max_sqes: SQ size
293 * @max_rqes: RQ size
294 * @max_cqes: CQ size
295 * @num_ccell: number of command cells per connection
296 * @ofld_conns_active: active connection list
297 * @max_active_conns: max offload connections supported by this device
298 * @cid_que: iscsi cid queue
299 * @ep_rdwr_lock: read / write lock to synchronize various ep lists
300 * @ep_ofld_list: connection list for pending offload completion
 301 * @ep_destroy_list:    connection list for pending destroy completion
302 * @mp_bd_tbl: BD table to be used with middle path requests
303 * @mp_bd_dma: DMA address of 'mp_bd_tbl' memory buffer
304 * @dummy_buffer: Dummy buffer to be used with zero length scsicmd reqs
305 * @dummy_buf_dma: DMA address of 'dummy_buffer' memory buffer
 306 * @lock:               lock to synchronize access to hba structure
307 * @pci_did: PCI device ID
308 * @pci_vid: PCI vendor ID
309 * @pci_sdid: PCI subsystem device ID
310 * @pci_svid: PCI subsystem vendor ID
311 * @pci_func: PCI function number in system pci tree
312 * @pci_devno: PCI device number in system pci tree
313 * @num_wqe_sent: statistic counter, total wqe's sent
314 * @num_cqe_rcvd: statistic counter, total cqe's received
315 * @num_intr_claimed: statistic counter, total interrupts claimed
316 * @link_changed_count: statistic counter, num of link change notifications
317 * received
318 * @ipaddr_changed_count: statistic counter, num times IP address changed while
319 * at least one connection is offloaded
320 * @num_sess_opened: statistic counter, total num sessions opened
321 * @num_conn_opened: statistic counter, total num conns opened on this hba
322 * @ctx_ccell_tasks: captures number of ccells and tasks supported by
323 * currently offloaded connection, used to decode
324 * context memory
325 *
326 * Adapter Data Structure
327 */
328struct bnx2i_hba {
329 struct list_head link;
330 struct cnic_dev *cnic;
331 struct pci_dev *pcidev;
332 struct net_device *netdev;
333 void __iomem *regview;
334
335 u32 age;
336 unsigned long cnic_dev_type;
337 #define BNX2I_NX2_DEV_5706 0x0
338 #define BNX2I_NX2_DEV_5708 0x1
339 #define BNX2I_NX2_DEV_5709 0x2
340 #define BNX2I_NX2_DEV_57710 0x3
341 u32 mail_queue_access;
342 #define BNX2I_MQ_KERNEL_MODE 0x0
343 #define BNX2I_MQ_KERNEL_BYPASS_MODE 0x1
344 #define BNX2I_MQ_BIN_MODE 0x2
345 unsigned long reg_with_cnic;
346 #define BNX2I_CNIC_REGISTERED 1
347
348 unsigned long adapter_state;
349 #define ADAPTER_STATE_UP 0
350 #define ADAPTER_STATE_GOING_DOWN 1
351 #define ADAPTER_STATE_LINK_DOWN 2
352 #define ADAPTER_STATE_INIT_FAILED 31
353 unsigned int mtu_supported;
354 #define BNX2I_MAX_MTU_SUPPORTED 1500
355
356 struct Scsi_Host *shost;
357
358 u32 max_sqes;
359 u32 max_rqes;
360 u32 max_cqes;
361 u32 num_ccell;
362
363 int ofld_conns_active;
364
365 int max_active_conns;
366 struct iscsi_cid_queue cid_que;
367
368 rwlock_t ep_rdwr_lock;
369 struct list_head ep_ofld_list;
370 struct list_head ep_destroy_list;
371
372 /*
 373         * BD table to be used with MP (Middle Path) requests.
374 */
375 char *mp_bd_tbl;
376 dma_addr_t mp_bd_dma;
377 char *dummy_buffer;
378 dma_addr_t dummy_buf_dma;
379
380 spinlock_t lock; /* protects hba structure access */
381 struct mutex net_dev_lock;/* sync net device access */
382
383 /*
384 * PCI related info.
385 */
386 u16 pci_did;
387 u16 pci_vid;
388 u16 pci_sdid;
389 u16 pci_svid;
390 u16 pci_func;
391 u16 pci_devno;
392
393 /*
 394         * Following are a bunch of statistics useful during development
 395         * and at later stages for scoreboarding.
396 */
397 u32 num_wqe_sent;
398 u32 num_cqe_rcvd;
399 u32 num_intr_claimed;
400 u32 link_changed_count;
401 u32 ipaddr_changed_count;
402 u32 num_sess_opened;
403 u32 num_conn_opened;
404 unsigned int ctx_ccell_tasks;
405};
406
407
408/*******************************************************************************
409 * QP [ SQ / RQ / CQ ] info.
410 ******************************************************************************/
411
412/*
413 * SQ/RQ/CQ generic structure definition
414 */
415struct sqe {
416 u8 sqe_byte[BNX2I_SQ_WQE_SIZE];
417};
418
419struct rqe {
420 u8 rqe_byte[BNX2I_RQ_WQE_SIZE];
421};
422
423struct cqe {
424 u8 cqe_byte[BNX2I_CQE_SIZE];
425};
426
427
428enum {
429#if defined(__LITTLE_ENDIAN)
430 CNIC_EVENT_COAL_INDEX = 0x0,
431 CNIC_SEND_DOORBELL = 0x4,
432 CNIC_EVENT_CQ_ARM = 0x7,
433 CNIC_RECV_DOORBELL = 0x8
434#elif defined(__BIG_ENDIAN)
435 CNIC_EVENT_COAL_INDEX = 0x2,
436 CNIC_SEND_DOORBELL = 0x6,
437 CNIC_EVENT_CQ_ARM = 0x4,
438 CNIC_RECV_DOORBELL = 0xa
439#endif
440};
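/*
 * These values appear to be byte offsets, relative to a connection's mapped
 * doorbell window (qp_info.ctx_base), of the event-coalescing index, the
 * send/receive doorbells and the CQ arm field; the two variants account for
 * host byte order when 8/16-bit fields are addressed inside 32-bit registers.
 */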
441
442
443/*
444 * CQ DB
445 */
446struct bnx2x_iscsi_cq_pend_cmpl {
447 /* CQ producer, updated by Ustorm */
448 u16 ustrom_prod;
449 /* CQ pending completion counter */
450 u16 pend_cntr;
451};
452
453
454struct bnx2i_5771x_cq_db {
455 struct bnx2x_iscsi_cq_pend_cmpl qp_pend_cmpl[BNX2X_MAX_CQS];
456 /* CQ pending completion ITT array */
457 u16 itt[BNX2X_MAX_CQS];
 458         /* Cstorm CQ sequence to notify array, updated by driver */
459 u16 sqn[BNX2X_MAX_CQS];
 460         u32 reserved[4] /* 16 byte alignment */;
461};
462
463
464struct bnx2i_5771x_sq_rq_db {
465 u16 prod_idx;
466 u8 reserved0[14]; /* Pad structure size to 16 bytes */
467};
468
469
470struct bnx2i_5771x_dbell_hdr {
471 u8 header;
472 /* 1 for rx doorbell, 0 for tx doorbell */
473#define B577XX_DOORBELL_HDR_RX (0x1<<0)
474#define B577XX_DOORBELL_HDR_RX_SHIFT 0
475 /* 0 for normal doorbell, 1 for advertise wnd doorbell */
476#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1)
477#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1
478 /* rdma tx only: DPM transaction size specifier (64/128/256/512B) */
479#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2)
480#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2
481 /* connection type */
482#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4)
483#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4
484};
485
486struct bnx2i_5771x_dbell {
487 struct bnx2i_5771x_dbell_hdr dbell;
488 u8 pad[3];
489
490};
491
492/**
 493 * struct qp_info - QP (shared queue region) attributes structure
494 *
495 * @ctx_base: ioremapped pci register base to access doorbell register
496 * pertaining to this offloaded connection
497 * @sq_virt: virtual address of send queue (SQ) region
498 * @sq_phys: DMA address of SQ memory region
499 * @sq_mem_size: SQ size
500 * @sq_prod_qe: SQ producer entry pointer
501 * @sq_cons_qe: SQ consumer entry pointer
 502 * @sq_first_qe:        virtual address of first entry in SQ
 503 * @sq_last_qe:         virtual address of last entry in SQ
 504 * @sq_prod_idx:        SQ producer index
 505 * @sq_cons_idx:        SQ consumer index
 506 * @sqe_left:           number of SQ entries left
 507 * @sq_pgtbl_virt:      page table describing buffer constituting SQ region
508 * @sq_pgtbl_phys: dma address of 'sq_pgtbl_virt'
509 * @sq_pgtbl_size: SQ page table size
510 * @cq_virt: virtual address of completion queue (CQ) region
 511 * @cq_phys:            DMA address of CQ memory region
 512 * @cq_mem_size:        CQ size
 513 * @cq_prod_qe:         CQ producer entry pointer
 514 * @cq_cons_qe:         CQ consumer entry pointer
 515 * @cq_first_qe:        virtual address of first entry in CQ
 516 * @cq_last_qe:         virtual address of last entry in CQ
 517 * @cq_prod_idx:        CQ producer index
 518 * @cq_cons_idx:        CQ consumer index
 519 * @cqe_left:           number of CQ entries left
 520 * @cqe_size:           size of each CQ entry
 521 * @cqe_exp_seq_sn:     next expected CQE sequence number
 522 * @cq_pgtbl_virt:      page table describing buffer constituting CQ region
523 * @cq_pgtbl_phys: dma address of 'cq_pgtbl_virt'
524 * @cq_pgtbl_size: CQ page table size
525 * @rq_virt: virtual address of receive queue (RQ) region
526 * @rq_phys: DMA address of RQ memory region
527 * @rq_mem_size: RQ size
528 * @rq_prod_qe: RQ producer entry pointer
529 * @rq_cons_qe: RQ consumer entry pointer
 530 * @rq_first_qe:        virtual address of first entry in RQ
 531 * @rq_last_qe:         virtual address of last entry in RQ
 532 * @rq_prod_idx:        RQ producer index
 533 * @rq_cons_idx:        RQ consumer index
 534 * @rqe_left:           number of RQ entries left
 535 * @rq_pgtbl_virt:      page table describing buffer constituting RQ region
536 * @rq_pgtbl_phys: dma address of 'rq_pgtbl_virt'
537 * @rq_pgtbl_size: RQ page table size
538 *
 539 * A queue pair (QP) is a per-connection shared data structure which is used
 540 * to send work requests (SQ), receive completion notifications (CQ)
 541 * and receive asynchronous / scsi sense info (RQ). The 'qp_info' structure
 542 * below holds queue memory, consumer/producer indexes and page table
543 * information
544 */
545struct qp_info {
546 void __iomem *ctx_base;
547#define DPM_TRIGER_TYPE 0x40
548
549#define BNX2I_570x_QUE_DB_SIZE 0
550#define BNX2I_5771x_QUE_DB_SIZE 16
551 struct sqe *sq_virt;
552 dma_addr_t sq_phys;
553 u32 sq_mem_size;
554
555 struct sqe *sq_prod_qe;
556 struct sqe *sq_cons_qe;
557 struct sqe *sq_first_qe;
558 struct sqe *sq_last_qe;
559 u16 sq_prod_idx;
560 u16 sq_cons_idx;
561 u32 sqe_left;
562
563 void *sq_pgtbl_virt;
564 dma_addr_t sq_pgtbl_phys;
565 u32 sq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
566
567 struct cqe *cq_virt;
568 dma_addr_t cq_phys;
569 u32 cq_mem_size;
570
571 struct cqe *cq_prod_qe;
572 struct cqe *cq_cons_qe;
573 struct cqe *cq_first_qe;
574 struct cqe *cq_last_qe;
575 u16 cq_prod_idx;
576 u16 cq_cons_idx;
577 u32 cqe_left;
578 u32 cqe_size;
579 u32 cqe_exp_seq_sn;
580
581 void *cq_pgtbl_virt;
582 dma_addr_t cq_pgtbl_phys;
583 u32 cq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
584
585 struct rqe *rq_virt;
586 dma_addr_t rq_phys;
587 u32 rq_mem_size;
588
589 struct rqe *rq_prod_qe;
590 struct rqe *rq_cons_qe;
591 struct rqe *rq_first_qe;
592 struct rqe *rq_last_qe;
593 u16 rq_prod_idx;
594 u16 rq_cons_idx;
595 u32 rqe_left;
596
597 void *rq_pgtbl_virt;
598 dma_addr_t rq_pgtbl_phys;
599 u32 rq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
600};
601
602
603
604/*
605 * CID handles
606 */
607struct ep_handles {
608 u32 fw_cid;
609 u32 drv_iscsi_cid;
610 u16 pg_cid;
611 u16 rsvd;
612};
613
614
615enum {
616 EP_STATE_IDLE = 0x0,
617 EP_STATE_PG_OFLD_START = 0x1,
618 EP_STATE_PG_OFLD_COMPL = 0x2,
619 EP_STATE_OFLD_START = 0x4,
620 EP_STATE_OFLD_COMPL = 0x8,
621 EP_STATE_CONNECT_START = 0x10,
622 EP_STATE_CONNECT_COMPL = 0x20,
623 EP_STATE_ULP_UPDATE_START = 0x40,
624 EP_STATE_ULP_UPDATE_COMPL = 0x80,
625 EP_STATE_DISCONN_START = 0x100,
626 EP_STATE_DISCONN_COMPL = 0x200,
627 EP_STATE_CLEANUP_START = 0x400,
628 EP_STATE_CLEANUP_CMPL = 0x800,
629 EP_STATE_TCP_FIN_RCVD = 0x1000,
630 EP_STATE_TCP_RST_RCVD = 0x2000,
631 EP_STATE_PG_OFLD_FAILED = 0x1000000,
632 EP_STATE_ULP_UPDATE_FAILED = 0x2000000,
633 EP_STATE_CLEANUP_FAILED = 0x4000000,
634 EP_STATE_OFLD_FAILED = 0x8000000,
635 EP_STATE_CONNECT_FAILED = 0x10000000,
636 EP_STATE_DISCONN_TIMEDOUT = 0x20000000,
637};
638
639/**
640 * struct bnx2i_endpoint - representation of tcp connection in NX2 world
641 *
642 * @link: list head to link elements
643 * @hba: adapter to which this connection belongs
644 * @conn: iscsi connection this EP is linked to
645 * @sess: iscsi session this EP is linked to
646 * @cm_sk: cnic sock struct
647 * @hba_age: age to detect if 'iscsid' issues ep_disconnect()
648 * after HBA reset is completed by bnx2i/cnic/bnx2
649 * modules
650 * @state: tracks offload connection state machine
651 * @teardown_mode: indicates if conn teardown is abortive or orderly
652 * @qp: QP information
653 * @ids: contains chip allocated *context id* & driver assigned
654 * *iscsi cid*
655 * @ofld_timer: offload timer to detect timeout
656 * @ofld_wait: wait queue
657 *
658 * Endpoint Structure - equivalent of tcp socket structure
659 */
660struct bnx2i_endpoint {
661 struct list_head link;
662 struct bnx2i_hba *hba;
663 struct bnx2i_conn *conn;
664 struct cnic_sock *cm_sk;
665 u32 hba_age;
666 u32 state;
667 unsigned long timestamp;
668 int num_active_cmds;
669
670 struct qp_info qp;
671 struct ep_handles ids;
672 #define ep_iscsi_cid ids.drv_iscsi_cid
673 #define ep_cid ids.fw_cid
674 #define ep_pg_cid ids.pg_cid
675 struct timer_list ofld_timer;
676 wait_queue_head_t ofld_wait;
677};
678
679
680
681/* Global variables */
682extern unsigned int error_mask1, error_mask2;
683extern u64 iscsi_error_mask;
684extern unsigned int en_tcp_dack;
685extern unsigned int event_coal_div;
686
687extern struct scsi_transport_template *bnx2i_scsi_xport_template;
688extern struct iscsi_transport bnx2i_iscsi_transport;
689extern struct cnic_ulp_ops bnx2i_cnic_cb;
690
691extern unsigned int sq_size;
692extern unsigned int rq_size;
693
694extern struct device_attribute *bnx2i_dev_attributes[];
695
696
697
698/*
699 * Function Prototypes
700 */
701extern void bnx2i_identify_device(struct bnx2i_hba *hba);
702extern void bnx2i_register_device(struct bnx2i_hba *hba);
703
704extern void bnx2i_ulp_init(struct cnic_dev *dev);
705extern void bnx2i_ulp_exit(struct cnic_dev *dev);
706extern void bnx2i_start(void *handle);
707extern void bnx2i_stop(void *handle);
708extern void bnx2i_reg_dev_all(void);
709extern void bnx2i_unreg_dev_all(void);
710extern struct bnx2i_hba *get_adapter_list_head(void);
711
712struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
713 u16 iscsi_cid);
714
715int bnx2i_alloc_ep_pool(void);
716void bnx2i_release_ep_pool(void);
717struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba);
718struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba);
719
720struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic);
721
722struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic);
723void bnx2i_free_hba(struct bnx2i_hba *hba);
724
725void bnx2i_get_rq_buf(struct bnx2i_conn *conn, char *ptr, int len);
726void bnx2i_put_rq_buf(struct bnx2i_conn *conn, int count);
727
728void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd);
729
730void bnx2i_drop_session(struct iscsi_cls_session *session);
731
732extern int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba);
733extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn,
734 struct iscsi_task *mtask);
735extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn,
736 struct iscsi_task *mtask);
737extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn,
738 struct bnx2i_cmd *cmnd);
739extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn,
740 struct iscsi_task *mtask, u32 ttt,
741 char *datap, int data_len, int unsol);
742extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn,
743 struct iscsi_task *mtask);
744extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba,
745 struct bnx2i_cmd *cmd);
746extern void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba,
747 struct bnx2i_endpoint *ep);
748extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn);
749extern void bnx2i_send_conn_destroy(struct bnx2i_hba *hba,
750 struct bnx2i_endpoint *ep);
751
752extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba,
753 struct bnx2i_endpoint *ep);
754extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba,
755 struct bnx2i_endpoint *ep);
756extern void bnx2i_ep_ofld_timer(unsigned long data);
757extern struct bnx2i_endpoint *bnx2i_find_ep_in_ofld_list(
758 struct bnx2i_hba *hba, u32 iscsi_cid);
759extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list(
760 struct bnx2i_hba *hba, u32 iscsi_cid);
761
762extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep);
763extern void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
764
765/* Debug related function prototypes */
766extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn);
767extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn);
768extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn);
769extern void bnx2i_print_recv_state(struct bnx2i_conn *conn);
770
771#endif
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
new file mode 100644
index 000000000000..906cef5cda86
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -0,0 +1,2405 @@
1/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
12 */
13
14#include <scsi/scsi_tcq.h>
15#include <scsi/libiscsi.h>
16#include "bnx2i.h"
17
18/**
19 * bnx2i_get_cid_num - get cid from ep
20 * @ep: endpoint pointer
21 *
22 * Only applicable to 57710 family of devices
23 */
24static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep)
25{
26 u32 cid;
27
28 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
29 cid = ep->ep_cid;
30 else
31 cid = GET_CID_NUM(ep->ep_cid);
32 return cid;
33}
34
35
36/**
  37 * bnx2i_adjust_qp_size - adjust SQ/RQ/CQ sizes to device and page limits
  38 * @hba: Adapter for which the adjustment is to be made
  39 *
  40 * Rounds 570x queue sizes down to a power of two and page-aligns all queues
41 */
42static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
43{
44 u32 num_elements_per_pg;
45
46 if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) ||
47 test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) ||
48 test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
49 if (!is_power_of_2(hba->max_sqes))
50 hba->max_sqes = rounddown_pow_of_two(hba->max_sqes);
51
52 if (!is_power_of_2(hba->max_rqes))
53 hba->max_rqes = rounddown_pow_of_two(hba->max_rqes);
54 }
55
56 /* Adjust each queue size if the user selection does not
57 * yield integral num of page buffers
58 */
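	/*
	 * Example, assuming 4K pages: BNX2I_SQ_WQE_SIZE is 64, so
	 * num_elements_per_pg is 64 and a requested max_sqes of 100 is
	 * rounded up to 128, i.e. an integral number of pages worth of WQEs.
	 */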
59 /* adjust SQ */
60 num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
61 if (hba->max_sqes < num_elements_per_pg)
62 hba->max_sqes = num_elements_per_pg;
63 else if (hba->max_sqes % num_elements_per_pg)
64 hba->max_sqes = (hba->max_sqes + num_elements_per_pg - 1) &
65 ~(num_elements_per_pg - 1);
66
67 /* adjust CQ */
68 num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE;
69 if (hba->max_cqes < num_elements_per_pg)
70 hba->max_cqes = num_elements_per_pg;
71 else if (hba->max_cqes % num_elements_per_pg)
72 hba->max_cqes = (hba->max_cqes + num_elements_per_pg - 1) &
73 ~(num_elements_per_pg - 1);
74
75 /* adjust RQ */
76 num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
77 if (hba->max_rqes < num_elements_per_pg)
78 hba->max_rqes = num_elements_per_pg;
79 else if (hba->max_rqes % num_elements_per_pg)
80 hba->max_rqes = (hba->max_rqes + num_elements_per_pg - 1) &
81 ~(num_elements_per_pg - 1);
82}
83
84
85/**
86 * bnx2i_get_link_state - get network interface link state
87 * @hba: adapter instance pointer
88 *
89 * updates adapter structure flag based on netdev state
90 */
91static void bnx2i_get_link_state(struct bnx2i_hba *hba)
92{
93 if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
94 set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
95 else
96 clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
97}
98
99
100/**
101 * bnx2i_iscsi_license_error - displays iscsi license related error message
102 * @hba: adapter instance pointer
103 * @error_code: error classification
104 *
105 * Puts out an error log when driver is unable to offload iscsi connection
106 * due to license restrictions
107 */
108static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code)
109{
110 if (error_code == ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED)
111 /* iSCSI offload not supported on this device */
112 printk(KERN_ERR "bnx2i: iSCSI not supported, dev=%s\n",
113 hba->netdev->name);
114 if (error_code == ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED)
115 /* iSCSI offload not supported on this LOM device */
 116                 printk(KERN_ERR "bnx2i: LOM is not enabled to "
117 "offload iSCSI connections, dev=%s\n",
118 hba->netdev->name);
119 set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state);
120}
121
122
123/**
124 * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification
 125 * @ep:         endpoint (transport identifier) structure
 126 * @action:     action, ARM or DISARM. For now only ARM_CQE is used
 127 *
 128 * Arming the CQ enables the chip to generate global EQ events in order to
 129 * interrupt the driver. An EQ event is generated when the CQ index is hit,
 130 * or when at least one CQE is outstanding and the on-chip timer expires
131 */
132void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
133{
134 struct bnx2i_5771x_cq_db *cq_db;
135 u16 cq_index;
136
137 if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
138 return;
139
140 if (action == CNIC_ARM_CQE) {
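		/*
		 * Compute the CQE sequence number at which the chip should
		 * next raise an EQ event; spacing it by
		 * num_active_cmds / event_coal_div coalesces completions.
		 * The sequence space wraps at (2 * cqe_size + 1) and 0 is
		 * skipped, hence the modulo and the bump to 1 below.
		 */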
141 cq_index = ep->qp.cqe_exp_seq_sn +
142 ep->num_active_cmds / event_coal_div;
143 cq_index %= (ep->qp.cqe_size * 2 + 1);
 144                 if (!cq_index)
 145                         cq_index = 1;
 146                 cq_db = (struct bnx2i_5771x_cq_db *)
 147                                 ep->qp.cq_pgtbl_virt;
 148                 cq_db->sqn[0] = cq_index;
150 }
151}
152
153
154/**
155 * bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer
 156 * @conn:       iscsi connection on which the RQ event occurred
 157 * @ptr:        driver buffer to which RQ buffer contents are to
 158 *              be copied
 159 * @len:        length of valid data inside RQ buf
 160 *
 161 * Copies RQ buffer contents from shared (DMA'able) memory region to
 162 * driver buffer. RQ is used to DMA unsolicited iscsi pdus and
163 * scsi sense info
164 */
165void bnx2i_get_rq_buf(struct bnx2i_conn *bnx2i_conn, char *ptr, int len)
166{
167 if (!bnx2i_conn->ep->qp.rqe_left)
168 return;
169
170 bnx2i_conn->ep->qp.rqe_left--;
171 memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len);
172 if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) {
173 bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe;
174 bnx2i_conn->ep->qp.rq_cons_idx = 0;
175 } else {
176 bnx2i_conn->ep->qp.rq_cons_qe++;
177 bnx2i_conn->ep->qp.rq_cons_idx++;
178 }
179}
180
181
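/*
 * bnx2i_ring_577xx_doorbell - ring the 57710-family TX doorbell
 * @conn:       iscsi connection that owns the queue pair
 *
 * Builds a 32-bit doorbell message carrying the iSCSI connection type and
 * writes it to the connection's mapped doorbell window (qp.ctx_base)
 */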
182static void bnx2i_ring_577xx_doorbell(struct bnx2i_conn *conn)
183{
184 struct bnx2i_5771x_dbell dbell;
185 u32 msg;
186
187 memset(&dbell, 0, sizeof(dbell));
188 dbell.dbell.header = (B577XX_ISCSI_CONNECTION_TYPE <<
189 B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT);
190 msg = *((u32 *)&dbell);
191 /* TODO : get doorbell register mapping */
192 writel(cpu_to_le32(msg), conn->ep->qp.ctx_base);
193}
194
195
196/**
 197 * bnx2i_put_rq_buf - replenish RQ buffers and, if required, ring the doorbell
 198 * @conn:       iscsi connection on which the event is to be posted
 199 * @count:      number of RQ buffers being posted to the chip
200 *
201 * No need to ring hardware doorbell for 57710 family of devices
202 */
203void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count)
204{
205 struct bnx2i_5771x_sq_rq_db *rq_db;
206 u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000);
207 struct bnx2i_endpoint *ep = bnx2i_conn->ep;
208
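	/*
	 * The low 15 bits of rq_prod_idx carry the producer index; bit 15
	 * acts as a wrap/phase flag that is toggled each time the index wraps
	 * past max_rqes (presumably so the chip can distinguish a full ring
	 * from an empty one).
	 */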
209 ep->qp.rqe_left += count;
210 ep->qp.rq_prod_idx &= 0x7FFF;
211 ep->qp.rq_prod_idx += count;
212
213 if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) {
214 ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes;
215 if (!hi_bit)
216 ep->qp.rq_prod_idx |= 0x8000;
217 } else
218 ep->qp.rq_prod_idx |= hi_bit;
219
220 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
221 rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt;
222 rq_db->prod_idx = ep->qp.rq_prod_idx;
223 /* no need to ring hardware doorbell for 57710 */
224 } else {
225 writew(ep->qp.rq_prod_idx,
226 ep->qp.ctx_base + CNIC_RECV_DOORBELL);
227 }
228 mmiowb();
229}
230
231
232/**
233 * bnx2i_ring_sq_dbell - Ring SQ doorbell to wake-up the processing engine
234 * @conn: iscsi connection to which new SQ entries belong
235 * @count: number of SQ WQEs to post
236 *
237 * SQ DB is updated in host memory and TX Doorbell is rung for 57710 family
238 * of devices. For 5706/5708/5709 new SQ WQE count is written into the
239 * doorbell register
240 */
241static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count)
242{
243 struct bnx2i_5771x_sq_rq_db *sq_db;
244 struct bnx2i_endpoint *ep = bnx2i_conn->ep;
245
246 ep->num_active_cmds++;
247 wmb(); /* flush SQ WQE memory before the doorbell is rung */
248 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
249 sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt;
250 sq_db->prod_idx = ep->qp.sq_prod_idx;
251 bnx2i_ring_577xx_doorbell(bnx2i_conn);
252 } else
253 writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL);
254
255 mmiowb(); /* flush posted PCI writes */
256}
257
258
259/**
260 * bnx2i_ring_dbell_update_sq_params - update SQ driver parameters
261 * @conn: iscsi connection to which new SQ entries belong
262 * @count: number of SQ WQEs to post
263 *
264 * this routine will update SQ driver parameters and ring the doorbell
265 */
266static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn,
267 int count)
268{
269 int tmp_cnt;
270
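	/*
	 * Advance the SQ producer entry pointer by 'count', wrapping back to
	 * the first WQE when the end of the ring is crossed, then bump the
	 * producer index and ring the doorbell.
	 */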
271 if (count == 1) {
272 if (bnx2i_conn->ep->qp.sq_prod_qe ==
273 bnx2i_conn->ep->qp.sq_last_qe)
274 bnx2i_conn->ep->qp.sq_prod_qe =
275 bnx2i_conn->ep->qp.sq_first_qe;
276 else
277 bnx2i_conn->ep->qp.sq_prod_qe++;
278 } else {
279 if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <=
280 bnx2i_conn->ep->qp.sq_last_qe)
281 bnx2i_conn->ep->qp.sq_prod_qe += count;
282 else {
283 tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe -
284 bnx2i_conn->ep->qp.sq_prod_qe;
285 bnx2i_conn->ep->qp.sq_prod_qe =
286 &bnx2i_conn->ep->qp.sq_first_qe[count -
287 (tmp_cnt + 1)];
288 }
289 }
290 bnx2i_conn->ep->qp.sq_prod_idx += count;
291 /* Ring the doorbell */
292 bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx);
293}
294
295
296/**
297 * bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware
298 * @conn: iscsi connection
299 * @cmd: driver command structure which is requesting
 300 *              a WQE to be sent to the chip for further processing
301 *
302 * prepare and post an iSCSI Login request WQE to CNIC firmware
303 */
304int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
305 struct iscsi_task *task)
306{
307 struct bnx2i_cmd *bnx2i_cmd;
308 struct bnx2i_login_request *login_wqe;
309 struct iscsi_login *login_hdr;
310 u32 dword;
311
312 bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
313 login_hdr = (struct iscsi_login *)task->hdr;
314 login_wqe = (struct bnx2i_login_request *)
315 bnx2i_conn->ep->qp.sq_prod_qe;
316
317 login_wqe->op_code = login_hdr->opcode;
318 login_wqe->op_attr = login_hdr->flags;
319 login_wqe->version_max = login_hdr->max_version;
320 login_wqe->version_min = login_hdr->min_version;
321 login_wqe->data_length = ntoh24(login_hdr->dlength);
322 login_wqe->isid_lo = *((u32 *) login_hdr->isid);
323 login_wqe->isid_hi = *((u16 *) login_hdr->isid + 2);
324 login_wqe->tsih = login_hdr->tsih;
325 login_wqe->itt = task->itt |
326 (ISCSI_TASK_TYPE_MPATH << ISCSI_LOGIN_REQUEST_TYPE_SHIFT);
327 login_wqe->cid = login_hdr->cid;
328
329 login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
330 login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
331
332 login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
333 login_wqe->resp_bd_list_addr_hi =
334 (u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32);
335
336 dword = ((1 << ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT) |
337 (bnx2i_conn->gen_pdu.resp_buf_size <<
338 ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
339 login_wqe->resp_buffer = dword;
340 login_wqe->flags = 0;
341 login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
342 login_wqe->bd_list_addr_hi =
343 (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
344 login_wqe->num_bds = 1;
345 login_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
346
347 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
348 return 0;
349}
350
351/**
352 * bnx2i_send_iscsi_tmf - post iSCSI task management request MP WQE to hardware
353 * @conn: iscsi connection
 354 * @mtask:      driver command structure which is requesting
 355 *              a WQE to be sent to the chip for further processing
 356 *
 357 * prepare and post an iSCSI task management request WQE to CNIC firmware
358 */
359int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
360 struct iscsi_task *mtask)
361{
362 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
363 struct iscsi_tm *tmfabort_hdr;
364 struct scsi_cmnd *ref_sc;
365 struct iscsi_task *ctask;
366 struct bnx2i_cmd *bnx2i_cmd;
367 struct bnx2i_tmf_request *tmfabort_wqe;
368 u32 dword;
369
370 bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
371 tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
372 tmfabort_wqe = (struct bnx2i_tmf_request *)
373 bnx2i_conn->ep->qp.sq_prod_qe;
374
375 tmfabort_wqe->op_code = tmfabort_hdr->opcode;
376 tmfabort_wqe->op_attr = 0;
377 tmfabort_wqe->op_attr =
378 ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;
379 tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]);
380 tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]);
381
382 tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
383 tmfabort_wqe->reserved2 = 0;
384 tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);
385
386 ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
 387         if (!ctask || !ctask->sc)
388 /*
389 * the iscsi layer must have completed the cmd while this
390 * was starting up.
391 */
392 return 0;
393 ref_sc = ctask->sc;
394
395 if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
396 dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
397 else
398 dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
399 tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt);
400 tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
401
402 tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
403 tmfabort_wqe->bd_list_addr_hi = (u32)
404 ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
405 tmfabort_wqe->num_bds = 1;
406 tmfabort_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
407
408 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
409 return 0;
410}
411
412/**
413 * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware
414 * @conn: iscsi connection
415 * @cmd: driver command structure which is requesting
 416 *              a WQE to be sent to the chip for further processing
417 *
418 * prepare and post an iSCSI SCSI-CMD request WQE to CNIC firmware
419 */
420int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn,
421 struct bnx2i_cmd *cmd)
422{
423 struct bnx2i_cmd_request *scsi_cmd_wqe;
424
425 scsi_cmd_wqe = (struct bnx2i_cmd_request *)
426 bnx2i_conn->ep->qp.sq_prod_qe;
427 memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct bnx2i_cmd_request));
428 scsi_cmd_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
429
430 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
431 return 0;
432}
433
434/**
435 * bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware
436 * @conn: iscsi connection
437 * @cmd: driver command structure which is requesting
 438 *              a WQE to be sent to the chip for further processing
 439 * @ttt:        TTT to be used when building pdu header
 440 * @datap:      payload buffer pointer
 441 * @data_len:   payload data length
 442 * @unsol:      indicates whether the nopout pdu is an unsolicited pdu or
 443 *              is sent in response to target's NOPIN w/ TTT != FFFFFFFF
444 *
445 * prepare and post a nopout request WQE to CNIC firmware
446 */
447int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
448 struct iscsi_task *task, u32 ttt,
449 char *datap, int data_len, int unsol)
450{
451 struct bnx2i_endpoint *ep = bnx2i_conn->ep;
452 struct bnx2i_cmd *bnx2i_cmd;
453 struct bnx2i_nop_out_request *nopout_wqe;
454 struct iscsi_nopout *nopout_hdr;
455
456 bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
457 nopout_hdr = (struct iscsi_nopout *)task->hdr;
458 nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
459 nopout_wqe->op_code = nopout_hdr->opcode;
460 nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
461 memcpy(nopout_wqe->lun, nopout_hdr->lun, 8);
462
463 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
 464                 u32 tmp = nopout_wqe->lun[0];
 465                 /* 57710 requires the LUN field in the WQE to be swapped */
 466                 nopout_wqe->lun[0] = nopout_wqe->lun[1];
 467                 nopout_wqe->lun[1] = tmp;
468 }
469
470 nopout_wqe->itt = ((u16)task->itt |
471 (ISCSI_TASK_TYPE_MPATH <<
472 ISCSI_TMF_REQUEST_TYPE_SHIFT));
473 nopout_wqe->ttt = ttt;
474 nopout_wqe->flags = 0;
475 if (!unsol)
476 nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
477 else if (nopout_hdr->itt == RESERVED_ITT)
478 nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
479
480 nopout_wqe->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
481 nopout_wqe->data_length = data_len;
482 if (data_len) {
483 /* handle payload data, not required in first release */
484 printk(KERN_ALERT "NOPOUT: WARNING!! payload len != 0\n");
485 } else {
486 nopout_wqe->bd_list_addr_lo = (u32)
487 bnx2i_conn->hba->mp_bd_dma;
488 nopout_wqe->bd_list_addr_hi =
489 (u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
490 nopout_wqe->num_bds = 1;
491 }
492 nopout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
493
494 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
495 return 0;
496}
497
498
499/**
500 * bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware
501 * @conn: iscsi connection
502 * @cmd: driver command structure which is requesting
 503 *              a WQE to be sent to the chip for further processing
504 *
505 * prepare and post logout request WQE to CNIC firmware
506 */
507int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn,
508 struct iscsi_task *task)
509{
510 struct bnx2i_cmd *bnx2i_cmd;
511 struct bnx2i_logout_request *logout_wqe;
512 struct iscsi_logout *logout_hdr;
513
514 bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
515 logout_hdr = (struct iscsi_logout *)task->hdr;
516
517 logout_wqe = (struct bnx2i_logout_request *)
518 bnx2i_conn->ep->qp.sq_prod_qe;
519 memset(logout_wqe, 0x00, sizeof(struct bnx2i_logout_request));
520
521 logout_wqe->op_code = logout_hdr->opcode;
522 logout_wqe->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
523 logout_wqe->op_attr =
524 logout_hdr->flags | ISCSI_LOGOUT_REQUEST_ALWAYS_ONE;
525 logout_wqe->itt = ((u16)task->itt |
526 (ISCSI_TASK_TYPE_MPATH <<
527 ISCSI_LOGOUT_REQUEST_TYPE_SHIFT));
528 logout_wqe->data_length = 0;
529 logout_wqe->cid = 0;
530
531 logout_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
532 logout_wqe->bd_list_addr_hi = (u32)
533 ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
534 logout_wqe->num_bds = 1;
535 logout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
536
537 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
538 return 0;
539}
540
541
542/**
 543 * bnx2i_update_iscsi_conn - update iSCSI connection parameters on the chip
544 * @conn: iscsi connection which requires iscsi parameter update
545 *
546 * sends down iSCSI Conn Update request to move iSCSI conn to FFP
547 */
548void bnx2i_update_iscsi_conn(struct iscsi_conn *conn)
549{
550 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
551 struct bnx2i_hba *hba = bnx2i_conn->hba;
552 struct kwqe *kwqe_arr[2];
553 struct iscsi_kwqe_conn_update *update_wqe;
554 struct iscsi_kwqe_conn_update conn_update_kwqe;
555
556 update_wqe = &conn_update_kwqe;
557
558 update_wqe->hdr.op_code = ISCSI_KWQE_OPCODE_UPDATE_CONN;
559 update_wqe->hdr.flags =
560 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
561
562 /* 5771x requires conn context id to be passed as is */
563 if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type))
564 update_wqe->context_id = bnx2i_conn->ep->ep_cid;
565 else
566 update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7);
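		/* for 570x devices the context id is passed shifted down by
		 * CTX_SHIFT (7), i.e. GET_CID_NUM(ep_cid) */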
567 update_wqe->conn_flags = 0;
568 if (conn->hdrdgst_en)
569 update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST;
570 if (conn->datadgst_en)
571 update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST;
572 if (conn->session->initial_r2t_en)
573 update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T;
574 if (conn->session->imm_data_en)
575 update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA;
576
577 update_wqe->max_send_pdu_length = conn->max_xmit_dlength;
578 update_wqe->max_recv_pdu_length = conn->max_recv_dlength;
579 update_wqe->first_burst_length = conn->session->first_burst;
580 update_wqe->max_burst_length = conn->session->max_burst;
581 update_wqe->exp_stat_sn = conn->exp_statsn;
582 update_wqe->max_outstanding_r2ts = conn->session->max_r2t;
583 update_wqe->session_error_recovery_level = conn->session->erl;
584 iscsi_conn_printk(KERN_ALERT, conn,
585 "bnx2i: conn update - MBL 0x%x FBL 0x%x"
586 "MRDSL_I 0x%x MRDSL_T 0x%x \n",
587 update_wqe->max_burst_length,
588 update_wqe->first_burst_length,
589 update_wqe->max_recv_pdu_length,
590 update_wqe->max_send_pdu_length);
591
592 kwqe_arr[0] = (struct kwqe *) update_wqe;
593 if (hba->cnic && hba->cnic->submit_kwqes)
594 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
595}
596
597
598/**
 599 * bnx2i_ep_ofld_timer - connection offload/destroy request timeout handler
600 * @data: endpoint (transport handle) structure pointer
601 *
602 * routine to handle connection offload/destroy request timeout
603 */
604void bnx2i_ep_ofld_timer(unsigned long data)
605{
606 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data;
607
608 if (ep->state == EP_STATE_OFLD_START) {
609 printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n");
610 ep->state = EP_STATE_OFLD_FAILED;
611 } else if (ep->state == EP_STATE_DISCONN_START) {
612 printk(KERN_ALERT "ofld_timer: CONN_DISCON timeout\n");
613 ep->state = EP_STATE_DISCONN_TIMEDOUT;
614 } else if (ep->state == EP_STATE_CLEANUP_START) {
615 printk(KERN_ALERT "ofld_timer: CONN_CLEANUP timeout\n");
616 ep->state = EP_STATE_CLEANUP_FAILED;
617 }
618
619 wake_up_interruptible(&ep->ofld_wait);
620}
621
622
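/*
 * bnx2i_power_of2 - returns log2(val) when 'val' is a power of two, else 0
 * @val: value to test
 */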
623static int bnx2i_power_of2(u32 val)
624{
625 u32 power = 0;
626 if (val & (val - 1))
627 return power;
628 val--;
629 while (val) {
630 val = val >> 1;
631 power++;
632 }
633 return power;
634}
635
636
637/**
638 * bnx2i_send_cmd_cleanup_req - send iscsi cmd context clean-up request
639 * @hba: adapter structure pointer
 640 * @cmd:        driver command structure which is requesting
 641 *              a WQE to be sent to the chip for further processing
 642 *
 643 * prepares and posts an iSCSI command cleanup request SQ WQE
644 */
645void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
646{
647 struct bnx2i_cleanup_request *cmd_cleanup;
648
649 cmd_cleanup =
650 (struct bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe;
651 memset(cmd_cleanup, 0x00, sizeof(struct bnx2i_cleanup_request));
652
653 cmd_cleanup->op_code = ISCSI_OPCODE_CLEANUP_REQUEST;
654 cmd_cleanup->itt = cmd->req.itt;
655 cmd_cleanup->cq_index = 0; /* CQ# used for completion, 5771x only */
656
657 bnx2i_ring_dbell_update_sq_params(cmd->conn, 1);
658}
659
660
661/**
662 * bnx2i_send_conn_destroy - initiates iscsi connection teardown process
663 * @hba: adapter structure pointer
 664 * @ep:         endpoint (transport identifier) structure
665 *
666 * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE to initiate
667 * iscsi connection context clean-up process
668 */
669void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
670{
671 struct kwqe *kwqe_arr[2];
672 struct iscsi_kwqe_conn_destroy conn_cleanup;
673
674 memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy));
675
676 conn_cleanup.hdr.op_code = ISCSI_KWQE_OPCODE_DESTROY_CONN;
677 conn_cleanup.hdr.flags =
678 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
679 /* 5771x requires conn context id to be passed as is */
680 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
681 conn_cleanup.context_id = ep->ep_cid;
682 else
683 conn_cleanup.context_id = (ep->ep_cid >> 7);
684
685 conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid;
686
687 kwqe_arr[0] = (struct kwqe *) &conn_cleanup;
688 if (hba->cnic && hba->cnic->submit_kwqes)
689 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
690}
691
692
693/**
694 * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process
695 * @hba: adapter structure pointer
 696 * @ep:         endpoint (transport identifier) structure
697 *
698 * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
699 */
700static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
701 struct bnx2i_endpoint *ep)
702{
703 struct kwqe *kwqe_arr[2];
704 struct iscsi_kwqe_conn_offload1 ofld_req1;
705 struct iscsi_kwqe_conn_offload2 ofld_req2;
706 dma_addr_t dma_addr;
707 int num_kwqes = 2;
708 u32 *ptbl;
709
710 ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
711 ofld_req1.hdr.flags =
712 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
713
714 ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
715
716 dma_addr = ep->qp.sq_pgtbl_phys;
717 ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
718 ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
719
720 dma_addr = ep->qp.cq_pgtbl_phys;
721 ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
722 ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
723
724 ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
725 ofld_req2.hdr.flags =
726 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
727
728 dma_addr = ep->qp.rq_pgtbl_phys;
729 ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
730 ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
731
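	/* 570x page tables are laid out in big endian format (see
	 * setup_qp_page_tables), so each 64-bit PTE is stored hi-word first;
	 * the first SQ/CQ PTE is handed to the firmware in the offload
	 * request below */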
732 ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
733
734 ofld_req2.sq_first_pte.hi = *ptbl++;
735 ofld_req2.sq_first_pte.lo = *ptbl;
736
737 ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
738 ofld_req2.cq_first_pte.hi = *ptbl++;
739 ofld_req2.cq_first_pte.lo = *ptbl;
740
741 kwqe_arr[0] = (struct kwqe *) &ofld_req1;
742 kwqe_arr[1] = (struct kwqe *) &ofld_req2;
743 ofld_req2.num_additional_wqes = 0;
744
745 if (hba->cnic && hba->cnic->submit_kwqes)
746 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
747}
748
749
750/**
751 * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation
752 * @hba: adapter structure pointer
 753 * @ep:         endpoint (transport identifier) structure
754 *
755 * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
756 */
757static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
758 struct bnx2i_endpoint *ep)
759{
760 struct kwqe *kwqe_arr[5];
761 struct iscsi_kwqe_conn_offload1 ofld_req1;
762 struct iscsi_kwqe_conn_offload2 ofld_req2;
763 struct iscsi_kwqe_conn_offload3 ofld_req3[1];
764 dma_addr_t dma_addr;
765 int num_kwqes = 2;
766 u32 *ptbl;
767
768 ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
769 ofld_req1.hdr.flags =
770 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
771
772 ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
773
774 dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE;
775 ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
776 ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
777
778 dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE;
779 ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
780 ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
781
782 ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
783 ofld_req2.hdr.flags =
784 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
785
786 dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE;
787 ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
788 ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
789
790 ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
791 ofld_req2.sq_first_pte.hi = *ptbl++;
792 ofld_req2.sq_first_pte.lo = *ptbl;
793
794 ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
795 ofld_req2.cq_first_pte.hi = *ptbl++;
796 ofld_req2.cq_first_pte.lo = *ptbl;
797
798 kwqe_arr[0] = (struct kwqe *) &ofld_req1;
799 kwqe_arr[1] = (struct kwqe *) &ofld_req2;
800
801 ofld_req2.num_additional_wqes = 1;
802 memset(ofld_req3, 0x00, sizeof(ofld_req3[0]));
803 ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
804 ofld_req3[0].qp_first_pte[0].hi = *ptbl++;
805 ofld_req3[0].qp_first_pte[0].lo = *ptbl;
806
807 kwqe_arr[2] = (struct kwqe *) ofld_req3;
808	/* needed if we decide to go with multiple KCQEs per conn */
809 num_kwqes += 1;
810
811 if (hba->cnic && hba->cnic->submit_kwqes)
812 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
813}
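On 57710 the beginning of each page-table region is reserved for the doorbell structure (see the cq_db usage in bnx2i_alloc_qp_resc below), which is why both the physical address handed to the firmware and the virtual pointer used to read the first PTE pair are advanced by ISCSI_SQ_DB_SIZE/ISCSI_CQ_DB_SIZE/ISCSI_RQ_DB_SIZE above. A minimal standalone sketch of that adjustment, using plain C types and a helper name chosen here for illustration rather than the driver's own code:

	#include <stdint.h>
	#include <stddef.h>

	/* Illustrative only: skip a doorbell area of db_size bytes at the
	 * start of a page-table region before reading the first PTE pair. */
	static const uint32_t *first_pte_after_db(const void *pgtbl_virt,
						  size_t db_size)
	{
		return (const uint32_t *)((const uint8_t *)pgtbl_virt + db_size);
	}
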
814
815/**
816 * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process
817 *
818 * @hba: adapter structure pointer
819 * @ep: endpoint (transport identifier) structure
820 *
821 * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE
822 */
823void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
824{
825 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
826 bnx2i_5771x_send_conn_ofld_req(hba, ep);
827 else
828 bnx2i_570x_send_conn_ofld_req(hba, ep);
829}
830
831
832/**
833 * setup_qp_page_tables - iscsi QP page table setup function
834 * @ep: endpoint (transport identifier) structure
835 *
836 * Sets up page tables for SQ/RQ/CQ. 1G/sec (5706/5708/5709) devices require
837 * 64-bit addresses in big endian format, whereas 10G/sec (57710) devices
838 * require the page table in little endian format
839 */
840static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
841{
842 int num_pages;
843 u32 *ptbl;
844 dma_addr_t page;
845 int cnic_dev_10g;
846
847 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
848 cnic_dev_10g = 1;
849 else
850 cnic_dev_10g = 0;
851
852 /* SQ page table */
853 memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
854 num_pages = ep->qp.sq_mem_size / PAGE_SIZE;
855 page = ep->qp.sq_phys;
856
857 if (cnic_dev_10g)
858 ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
859 else
860 ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
861 while (num_pages--) {
862 if (cnic_dev_10g) {
863 /* PTE is written in little endian format for 57710 */
864 *ptbl = (u32) page;
865 ptbl++;
866 *ptbl = (u32) ((u64) page >> 32);
867 ptbl++;
868 page += PAGE_SIZE;
869 } else {
870 /* PTE is written in big endian format for
871 * 5706/5708/5709 devices */
872 *ptbl = (u32) ((u64) page >> 32);
873 ptbl++;
874 *ptbl = (u32) page;
875 ptbl++;
876 page += PAGE_SIZE;
877 }
878 }
879
880 /* RQ page table */
881 memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
882 num_pages = ep->qp.rq_mem_size / PAGE_SIZE;
883 page = ep->qp.rq_phys;
884
885 if (cnic_dev_10g)
886 ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
887 else
888 ptbl = (u32 *) ep->qp.rq_pgtbl_virt;
889 while (num_pages--) {
890 if (cnic_dev_10g) {
891 /* PTE is written in little endian format for 57710 */
892 *ptbl = (u32) page;
893 ptbl++;
894 *ptbl = (u32) ((u64) page >> 32);
895 ptbl++;
896 page += PAGE_SIZE;
897 } else {
898 /* PTE is written in big endian format for
899 * 5706/5708/5709 devices */
900 *ptbl = (u32) ((u64) page >> 32);
901 ptbl++;
902 *ptbl = (u32) page;
903 ptbl++;
904 page += PAGE_SIZE;
905 }
906 }
907
908 /* CQ page table */
909 memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
910 num_pages = ep->qp.cq_mem_size / PAGE_SIZE;
911 page = ep->qp.cq_phys;
912
913 if (cnic_dev_10g)
914 ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
915 else
916 ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
917 while (num_pages--) {
918 if (cnic_dev_10g) {
919 /* PTE is written in little endian format for 57710 */
920 *ptbl = (u32) page;
921 ptbl++;
922 *ptbl = (u32) ((u64) page >> 32);
923 ptbl++;
924 page += PAGE_SIZE;
925 } else {
926 /* PTE is written in big endian format for
927 * 5706/5708/5709 devices */
928 *ptbl = (u32) ((u64) page >> 32);
929 ptbl++;
930 *ptbl = (u32) page;
931 ptbl++;
932 page += PAGE_SIZE;
933 }
934 }
935}
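The split of each 64-bit DMA address into two 32-bit page-table words differs only in word order between the two device families handled above. A standalone sketch of that split, using plain stdint types and a helper name invented here for illustration (not the driver's structures):

	#include <stdint.h>

	/* Illustrative only: write one 64-bit bus address as two 32-bit PTE
	 * words. 57710 stores the low word first (little endian layout);
	 * 5706/5708/5709 store the high word first (big endian layout). */
	static void write_pte(uint32_t *ptbl, uint64_t page, int is_10g)
	{
		if (is_10g) {
			ptbl[0] = (uint32_t)page;		/* low 32 bits first */
			ptbl[1] = (uint32_t)(page >> 32);	/* then high 32 bits */
		} else {
			ptbl[0] = (uint32_t)(page >> 32);	/* high 32 bits first */
			ptbl[1] = (uint32_t)page;		/* then low 32 bits */
		}
	}
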
936
937
938/**
939 * bnx2i_alloc_qp_resc - allocates required resources for QP.
940 * @hba: adapter structure pointer
941 * @ep: endpoint (transport identifier) structure
942 *
943 * Allocate QP (transport layer for iSCSI connection) resources, DMA'able
944 * memory for SQ/RQ/CQ and page tables. EP structure elements such
945 * as producer/consumer indexes/pointers, queue sizes and page table
946 * contents are set up
947 */
948int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
949{
950 struct bnx2i_5771x_cq_db *cq_db;
951
952 ep->hba = hba;
953 ep->conn = NULL;
954 ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0;
955
956 /* Allocate page table memory for SQ which is page aligned */
957 ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
958 ep->qp.sq_mem_size =
959 (ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
960 ep->qp.sq_pgtbl_size =
961 (ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *);
962 ep->qp.sq_pgtbl_size =
963 (ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
964
965 ep->qp.sq_pgtbl_virt =
966 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
967 &ep->qp.sq_pgtbl_phys, GFP_KERNEL);
968 if (!ep->qp.sq_pgtbl_virt) {
969 printk(KERN_ALERT "bnx2i: unable to alloc SQ PT mem (%d)\n",
970 ep->qp.sq_pgtbl_size);
971 goto mem_alloc_err;
972 }
973
974 /* Allocate memory area for actual SQ element */
975 ep->qp.sq_virt =
976 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
977 &ep->qp.sq_phys, GFP_KERNEL);
978 if (!ep->qp.sq_virt) {
979 printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
980 ep->qp.sq_mem_size);
981 goto mem_alloc_err;
982 }
983
984 memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
985 ep->qp.sq_first_qe = ep->qp.sq_virt;
986 ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
987 ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
988 ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1];
989 ep->qp.sq_prod_idx = 0;
990 ep->qp.sq_cons_idx = 0;
991 ep->qp.sqe_left = hba->max_sqes;
992
993 /* Allocate page table memory for CQ which is page aligned */
994 ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
995 ep->qp.cq_mem_size =
996 (ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
997 ep->qp.cq_pgtbl_size =
998 (ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *);
999 ep->qp.cq_pgtbl_size =
1000 (ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
1001
1002 ep->qp.cq_pgtbl_virt =
1003 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
1004 &ep->qp.cq_pgtbl_phys, GFP_KERNEL);
1005 if (!ep->qp.cq_pgtbl_virt) {
1006 printk(KERN_ALERT "bnx2i: unable to alloc CQ PT memory %d\n",
1007 ep->qp.cq_pgtbl_size);
1008 goto mem_alloc_err;
1009 }
1010
1011 /* Allocate memory area for actual CQ element */
1012 ep->qp.cq_virt =
1013 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
1014 &ep->qp.cq_phys, GFP_KERNEL);
1015 if (!ep->qp.cq_virt) {
1016 printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
1017 ep->qp.cq_mem_size);
1018 goto mem_alloc_err;
1019 }
1020 memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);
1021
1022 ep->qp.cq_first_qe = ep->qp.cq_virt;
1023 ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
1024 ep->qp.cq_cons_qe = ep->qp.cq_first_qe;
1025 ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1];
1026 ep->qp.cq_prod_idx = 0;
1027 ep->qp.cq_cons_idx = 0;
1028 ep->qp.cqe_left = hba->max_cqes;
1029 ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN;
1030 ep->qp.cqe_size = hba->max_cqes;
1031
1032 /* Invalidate all EQ CQE index, req only for 57710 */
1033 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
1034 memset(cq_db->sqn, 0xFF, sizeof(cq_db->sqn[0]) * BNX2X_MAX_CQS);
1035
1036 /* Allocate page table memory for RQ which is page aligned */
1037 ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
1038 ep->qp.rq_mem_size =
1039 (ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
1040 ep->qp.rq_pgtbl_size =
1041 (ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *);
1042 ep->qp.rq_pgtbl_size =
1043 (ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
1044
1045 ep->qp.rq_pgtbl_virt =
1046 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
1047 &ep->qp.rq_pgtbl_phys, GFP_KERNEL);
1048 if (!ep->qp.rq_pgtbl_virt) {
1049 printk(KERN_ALERT "bnx2i: unable to alloc RQ PT mem %d\n",
1050 ep->qp.rq_pgtbl_size);
1051 goto mem_alloc_err;
1052 }
1053
1054 /* Allocate memory area for actual RQ element */
1055 ep->qp.rq_virt =
1056 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
1057 &ep->qp.rq_phys, GFP_KERNEL);
1058 if (!ep->qp.rq_virt) {
1059 printk(KERN_ALERT "bnx2i: unable to alloc RQ BD memory %d\n",
1060 ep->qp.rq_mem_size);
1061 goto mem_alloc_err;
1062 }
1063
1064 ep->qp.rq_first_qe = ep->qp.rq_virt;
1065 ep->qp.rq_prod_qe = ep->qp.rq_first_qe;
1066 ep->qp.rq_cons_qe = ep->qp.rq_first_qe;
1067 ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1];
1068 ep->qp.rq_prod_idx = 0x8000;
1069 ep->qp.rq_cons_idx = 0;
1070 ep->qp.rqe_left = hba->max_rqes;
1071
1072 setup_qp_page_tables(ep);
1073
1074 return 0;
1075
1076mem_alloc_err:
1077 bnx2i_free_qp_resc(hba, ep);
1078 return -ENOMEM;
1079}
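The size arithmetic above rounds both the queue memory and its page table up to whole pages. As an illustration (the numbers here are assumptions, not values taken from the driver headers): with 4 KB pages, 128 SQ entries of 64 bytes each give 8192 bytes of SQ memory (two pages), so the page table needs 2 * sizeof(void *) = 16 bytes on a 64-bit build, which the same rounding expands to one full 4096-byte page. A standalone sketch of the round-up, with a helper name chosen here for illustration:

	/* Illustrative only: round size up to a multiple of page_size,
	 * equivalent to (size + (PAGE_SIZE - 1)) & PAGE_MASK above. */
	static unsigned long round_up_to_page(unsigned long size,
					      unsigned long page_size)
	{
		return (size + page_size - 1) & ~(page_size - 1);
	}
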
1080
1081
1082
1083/**
1084 * bnx2i_free_qp_resc - free memory resources held by QP
1085 * @hba: adapter structure pointer
1086 * @ep: endpoint (transport identifier) structure
1087 *
1088 * Free QP resources - SQ/RQ/CQ memory and page tables.
1089 */
1090void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
1091{
1092 if (ep->qp.ctx_base) {
1093 iounmap(ep->qp.ctx_base);
1094 ep->qp.ctx_base = NULL;
1095 }
1096 /* Free SQ mem */
1097 if (ep->qp.sq_pgtbl_virt) {
1098 dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
1099 ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys);
1100 ep->qp.sq_pgtbl_virt = NULL;
1101 ep->qp.sq_pgtbl_phys = 0;
1102 }
1103 if (ep->qp.sq_virt) {
1104 dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
1105 ep->qp.sq_virt, ep->qp.sq_phys);
1106 ep->qp.sq_virt = NULL;
1107 ep->qp.sq_phys = 0;
1108 }
1109
1110 /* Free RQ mem */
1111 if (ep->qp.rq_pgtbl_virt) {
1112 dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
1113 ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys);
1114 ep->qp.rq_pgtbl_virt = NULL;
1115 ep->qp.rq_pgtbl_phys = 0;
1116 }
1117 if (ep->qp.rq_virt) {
1118 dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
1119 ep->qp.rq_virt, ep->qp.rq_phys);
1120 ep->qp.rq_virt = NULL;
1121 ep->qp.rq_phys = 0;
1122 }
1123
1124 /* Free CQ mem */
1125 if (ep->qp.cq_pgtbl_virt) {
1126 dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
1127 ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys);
1128 ep->qp.cq_pgtbl_virt = NULL;
1129 ep->qp.cq_pgtbl_phys = 0;
1130 }
1131 if (ep->qp.cq_virt) {
1132 dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
1133 ep->qp.cq_virt, ep->qp.cq_phys);
1134 ep->qp.cq_virt = NULL;
1135 ep->qp.cq_phys = 0;
1136 }
1137}
1138
1139
1140/**
1141 * bnx2i_send_fw_iscsi_init_msg - initiates initial handshake with iscsi f/w
1142 * @hba: adapter structure pointer
1143 *
1144 * Send down iscsi_init KWQEs which initiate the initial handshake with the f/w.
1145 * This results in iSCSI support validation and on-chip context manager
1146 * initialization. Firmware completes this handshake with a CQE carrying
1147 * the result of iscsi support validation. Parameters carried by the
1148 * iscsi init request determine the number of offloaded connections and the
1149 * tolerance level for iscsi protocol violations this hba/chip can support
1150 */
1151int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
1152{
1153 struct kwqe *kwqe_arr[3];
1154 struct iscsi_kwqe_init1 iscsi_init;
1155 struct iscsi_kwqe_init2 iscsi_init2;
1156 int rc = 0;
1157 u64 mask64;
1158
1159 bnx2i_adjust_qp_size(hba);
1160
1161 iscsi_init.flags =
1162 ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
1163 if (en_tcp_dack)
1164 iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
1165 iscsi_init.reserved0 = 0;
1166 iscsi_init.num_cqs = 1;
1167 iscsi_init.hdr.op_code = ISCSI_KWQE_OPCODE_INIT1;
1168 iscsi_init.hdr.flags =
1169 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
1170
1171 iscsi_init.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
1172 iscsi_init.dummy_buffer_addr_hi =
1173 (u32) ((u64) hba->dummy_buf_dma >> 32);
1174
1175 hba->ctx_ccell_tasks =
1176 ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
1177 iscsi_init.num_ccells_per_conn = hba->num_ccell;
1178 iscsi_init.num_tasks_per_conn = hba->max_sqes;
1179 iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
1180 iscsi_init.sq_num_wqes = hba->max_sqes;
1181 iscsi_init.cq_log_wqes_per_page =
1182 (u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE);
1183 iscsi_init.cq_num_wqes = hba->max_cqes;
1184 iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
1185 (PAGE_SIZE - 1)) / PAGE_SIZE;
1186 iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
1187 (PAGE_SIZE - 1)) / PAGE_SIZE;
1188 iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
1189 iscsi_init.rq_num_wqes = hba->max_rqes;
1190
1191
1192 iscsi_init2.hdr.op_code = ISCSI_KWQE_OPCODE_INIT2;
1193 iscsi_init2.hdr.flags =
1194 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
1195 iscsi_init2.max_cq_sqn = hba->max_cqes * 2 + 1;
1196 mask64 = 0x0ULL;
1197 mask64 |= (
1198 /* CISCO MDS */
1199 (1UL <<
1200 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV) |
1201 /* HP MSA1510i */
1202 (1UL <<
1203 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) |
1204 /* EMC */
1205 (1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
1206 if (error_mask1)
1207 iscsi_init2.error_bit_map[0] = error_mask1;
1208 else
1209 iscsi_init2.error_bit_map[0] = (u32) mask64;
1210
1211 if (error_mask2)
1212 iscsi_init2.error_bit_map[1] = error_mask2;
1213 else
1214 iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32);
1215
1216 iscsi_error_mask = mask64;
1217
1218 kwqe_arr[0] = (struct kwqe *) &iscsi_init;
1219 kwqe_arr[1] = (struct kwqe *) &iscsi_init2;
1220
1221 if (hba->cnic && hba->cnic->submit_kwqes)
1222 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 2);
1223 return rc;
1224}
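The default 64-bit protocol-violation mask built above is split across the two 32-bit error_bit_map words of the INIT2 KWQE unless overridden by the error_mask1/error_mask2 module parameters. A minimal standalone sketch of that selection, with a function name invented here for illustration:

	#include <stdint.h>

	/* Illustrative only: mirror the error_bit_map selection above,
	 * preferring the module-parameter overrides when they are non-zero. */
	static void fill_error_bit_map(uint32_t bit_map[2], uint64_t mask64,
				       uint32_t override_lo, uint32_t override_hi)
	{
		bit_map[0] = override_lo ? override_lo : (uint32_t)mask64;
		bit_map[1] = override_hi ? override_hi : (uint32_t)(mask64 >> 32);
	}
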
1225
1226
1227/**
1228 * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion.
1229 * @session: iscsi session pointer, @bnx2i_conn: iscsi connection pointer
1230 * @cqe: pointer to newly DMA'ed CQE entry for processing
1231 *
1232 * process SCSI CMD Response CQE & complete the request to SCSI-ML
1233 */
1234static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
1235 struct bnx2i_conn *bnx2i_conn,
1236 struct cqe *cqe)
1237{
1238 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1239 struct bnx2i_cmd_response *resp_cqe;
1240 struct bnx2i_cmd *bnx2i_cmd;
1241 struct iscsi_task *task;
1242 struct iscsi_cmd_rsp *hdr;
1243 u32 datalen = 0;
1244
1245 resp_cqe = (struct bnx2i_cmd_response *)cqe;
1246 spin_lock(&session->lock);
1247 task = iscsi_itt_to_task(conn,
1248 resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
1249 if (!task)
1250 goto fail;
1251
1252 bnx2i_cmd = task->dd_data;
1253
1254 if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
1255 conn->datain_pdus_cnt +=
1256 resp_cqe->task_stat.read_stat.num_data_outs;
1257 conn->rxdata_octets +=
1258 bnx2i_cmd->req.total_data_transfer_length;
1259 } else {
1260 conn->dataout_pdus_cnt +=
1261 resp_cqe->task_stat.read_stat.num_data_outs;
1262 conn->r2t_pdus_cnt +=
1263 resp_cqe->task_stat.read_stat.num_r2ts;
1264 conn->txdata_octets +=
1265 bnx2i_cmd->req.total_data_transfer_length;
1266 }
1267 bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
1268
1269 hdr = (struct iscsi_cmd_rsp *)task->hdr;
1270 resp_cqe = (struct bnx2i_cmd_response *)cqe;
1271 hdr->opcode = resp_cqe->op_code;
1272 hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn);
1273 hdr->exp_cmdsn = cpu_to_be32(resp_cqe->exp_cmd_sn);
1274 hdr->response = resp_cqe->response;
1275 hdr->cmd_status = resp_cqe->status;
1276 hdr->flags = resp_cqe->response_flags;
1277 hdr->residual_count = cpu_to_be32(resp_cqe->residual_count);
1278
1279 if (resp_cqe->op_code == ISCSI_OP_SCSI_DATA_IN)
1280 goto done;
1281
1282 if (resp_cqe->status == SAM_STAT_CHECK_CONDITION) {
1283 datalen = resp_cqe->data_length;
1284 if (datalen < 2)
1285 goto done;
1286
1287 if (datalen > BNX2I_RQ_WQE_SIZE) {
1288 iscsi_conn_printk(KERN_ERR, conn,
1289 "sense data len %d > RQ sz\n",
1290 datalen);
1291 datalen = BNX2I_RQ_WQE_SIZE;
1292 } else if (datalen > ISCSI_DEF_MAX_RECV_SEG_LEN) {
1293 iscsi_conn_printk(KERN_ERR, conn,
1294 "sense data len %d > conn data\n",
1295 datalen);
1296 datalen = ISCSI_DEF_MAX_RECV_SEG_LEN;
1297 }
1298
1299 bnx2i_get_rq_buf(bnx2i_cmd->conn, conn->data, datalen);
1300 bnx2i_put_rq_buf(bnx2i_cmd->conn, 1);
1301 }
1302
1303done:
1304 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
1305 conn->data, datalen);
1306fail:
1307 spin_unlock(&session->lock);
1308 return 0;
1309}
1310
1311
1312/**
1313 * bnx2i_process_login_resp - this function handles iscsi login response
1314 * @session: iscsi session pointer
1315 * @bnx2i_conn: iscsi connection pointer
1316 * @cqe: pointer to newly DMA'ed CQE entry for processing
1317 *
1318 * process Login Response CQE & complete it to open-iscsi user daemon
1319 */
1320static int bnx2i_process_login_resp(struct iscsi_session *session,
1321 struct bnx2i_conn *bnx2i_conn,
1322 struct cqe *cqe)
1323{
1324 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1325 struct iscsi_task *task;
1326 struct bnx2i_login_response *login;
1327 struct iscsi_login_rsp *resp_hdr;
1328 int pld_len;
1329 int pad_len;
1330
1331 login = (struct bnx2i_login_response *) cqe;
1332 spin_lock(&session->lock);
1333 task = iscsi_itt_to_task(conn,
1334 login->itt & ISCSI_LOGIN_RESPONSE_INDEX);
1335 if (!task)
1336 goto done;
1337
1338 resp_hdr = (struct iscsi_login_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
1339 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1340 resp_hdr->opcode = login->op_code;
1341 resp_hdr->flags = login->response_flags;
1342 resp_hdr->max_version = login->version_max;
1343	resp_hdr->active_version = login->version_active;
1344 resp_hdr->hlength = 0;
1345
1346 hton24(resp_hdr->dlength, login->data_length);
1347 memcpy(resp_hdr->isid, &login->isid_lo, 6);
1348 resp_hdr->tsih = cpu_to_be16(login->tsih);
1349 resp_hdr->itt = task->hdr->itt;
1350 resp_hdr->statsn = cpu_to_be32(login->stat_sn);
1351 resp_hdr->exp_cmdsn = cpu_to_be32(login->exp_cmd_sn);
1352 resp_hdr->max_cmdsn = cpu_to_be32(login->max_cmd_sn);
1353 resp_hdr->status_class = login->status_class;
1354 resp_hdr->status_detail = login->status_detail;
1355 pld_len = login->data_length;
1356 bnx2i_conn->gen_pdu.resp_wr_ptr =
1357 bnx2i_conn->gen_pdu.resp_buf + pld_len;
1358
1359 pad_len = 0;
1360 if (pld_len & 0x3)
1361 pad_len = 4 - (pld_len % 4);
1362
1363 if (pad_len) {
1364 int i = 0;
1365 for (i = 0; i < pad_len; i++) {
1366 bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0;
1367 bnx2i_conn->gen_pdu.resp_wr_ptr++;
1368 }
1369 }
1370
1371 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
1372 bnx2i_conn->gen_pdu.resp_buf,
1373 bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf);
1374done:
1375 spin_unlock(&session->lock);
1376 return 0;
1377}
1378
1379/**
1380 * bnx2i_process_tmf_resp - this function handles iscsi TMF response
1381 * @session: iscsi session pointer
1382 * @bnx2i_conn: iscsi connection pointer
1383 * @cqe: pointer to newly DMA'ed CQE entry for processing
1384 *
1385 * process iSCSI TMF Response CQE and wake up the driver eh thread.
1386 */
1387static int bnx2i_process_tmf_resp(struct iscsi_session *session,
1388 struct bnx2i_conn *bnx2i_conn,
1389 struct cqe *cqe)
1390{
1391 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1392 struct iscsi_task *task;
1393 struct bnx2i_tmf_response *tmf_cqe;
1394 struct iscsi_tm_rsp *resp_hdr;
1395
1396 tmf_cqe = (struct bnx2i_tmf_response *)cqe;
1397 spin_lock(&session->lock);
1398 task = iscsi_itt_to_task(conn,
1399 tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);
1400 if (!task)
1401 goto done;
1402
1403 resp_hdr = (struct iscsi_tm_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
1404 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1405 resp_hdr->opcode = tmf_cqe->op_code;
1406 resp_hdr->max_cmdsn = cpu_to_be32(tmf_cqe->max_cmd_sn);
1407 resp_hdr->exp_cmdsn = cpu_to_be32(tmf_cqe->exp_cmd_sn);
1408 resp_hdr->itt = task->hdr->itt;
1409 resp_hdr->response = tmf_cqe->response;
1410
1411 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
1412done:
1413 spin_unlock(&session->lock);
1414 return 0;
1415}
1416
1417/**
1418 * bnx2i_process_logout_resp - this function handles iscsi logout response
1419 * @session: iscsi session pointer
1420 * @bnx2i_conn: iscsi connection pointer
1421 * @cqe: pointer to newly DMA'ed CQE entry for processing
1422 *
1423 * process iSCSI Logout Response CQE & make function call to
1424 * notify the user daemon.
1425 */
1426static int bnx2i_process_logout_resp(struct iscsi_session *session,
1427 struct bnx2i_conn *bnx2i_conn,
1428 struct cqe *cqe)
1429{
1430 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1431 struct iscsi_task *task;
1432 struct bnx2i_logout_response *logout;
1433 struct iscsi_logout_rsp *resp_hdr;
1434
1435 logout = (struct bnx2i_logout_response *) cqe;
1436 spin_lock(&session->lock);
1437 task = iscsi_itt_to_task(conn,
1438 logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
1439 if (!task)
1440 goto done;
1441
1442 resp_hdr = (struct iscsi_logout_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
1443 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1444 resp_hdr->opcode = logout->op_code;
1445 resp_hdr->flags = logout->response;
1446 resp_hdr->hlength = 0;
1447
1448 resp_hdr->itt = task->hdr->itt;
1449 resp_hdr->statsn = task->hdr->exp_statsn;
1450 resp_hdr->exp_cmdsn = cpu_to_be32(logout->exp_cmd_sn);
1451 resp_hdr->max_cmdsn = cpu_to_be32(logout->max_cmd_sn);
1452
1453 resp_hdr->t2wait = cpu_to_be32(logout->time_to_wait);
1454 resp_hdr->t2retain = cpu_to_be32(logout->time_to_retain);
1455
1456 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
1457done:
1458 spin_unlock(&session->lock);
1459 return 0;
1460}
1461
1462/**
1463 * bnx2i_process_nopin_local_cmpl - this function handles iscsi nopin CQE
1464 * @session: iscsi session pointer
1465 * @bnx2i_conn: iscsi connection pointer
1466 * @cqe: pointer to newly DMA'ed CQE entry for processing
1467 *
1468 * process iSCSI NOPIN local completion CQE, frees ITT and command structures
1469 */
1470static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
1471 struct bnx2i_conn *bnx2i_conn,
1472 struct cqe *cqe)
1473{
1474 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1475 struct bnx2i_nop_in_msg *nop_in;
1476 struct iscsi_task *task;
1477
1478 nop_in = (struct bnx2i_nop_in_msg *)cqe;
1479 spin_lock(&session->lock);
1480 task = iscsi_itt_to_task(conn,
1481 nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
1482 if (task)
1483 iscsi_put_task(task);
1484 spin_unlock(&session->lock);
1485}
1486
1487/**
1488 * bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsol pdu is recvd
1489 * @conn: iscsi connection
1490 *
1491 * Firmware advances RQ producer index for every unsolicited PDU even if
1492 * payload data length is '0'. This function makes corresponding
1493 * adjustments on the driver side to match this f/w behavior
1494 */
1495static void bnx2i_unsol_pdu_adjust_rq(struct bnx2i_conn *bnx2i_conn)
1496{
1497 char dummy_rq_data[2];
1498 bnx2i_get_rq_buf(bnx2i_conn, dummy_rq_data, 1);
1499 bnx2i_put_rq_buf(bnx2i_conn, 1);
1500}
1501
1502
1503/**
1504 * bnx2i_process_nopin_mesg - this function handles iscsi nopin CQE
1505 * @session: iscsi session pointer
1506 * @bnx2i_conn: iscsi connection pointer
1507 * @cqe: pointer to newly DMA'ed CQE entry for processing
1508 *
1509 * process iSCSI target's proactive iSCSI NOPIN request
1510 */
1511static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
1512 struct bnx2i_conn *bnx2i_conn,
1513 struct cqe *cqe)
1514{
1515 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1516 struct iscsi_task *task;
1517 struct bnx2i_nop_in_msg *nop_in;
1518 struct iscsi_nopin *hdr;
1519 u32 itt;
1520 int tgt_async_nop = 0;
1521
1522 nop_in = (struct bnx2i_nop_in_msg *)cqe;
1523 itt = nop_in->itt & ISCSI_NOP_IN_MSG_INDEX;
1524
1525 spin_lock(&session->lock);
1526 hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
1527 memset(hdr, 0, sizeof(struct iscsi_hdr));
1528 hdr->opcode = nop_in->op_code;
1529 hdr->max_cmdsn = cpu_to_be32(nop_in->max_cmd_sn);
1530 hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn);
1531 hdr->ttt = cpu_to_be32(nop_in->ttt);
1532
1533 if (itt == (u16) RESERVED_ITT) {
1534 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
1535 hdr->itt = RESERVED_ITT;
1536 tgt_async_nop = 1;
1537 goto done;
1538 }
1539
1540 /* this is a response to one of our nop-outs */
1541 task = iscsi_itt_to_task(conn, itt);
1542 if (task) {
1543 hdr->flags = ISCSI_FLAG_CMD_FINAL;
1544 hdr->itt = task->hdr->itt;
1545 hdr->ttt = cpu_to_be32(nop_in->ttt);
1546 memcpy(hdr->lun, nop_in->lun, 8);
1547 }
1548done:
1549 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1550 spin_unlock(&session->lock);
1551
1552 return tgt_async_nop;
1553}
1554
1555
1556/**
1557 * bnx2i_process_async_mesg - this function handles iscsi async message
1558 * @session: iscsi session pointer
1559 * @bnx2i_conn: iscsi connection pointer
1560 * @cqe: pointer to newly DMA'ed CQE entry for processing
1561 *
1562 * process iSCSI ASYNC Message
1563 */
1564static void bnx2i_process_async_mesg(struct iscsi_session *session,
1565 struct bnx2i_conn *bnx2i_conn,
1566 struct cqe *cqe)
1567{
1568 struct bnx2i_async_msg *async_cqe;
1569 struct iscsi_async *resp_hdr;
1570 u8 async_event;
1571
1572 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
1573
1574 async_cqe = (struct bnx2i_async_msg *)cqe;
1575 async_event = async_cqe->async_event;
1576
1577 if (async_event == ISCSI_ASYNC_MSG_SCSI_EVENT) {
1578 iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
1579 "async: scsi events not supported\n");
1580 return;
1581 }
1582
1583 spin_lock(&session->lock);
1584 resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr;
1585 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1586 resp_hdr->opcode = async_cqe->op_code;
1587 resp_hdr->flags = 0x80;
1588
1589 memcpy(resp_hdr->lun, async_cqe->lun, 8);
1590 resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn);
1591 resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn);
1592
1593 resp_hdr->async_event = async_cqe->async_event;
1594 resp_hdr->async_vcode = async_cqe->async_vcode;
1595
1596 resp_hdr->param1 = cpu_to_be16(async_cqe->param1);
1597 resp_hdr->param2 = cpu_to_be16(async_cqe->param2);
1598 resp_hdr->param3 = cpu_to_be16(async_cqe->param3);
1599
1600 __iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data,
1601 (struct iscsi_hdr *)resp_hdr, NULL, 0);
1602 spin_unlock(&session->lock);
1603}
1604
1605
1606/**
1607 * bnx2i_process_reject_mesg - process iscsi reject pdu
1608 * @session: iscsi session pointer
1609 * @bnx2i_conn: iscsi connection pointer
1610 * @cqe: pointer to newly DMA'ed CQE entry for processing
1611 *
1612 * process iSCSI REJECT message
1613 */
1614static void bnx2i_process_reject_mesg(struct iscsi_session *session,
1615 struct bnx2i_conn *bnx2i_conn,
1616 struct cqe *cqe)
1617{
1618 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1619 struct bnx2i_reject_msg *reject;
1620 struct iscsi_reject *hdr;
1621
1622 reject = (struct bnx2i_reject_msg *) cqe;
1623 if (reject->data_length) {
1624 bnx2i_get_rq_buf(bnx2i_conn, conn->data, reject->data_length);
1625 bnx2i_put_rq_buf(bnx2i_conn, 1);
1626 } else
1627 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
1628
1629 spin_lock(&session->lock);
1630 hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr;
1631 memset(hdr, 0, sizeof(struct iscsi_hdr));
1632 hdr->opcode = reject->op_code;
1633 hdr->reason = reject->reason;
1634 hton24(hdr->dlength, reject->data_length);
1635 hdr->max_cmdsn = cpu_to_be32(reject->max_cmd_sn);
1636 hdr->exp_cmdsn = cpu_to_be32(reject->exp_cmd_sn);
1637 hdr->ffffffff = cpu_to_be32(RESERVED_ITT);
1638 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data,
1639 reject->data_length);
1640 spin_unlock(&session->lock);
1641}
1642
1643/**
1644 * bnx2i_process_cmd_cleanup_resp - process scsi command clean-up completion
1645 * @session: iscsi session pointer
1646 * @bnx2i_conn: iscsi connection pointer
1647 * @cqe: pointer to newly DMA'ed CQE entry for processing
1648 *
1649 * process command cleanup response CQE during conn shutdown or error recovery
1650 */
1651static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,
1652 struct bnx2i_conn *bnx2i_conn,
1653 struct cqe *cqe)
1654{
1655 struct bnx2i_cleanup_response *cmd_clean_rsp;
1656 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1657 struct iscsi_task *task;
1658
1659 cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
1660 spin_lock(&session->lock);
1661 task = iscsi_itt_to_task(conn,
1662 cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
1663 if (!task)
1664 printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",
1665 cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
1666 spin_unlock(&session->lock);
1667 complete(&bnx2i_conn->cmd_cleanup_cmpl);
1668}
1669
1670
1671
1672/**
1673 * bnx2i_process_new_cqes - process newly DMA'ed CQEs
1674 * @bnx2i_conn: iscsi connection
1675 *
1676 * this function is called by the generic KCQ handler to process all pending CQEs
1677 */
1678static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
1679{
1680 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1681 struct iscsi_session *session = conn->session;
1682 struct qp_info *qp = &bnx2i_conn->ep->qp;
1683 struct bnx2i_nop_in_msg *nopin;
1684 int tgt_async_msg;
1685
1686 while (1) {
1687 nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe;
1688 if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
1689 break;
1690
1691 if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx)))
1692 break;
1693
1694 tgt_async_msg = 0;
1695
1696 switch (nopin->op_code) {
1697 case ISCSI_OP_SCSI_CMD_RSP:
1698 case ISCSI_OP_SCSI_DATA_IN:
1699 bnx2i_process_scsi_cmd_resp(session, bnx2i_conn,
1700 qp->cq_cons_qe);
1701 break;
1702 case ISCSI_OP_LOGIN_RSP:
1703 bnx2i_process_login_resp(session, bnx2i_conn,
1704 qp->cq_cons_qe);
1705 break;
1706 case ISCSI_OP_SCSI_TMFUNC_RSP:
1707 bnx2i_process_tmf_resp(session, bnx2i_conn,
1708 qp->cq_cons_qe);
1709 break;
1710 case ISCSI_OP_LOGOUT_RSP:
1711 bnx2i_process_logout_resp(session, bnx2i_conn,
1712 qp->cq_cons_qe);
1713 break;
1714 case ISCSI_OP_NOOP_IN:
1715 if (bnx2i_process_nopin_mesg(session, bnx2i_conn,
1716 qp->cq_cons_qe))
1717 tgt_async_msg = 1;
1718 break;
1719 case ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION:
1720 bnx2i_process_nopin_local_cmpl(session, bnx2i_conn,
1721 qp->cq_cons_qe);
1722 break;
1723 case ISCSI_OP_ASYNC_EVENT:
1724 bnx2i_process_async_mesg(session, bnx2i_conn,
1725 qp->cq_cons_qe);
1726 tgt_async_msg = 1;
1727 break;
1728 case ISCSI_OP_REJECT:
1729 bnx2i_process_reject_mesg(session, bnx2i_conn,
1730 qp->cq_cons_qe);
1731 break;
1732 case ISCSI_OPCODE_CLEANUP_RESPONSE:
1733 bnx2i_process_cmd_cleanup_resp(session, bnx2i_conn,
1734 qp->cq_cons_qe);
1735 break;
1736 default:
1737 printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
1738 nopin->op_code);
1739 }
1740
1741 if (!tgt_async_msg)
1742 bnx2i_conn->ep->num_active_cmds--;
1743
1744		/* clear out in production version only; till beta, keep the opcode
1745		 * field intact as it will be helpful in debugging (context dump)
1746 * nopin->op_code = 0;
1747 */
1748 qp->cqe_exp_seq_sn++;
1749 if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1))
1750 qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN;
1751
1752 if (qp->cq_cons_qe == qp->cq_last_qe) {
1753 qp->cq_cons_qe = qp->cq_first_qe;
1754 qp->cq_cons_idx = 0;
1755 } else {
1756 qp->cq_cons_qe++;
1757 qp->cq_cons_idx++;
1758 }
1759 }
1760 bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
1761}
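The CQE validity check above relies on cqe_exp_seq_sn: it is advanced for every consumed CQE and wraps back to ISCSI_INITIAL_SN once it reaches 2 * cqe_size + 1, so a stale entry whose cq_req_sn no longer matches terminates the loop. A small standalone sketch of that wrap, with the initial value passed as a parameter rather than hard-coding ISCSI_INITIAL_SN and a helper name chosen here for illustration:

	#include <stdint.h>

	/* Illustrative only: advance the expected CQE sequence number and
	 * wrap after 2 * cqe_size steps, as done at the end of the loop. */
	static uint32_t next_exp_seq_sn(uint32_t sn, uint32_t cqe_size,
					uint32_t initial_sn)
	{
		if (++sn == cqe_size * 2 + 1)
			sn = initial_sn;
		return sn;
	}
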
1762
1763/**
1764 * bnx2i_fastpath_notification - process global event queue (KCQ)
1765 * @hba: adapter structure pointer
1766 * @new_cqe_kcqe: pointer to newly DMA'ed KCQE entry
1767 *
1768 * Fast path event notification handler, KCQ entry carries context id
1769 * of the connection that has 1 or more pending CQ entries
1770 */
1771static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
1772 struct iscsi_kcqe *new_cqe_kcqe)
1773{
1774 struct bnx2i_conn *conn;
1775 u32 iscsi_cid;
1776
1777 iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
1778 conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
1779
1780 if (!conn) {
1781 printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
1782 return;
1783 }
1784 if (!conn->ep) {
1785 printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
1786 return;
1787 }
1788
1789 bnx2i_process_new_cqes(conn);
1790}
1791
1792
1793/**
1794 * bnx2i_process_update_conn_cmpl - process iscsi conn update completion KCQE
1795 * @hba: adapter structure pointer
1796 * @update_kcqe: kcqe pointer
1797 *
1798 * CONN_UPDATE completion handler, this completes iSCSI connection FFP migration
1799 */
1800static void bnx2i_process_update_conn_cmpl(struct bnx2i_hba *hba,
1801 struct iscsi_kcqe *update_kcqe)
1802{
1803 struct bnx2i_conn *conn;
1804 u32 iscsi_cid;
1805
1806 iscsi_cid = update_kcqe->iscsi_conn_id;
1807 conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
1808
1809 if (!conn) {
1810 printk(KERN_ALERT "conn_update: cid %x not valid\n", iscsi_cid);
1811 return;
1812 }
1813 if (!conn->ep) {
1814 printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid);
1815 return;
1816 }
1817
1818 if (update_kcqe->completion_status) {
1819 printk(KERN_ALERT "request failed cid %x\n", iscsi_cid);
1820 conn->ep->state = EP_STATE_ULP_UPDATE_FAILED;
1821 } else
1822 conn->ep->state = EP_STATE_ULP_UPDATE_COMPL;
1823
1824 wake_up_interruptible(&conn->ep->ofld_wait);
1825}
1826
1827
1828/**
1829 * bnx2i_recovery_que_add_conn - add connection to recovery queue
1830 * @hba: adapter structure pointer
1831 * @bnx2i_conn: iscsi connection
1832 *
1833 * Add connection to recovery queue and schedule adapter eh worker
1834 */
1835static void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba,
1836 struct bnx2i_conn *bnx2i_conn)
1837{
1838 iscsi_conn_failure(bnx2i_conn->cls_conn->dd_data,
1839 ISCSI_ERR_CONN_FAILED);
1840}
1841
1842
1843/**
1844 * bnx2i_process_tcp_error - process error notification on a given connection
1845 *
1846 * @hba: adapter structure pointer
1847 * @tcp_err: tcp error kcqe pointer
1848 *
1849 * handles tcp level error notifications from FW.
1850 */
1851static void bnx2i_process_tcp_error(struct bnx2i_hba *hba,
1852 struct iscsi_kcqe *tcp_err)
1853{
1854 struct bnx2i_conn *bnx2i_conn;
1855 u32 iscsi_cid;
1856
1857 iscsi_cid = tcp_err->iscsi_conn_id;
1858 bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
1859
1860 if (!bnx2i_conn) {
1861 printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
1862 return;
1863 }
1864
1865 printk(KERN_ALERT "bnx2i - cid 0x%x had TCP errors, error code 0x%x\n",
1866 iscsi_cid, tcp_err->completion_status);
1867 bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
1868}
1869
1870
1871/**
1872 * bnx2i_process_iscsi_error - process error notification on a given connection
1873 * @hba: adapter structure pointer
1874 * @iscsi_err: iscsi error kcqe pointer
1875 *
1876 * handles iscsi error notifications from the FW. Firmware, based on the
1877 * initial handshake, classifies iscsi protocol / TCP RFC violations into
1878 * either warning or error indications. If the indication is of "Error" type,
1879 * the driver will initiate session recovery for that connection/session. For
1880 * a "Warning" type indication, the driver will put out a system log message
1881 * (there will be only one message for each type for the life of the
1882 * session; this is to avoid unnecessarily overloading the system)
1883 */
1884static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
1885 struct iscsi_kcqe *iscsi_err)
1886{
1887 struct bnx2i_conn *bnx2i_conn;
1888 u32 iscsi_cid;
1889 char warn_notice[] = "iscsi_warning";
1890 char error_notice[] = "iscsi_error";
1891 char additional_notice[64];
1892 char *message;
1893 int need_recovery;
1894 u64 err_mask64;
1895
1896 iscsi_cid = iscsi_err->iscsi_conn_id;
1897 bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
1898 if (!bnx2i_conn) {
1899 printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
1900 return;
1901 }
1902
1903 err_mask64 = (0x1ULL << iscsi_err->completion_status);
1904
1905 if (err_mask64 & iscsi_error_mask) {
1906 need_recovery = 0;
1907 message = warn_notice;
1908 } else {
1909 need_recovery = 1;
1910 message = error_notice;
1911 }
1912
1913 switch (iscsi_err->completion_status) {
1914 case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR:
1915 strcpy(additional_notice, "hdr digest err");
1916 break;
1917 case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR:
1918 strcpy(additional_notice, "data digest err");
1919 break;
1920 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE:
1921 strcpy(additional_notice, "wrong opcode rcvd");
1922 break;
1923 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN:
1924 strcpy(additional_notice, "AHS len > 0 rcvd");
1925 break;
1926 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT:
1927 strcpy(additional_notice, "invalid ITT rcvd");
1928 break;
1929 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN:
1930 strcpy(additional_notice, "wrong StatSN rcvd");
1931 break;
1932 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN:
1933 strcpy(additional_notice, "wrong DataSN rcvd");
1934 break;
1935 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T:
1936 strcpy(additional_notice, "pend R2T violation");
1937 break;
1938 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0:
1939 strcpy(additional_notice, "ERL0, UO");
1940 break;
1941 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1:
1942 strcpy(additional_notice, "ERL0, U1");
1943 break;
1944 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2:
1945 strcpy(additional_notice, "ERL0, U2");
1946 break;
1947 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3:
1948 strcpy(additional_notice, "ERL0, U3");
1949 break;
1950 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4:
1951 strcpy(additional_notice, "ERL0, U4");
1952 break;
1953 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5:
1954 strcpy(additional_notice, "ERL0, U5");
1955 break;
1956 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6:
1957 strcpy(additional_notice, "ERL0, U6");
1958 break;
1959 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN:
1960 strcpy(additional_notice, "invalid resi len");
1961 break;
1962 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN:
1963 strcpy(additional_notice, "MRDSL violation");
1964 break;
1965 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO:
1966 strcpy(additional_notice, "F-bit not set");
1967 break;
1968 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV:
1969 strcpy(additional_notice, "invalid TTT");
1970 break;
1971 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN:
1972 strcpy(additional_notice, "invalid DataSN");
1973 break;
1974 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN:
1975 strcpy(additional_notice, "burst len violation");
1976 break;
1977 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF:
1978 strcpy(additional_notice, "buf offset violation");
1979 break;
1980 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN:
1981 strcpy(additional_notice, "invalid LUN field");
1982 break;
1983 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN:
1984 strcpy(additional_notice, "invalid R2TSN field");
1985 break;
1986#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 \
1987 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0
1988 case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0:
1989 strcpy(additional_notice, "invalid cmd len1");
1990 break;
1991#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 \
1992 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1
1993 case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1:
1994 strcpy(additional_notice, "invalid cmd len2");
1995 break;
1996 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED:
1997 strcpy(additional_notice,
1998 "pend r2t exceeds MaxOutstandingR2T value");
1999 break;
2000 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV:
2001 strcpy(additional_notice, "TTT is rsvd");
2002 break;
2003 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN:
2004 strcpy(additional_notice, "MBL violation");
2005 break;
2006#define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO \
2007 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO
2008 case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO:
2009 strcpy(additional_notice, "data seg len != 0");
2010 break;
2011 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN:
2012 strcpy(additional_notice, "reject pdu len error");
2013 break;
2014 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN:
2015 strcpy(additional_notice, "async pdu len error");
2016 break;
2017 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN:
2018 strcpy(additional_notice, "nopin pdu len error");
2019 break;
2020#define BNX2_ERR_PEND_R2T_IN_CLEANUP \
2021 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP
2022 case BNX2_ERR_PEND_R2T_IN_CLEANUP:
2023 strcpy(additional_notice, "pend r2t in cleanup");
2024 break;
2025
2026 case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT:
2027 strcpy(additional_notice, "IP fragments rcvd");
2028 break;
2029 case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS:
2030 strcpy(additional_notice, "IP options error");
2031 break;
2032 case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG:
2033 strcpy(additional_notice, "urgent flag error");
2034 break;
2035 default:
2036 printk(KERN_ALERT "iscsi_err - unknown err %x\n",
2037 iscsi_err->completion_status);
2038 }
2039
2040 if (need_recovery) {
2041 iscsi_conn_printk(KERN_ALERT,
2042 bnx2i_conn->cls_conn->dd_data,
2043 "bnx2i: %s - %s\n",
2044 message, additional_notice);
2045
2046 iscsi_conn_printk(KERN_ALERT,
2047 bnx2i_conn->cls_conn->dd_data,
2048 "conn_err - hostno %d conn %p, "
2049 "iscsi_cid %x cid %x\n",
2050 bnx2i_conn->hba->shost->host_no,
2051 bnx2i_conn, bnx2i_conn->ep->ep_iscsi_cid,
2052 bnx2i_conn->ep->ep_cid);
2053 bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
2054 } else
2055 if (!test_and_set_bit(iscsi_err->completion_status,
2056 (void *) &bnx2i_conn->violation_notified))
2057 iscsi_conn_printk(KERN_ALERT,
2058 bnx2i_conn->cls_conn->dd_data,
2059 "bnx2i: %s - %s\n",
2060 message, additional_notice);
2061}
2062
2063
2064/**
2065 * bnx2i_process_conn_destroy_cmpl - process iscsi conn destroy completion
2066 * @hba: adapter structure pointer
2067 * @conn_destroy: conn destroy kcqe pointer
2068 *
2069 * handles connection destroy completion request.
2070 */
2071static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
2072 struct iscsi_kcqe *conn_destroy)
2073{
2074 struct bnx2i_endpoint *ep;
2075
2076 ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id);
2077 if (!ep) {
2078 printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending "
2079			       "offload request, unexpected completion\n");
2080 return;
2081 }
2082
2083 if (hba != ep->hba) {
2084 printk(KERN_ALERT "conn destroy- error hba mis-match\n");
2085 return;
2086 }
2087
2088 if (conn_destroy->completion_status) {
2089 printk(KERN_ALERT "conn_destroy_cmpl: op failed\n");
2090 ep->state = EP_STATE_CLEANUP_FAILED;
2091 } else
2092 ep->state = EP_STATE_CLEANUP_CMPL;
2093 wake_up_interruptible(&ep->ofld_wait);
2094}
2095
2096
2097/**
2098 * bnx2i_process_ofld_cmpl - process initial iscsi conn offload completion
2099 * @hba: adapter structure pointer
2100 * @ofld_kcqe: conn offload kcqe pointer
2101 *
2102 * handles initial connection offload completion, ep_connect() thread is
2103 * woken up to continue with the LLP connect process
2104 */
2105static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
2106 struct iscsi_kcqe *ofld_kcqe)
2107{
2108 u32 cid_addr;
2109 struct bnx2i_endpoint *ep;
2110 u32 cid_num;
2111
2112 ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id);
2113 if (!ep) {
2114 printk(KERN_ALERT "ofld_cmpl: no pend offload request\n");
2115 return;
2116 }
2117
2118 if (hba != ep->hba) {
2119 printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
2120 return;
2121 }
2122
2123 if (ofld_kcqe->completion_status) {
2124 if (ofld_kcqe->completion_status ==
2125 ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE)
2126 printk(KERN_ALERT "bnx2i: unable to allocate"
2127 " iSCSI context resources\n");
2128 ep->state = EP_STATE_OFLD_FAILED;
2129 } else {
2130 ep->state = EP_STATE_OFLD_COMPL;
2131 cid_addr = ofld_kcqe->iscsi_conn_context_id;
2132 cid_num = bnx2i_get_cid_num(ep);
2133 ep->ep_cid = cid_addr;
2134 ep->qp.ctx_base = NULL;
2135 }
2136 wake_up_interruptible(&ep->ofld_wait);
2137}
2138
2139/**
2140 * bnx2i_indicate_kcqe - process pending KCQ entries
2141 * @context: adapter structure pointer
2142 * @kcqe: array of pending KCQE entries, @num_cqe: number of entries
2143 *
2144 * Generic KCQ event handler/dispatcher
2145 */
2146static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[],
2147 u32 num_cqe)
2148{
2149 struct bnx2i_hba *hba = context;
2150 int i = 0;
2151 struct iscsi_kcqe *ikcqe = NULL;
2152
2153 while (i < num_cqe) {
2154 ikcqe = (struct iscsi_kcqe *) kcqe[i++];
2155
2156 if (ikcqe->op_code ==
2157 ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION)
2158 bnx2i_fastpath_notification(hba, ikcqe);
2159 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_OFFLOAD_CONN)
2160 bnx2i_process_ofld_cmpl(hba, ikcqe);
2161 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_UPDATE_CONN)
2162 bnx2i_process_update_conn_cmpl(hba, ikcqe);
2163 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_INIT) {
2164 if (ikcqe->completion_status !=
2165 ISCSI_KCQE_COMPLETION_STATUS_SUCCESS)
2166 bnx2i_iscsi_license_error(hba, ikcqe->\
2167 completion_status);
2168 else {
2169 set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
2170 bnx2i_get_link_state(hba);
2171 printk(KERN_INFO "bnx2i [%.2x:%.2x.%.2x]: "
2172 "ISCSI_INIT passed\n",
2173 (u8)hba->pcidev->bus->number,
2174 hba->pci_devno,
2175 (u8)hba->pci_func);
2176
2177
2178 }
2179 } else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_DESTROY_CONN)
2180 bnx2i_process_conn_destroy_cmpl(hba, ikcqe);
2181 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_ISCSI_ERROR)
2182 bnx2i_process_iscsi_error(hba, ikcqe);
2183 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_TCP_ERROR)
2184 bnx2i_process_tcp_error(hba, ikcqe);
2185 else
2186 printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
2187 ikcqe->op_code);
2188 }
2189}
2190
2191
2192/**
2193 * bnx2i_indicate_netevent - Generic netdev event handler
2194 * @context: adapter structure pointer
2195 * @event: event type
2196 *
2197 * Handles four netdev events, NETDEV_UP, NETDEV_DOWN,
2198 * NETDEV_GOING_DOWN and NETDEV_CHANGE
2199 */
2200static void bnx2i_indicate_netevent(void *context, unsigned long event)
2201{
2202 struct bnx2i_hba *hba = context;
2203
2204 switch (event) {
2205 case NETDEV_UP:
2206 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
2207 bnx2i_send_fw_iscsi_init_msg(hba);
2208 break;
2209 case NETDEV_DOWN:
2210 clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
2211 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
2212 break;
2213 case NETDEV_GOING_DOWN:
2214 set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
2215 iscsi_host_for_each_session(hba->shost,
2216 bnx2i_drop_session);
2217 break;
2218 case NETDEV_CHANGE:
2219 bnx2i_get_link_state(hba);
2220 break;
2221 default:
2222 ;
2223 }
2224}
2225
2226
2227/**
2228 * bnx2i_cm_connect_cmpl - process iscsi conn establishment completion
2229 * @cm_sk: cnic sock structure pointer
2230 *
2231 * function callback exported via bnx2i - cnic driver interface to
2232 * indicate completion of option-2 TCP connect request.
2233 */
2234static void bnx2i_cm_connect_cmpl(struct cnic_sock *cm_sk)
2235{
2236 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2237
2238 if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
2239 ep->state = EP_STATE_CONNECT_FAILED;
2240 else if (test_bit(SK_F_OFFLD_COMPLETE, &cm_sk->flags))
2241 ep->state = EP_STATE_CONNECT_COMPL;
2242 else
2243 ep->state = EP_STATE_CONNECT_FAILED;
2244
2245 wake_up_interruptible(&ep->ofld_wait);
2246}
2247
2248
2249/**
2250 * bnx2i_cm_close_cmpl - process tcp conn close completion
2251 * @cm_sk: cnic sock structure pointer
2252 *
2253 * function callback exported via bnx2i - cnic driver interface to
2254 * indicate completion of option-2 graceful TCP connect shutdown
2255 */
2256static void bnx2i_cm_close_cmpl(struct cnic_sock *cm_sk)
2257{
2258 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2259
2260 ep->state = EP_STATE_DISCONN_COMPL;
2261 wake_up_interruptible(&ep->ofld_wait);
2262}
2263
2264
2265/**
2266 * bnx2i_cm_abort_cmpl - process abortive tcp conn teardown completion
2267 * @cm_sk: cnic sock structure pointer
2268 *
2269 * function callback exported via bnx2i - cnic driver interface to
2270 * indicate completion of option-2 abortive TCP connect termination
2271 */
2272static void bnx2i_cm_abort_cmpl(struct cnic_sock *cm_sk)
2273{
2274 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2275
2276 ep->state = EP_STATE_DISCONN_COMPL;
2277 wake_up_interruptible(&ep->ofld_wait);
2278}
2279
2280
2281/**
2282 * bnx2i_cm_remote_close - process received TCP FIN
2283 * @cm_sk: cnic sock structure pointer, carries the endpoint pointer
2284 * in its context field
2285 *
2286 * function callback exported via bnx2i - cnic driver interface to indicate
2287 * async TCP events such as FIN
2288 */
2289static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk)
2290{
2291 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2292
2293 ep->state = EP_STATE_TCP_FIN_RCVD;
2294 if (ep->conn)
2295 bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
2296}
2297
2298/**
2299 * bnx2i_cm_remote_abort - process TCP RST and start conn cleanup
2300 * @cm_sk: cnic sock structure pointer, carries the endpoint pointer
2301 * in its context field
2302 *
2303 * function callback exported via bnx2i - cnic driver interface to
2304 * indicate async TCP events (RST) sent by the peer.
2305 */
2306static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk)
2307{
2308 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2309
2310 ep->state = EP_STATE_TCP_RST_RCVD;
2311 if (ep->conn)
2312 bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
2313}
2314
2315
2316static void bnx2i_send_nl_mesg(struct cnic_dev *dev, u32 msg_type,
2317 char *buf, u16 buflen)
2318{
2319 struct bnx2i_hba *hba;
2320
2321 hba = bnx2i_find_hba_for_cnic(dev);
2322 if (!hba)
2323 return;
2324
2325 if (iscsi_offload_mesg(hba->shost, &bnx2i_iscsi_transport,
2326 msg_type, buf, buflen))
2327 printk(KERN_ALERT "bnx2i: private nl message send error\n");
2328
2329}
2330
2331
2332/**
2333 * bnx2i_cnic_cb - global template of bnx2i - cnic driver interface structure
2334 * carrying callback function pointers
2335 *
2336 */
2337struct cnic_ulp_ops bnx2i_cnic_cb = {
2338 .cnic_init = bnx2i_ulp_init,
2339 .cnic_exit = bnx2i_ulp_exit,
2340 .cnic_start = bnx2i_start,
2341 .cnic_stop = bnx2i_stop,
2342 .indicate_kcqes = bnx2i_indicate_kcqe,
2343 .indicate_netevent = bnx2i_indicate_netevent,
2344 .cm_connect_complete = bnx2i_cm_connect_cmpl,
2345 .cm_close_complete = bnx2i_cm_close_cmpl,
2346 .cm_abort_complete = bnx2i_cm_abort_cmpl,
2347 .cm_remote_close = bnx2i_cm_remote_close,
2348 .cm_remote_abort = bnx2i_cm_remote_abort,
2349 .iscsi_nl_send_msg = bnx2i_send_nl_mesg,
2350 .owner = THIS_MODULE
2351};
2352
2353
2354/**
2355 * bnx2i_map_ep_dbell_regs - map connection doorbell registers
2356 * @ep: bnx2i endpoint
2357 *
2358 * maps the connection's SQ and RQ doorbell registers. 5706/5708/5709 devices
2359 * host these registers in BAR #0, whereas on 57710 these registers are
2360 * accessed by mapping BAR #1
2361 */
2362int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
2363{
2364 u32 cid_num;
2365 u32 reg_off;
2366 u32 first_l4l5;
2367 u32 ctx_sz;
2368 u32 config2;
2369 resource_size_t reg_base;
2370
2371 cid_num = bnx2i_get_cid_num(ep);
2372
2373 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
2374 reg_base = pci_resource_start(ep->hba->pcidev,
2375 BNX2X_DOORBELL_PCI_BAR);
2376 reg_off = PAGE_SIZE * (cid_num & 0x1FFFF) + DPM_TRIGER_TYPE;
2377 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
2378 goto arm_cq;
2379 }
2380
2381 reg_base = ep->hba->netdev->base_addr;
2382 if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
2383 (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
2384 config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
2385 first_l4l5 = config2 & BNX2_MQ_CONFIG2_FIRST_L4L5;
2386 ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3;
2387 if (ctx_sz)
2388 reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE
2389 + PAGE_SIZE *
2390 (((cid_num - first_l4l5) / ctx_sz) + 256);
2391 else
2392 reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
2393 } else
2394		/* 5709 device in normal mode and 5706/5708 devices */
2395 reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
2396
2397 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
2398 MB_KERNEL_CTX_SIZE);
2399 if (!ep->qp.ctx_base)
2400 return -ENOMEM;
2401
2402arm_cq:
2403 bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE);
2404 return 0;
2405}
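For 57710 the per-connection doorbell offset inside the doorbell BAR is a simple function of the low 17 bits of the CID, as computed above. A standalone sketch of that calculation (the function name is invented here, and page_size/dpm_trigger_off stand in for PAGE_SIZE and DPM_TRIGER_TYPE, whose values are not reproduced in this listing):

	#include <stdint.h>

	/* Illustrative only: 57710 doorbell offset within the doorbell BAR. */
	static uint32_t dbell_offset_5771x(uint32_t cid_num, uint32_t page_size,
					   uint32_t dpm_trigger_off)
	{
		return page_size * (cid_num & 0x1FFFF) + dpm_trigger_off;
	}
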
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
new file mode 100644
index 000000000000..ae4b2d588fd3
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -0,0 +1,438 @@
1/* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
12 */
13
14#include "bnx2i.h"
15
16static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
17static u32 adapter_count;
18static int bnx2i_reg_device;
19
20#define DRV_MODULE_NAME "bnx2i"
21#define DRV_MODULE_VERSION "2.0.1d"
22#define DRV_MODULE_RELDATE "Mar 25, 2009"
23
24static char version[] __devinitdata =
25 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
26 " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
27
28
29MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>");
30MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver");
31MODULE_LICENSE("GPL");
32MODULE_VERSION(DRV_MODULE_VERSION);
33
34static DEFINE_RWLOCK(bnx2i_dev_lock);
35
36unsigned int event_coal_div = 1;
37module_param(event_coal_div, int, 0664);
38MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
39
40unsigned int en_tcp_dack = 1;
41module_param(en_tcp_dack, int, 0664);
42MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK");
43
44unsigned int error_mask1 = 0x00;
45module_param(error_mask1, int, 0664);
46MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1");
47
48unsigned int error_mask2 = 0x00;
49module_param(error_mask2, int, 0664);
50MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2");
51
52unsigned int sq_size;
53module_param(sq_size, int, 0664);
54MODULE_PARM_DESC(sq_size, "Configure SQ size");
55
56unsigned int rq_size = BNX2I_RQ_WQES_DEFAULT;
57module_param(rq_size, int, 0664);
58MODULE_PARM_DESC(rq_size, "Configure RQ size");
59
60u64 iscsi_error_mask = 0x00;
61
62static void bnx2i_unreg_one_device(struct bnx2i_hba *hba);
63
64
65/**
66 * bnx2i_identify_device - identifies NetXtreme II device type
67 * @hba: Adapter structure pointer
68 *
69 * This function identifies the NX2 device type and sets the appropriate
70 * queue mailbox register access method; the 5709 requires the driver to
71 * access MBOX regs using *bin* mode
72 */
73void bnx2i_identify_device(struct bnx2i_hba *hba)
74{
75 hba->cnic_dev_type = 0;
76 if ((hba->pci_did == PCI_DEVICE_ID_NX2_5706) ||
77 (hba->pci_did == PCI_DEVICE_ID_NX2_5706S))
78 set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type);
79 else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5708) ||
80 (hba->pci_did == PCI_DEVICE_ID_NX2_5708S))
81 set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type);
82 else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5709) ||
83 (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
84 set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
85 hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
86 } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
87 hba->pci_did == PCI_DEVICE_ID_NX2_57711)
88 set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
89}
90
91
92/**
93 * get_adapter_list_head - returns the first usable adapter on the adapter list
94 */
95struct bnx2i_hba *get_adapter_list_head(void)
96{
97 struct bnx2i_hba *hba = NULL;
98 struct bnx2i_hba *tmp_hba;
99
100 if (!adapter_count)
101 goto hba_not_found;
102
103 read_lock(&bnx2i_dev_lock);
104 list_for_each_entry(tmp_hba, &adapter_list, link) {
105 if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) {
106 hba = tmp_hba;
107 break;
108 }
109 }
110 read_unlock(&bnx2i_dev_lock);
111hba_not_found:
112 return hba;
113}
114
115
116/**
117 * bnx2i_find_hba_for_cnic - maps cnic device instance to bnx2i adapter instance
118 * @cnic: pointer to cnic device instance
119 *
120 */
121struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic)
122{
123 struct bnx2i_hba *hba, *temp;
124
125 read_lock(&bnx2i_dev_lock);
126 list_for_each_entry_safe(hba, temp, &adapter_list, link) {
127 if (hba->cnic == cnic) {
128 read_unlock(&bnx2i_dev_lock);
129 return hba;
130 }
131 }
132 read_unlock(&bnx2i_dev_lock);
133 return NULL;
134}
135
136
137/**
138 * bnx2i_start - cnic callback to initialize & start adapter instance
139 * @handle: transparent handle pointing to adapter structure
140 *
141 * This function maps the adapter structure to the pcidev structure and
142 * initiates the firmware handshake to enable/initialize on-chip iSCSI
143 * components. This bnx2i-cnic interface API callback is issued after the
144 * following two conditions are met:
145 * a) the underlying network interface is up (marked by the 'NETDEV_UP'
146 * event from netdev)
147 * b) the bnx2i adapter instance is registered
148 */
149void bnx2i_start(void *handle)
150{
151#define BNX2I_INIT_POLL_TIME (1000 / HZ)
152 struct bnx2i_hba *hba = handle;
153 int i = HZ;
154
155 bnx2i_send_fw_iscsi_init_msg(hba);
156 while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
157 msleep(BNX2I_INIT_POLL_TIME);
158}
159
160
161/**
162 * bnx2i_stop - cnic callback to shutdown adapter instance
163 * @handle: transparent handle pointing to adapter structure
164 *
165 * The driver checks whether the adapter is already in shutdown mode and,
166 * if not, starts the shutdown process
167 */
168void bnx2i_stop(void *handle)
169{
170 struct bnx2i_hba *hba = handle;
171
172 /* check if cleanup happened in GOING_DOWN context */
173 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
174 if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN,
175 &hba->adapter_state))
176 iscsi_host_for_each_session(hba->shost,
177 bnx2i_drop_session);
178}
179
180/**
181 * bnx2i_register_device - register bnx2i adapter instance with the cnic driver
182 * @hba: Adapter instance to register
183 *
184 * registers bnx2i adapter instance with the cnic driver while holding the
185 * adapter structure lock
186 */
187void bnx2i_register_device(struct bnx2i_hba *hba)
188{
189 if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
190 test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
191 return;
192 }
193
194 hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba);
195
196 spin_lock(&hba->lock);
197 bnx2i_reg_device++;
198 spin_unlock(&hba->lock);
199
200 set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
201}
202
203
204/**
205 * bnx2i_reg_dev_all - registers all adapter instances with the cnic driver
206 *
207 * registers all bnx2i adapter instances with the cnic driver while holding
208 * the global resource lock
209 */
210void bnx2i_reg_dev_all(void)
211{
212 struct bnx2i_hba *hba, *temp;
213
214 read_lock(&bnx2i_dev_lock);
215 list_for_each_entry_safe(hba, temp, &adapter_list, link)
216 bnx2i_register_device(hba);
217 read_unlock(&bnx2i_dev_lock);
218}
219
220
221/**
222 * bnx2i_unreg_one_device - unregister adapter instance with the cnic driver
223 * @hba: Adapter instance to unregister
224 *
225 * unregisters the bnx2i adapter instance from the cnic driver while holding
226 * the adapter structure lock
227 */
228static void bnx2i_unreg_one_device(struct bnx2i_hba *hba)
229{
230 if (hba->ofld_conns_active ||
231 !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) ||
232 test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state))
233 return;
234
235 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
236
237 spin_lock(&hba->lock);
238 bnx2i_reg_device--;
239 spin_unlock(&hba->lock);
240
241 /* ep_disconnect could come before NETDEV_DOWN, driver won't
242 * see NETDEV_DOWN as it already unregistered itself.
243 */
244 hba->adapter_state = 0;
245 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
246}
247
248/**
249 * bnx2i_unreg_dev_all - unregisters all bnx2i instances with the cnic driver
250 *
251 * unregisters all bnx2i adapter instances with the cnic driver while holding
252 * the global resource lock
253 */
254void bnx2i_unreg_dev_all(void)
255{
256 struct bnx2i_hba *hba, *temp;
257
258 read_lock(&bnx2i_dev_lock);
259 list_for_each_entry_safe(hba, temp, &adapter_list, link)
260 bnx2i_unreg_one_device(hba);
261 read_unlock(&bnx2i_dev_lock);
262}
263
264
265/**
266 * bnx2i_init_one - initialize an adapter instance and allocate memory resources
267 * @hba: bnx2i adapter instance
268 * @cnic: cnic device handle
269 *
270 * Global resource lock and host adapter lock are held during critical sections
271 * below. This routine is called from cnic_register_driver() context and from the
272 * workhorse thread which does the majority of device-specific initialization
273 */
274static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
275{
276 int rc;
277
278 read_lock(&bnx2i_dev_lock);
279 if (bnx2i_reg_device &&
280 !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
281 rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
282 if (rc) /* duplicate registration */
283 printk(KERN_ERR "bnx2i- dev reg failed\n");
284
285 spin_lock(&hba->lock);
286 bnx2i_reg_device++;
287 hba->age++;
288 spin_unlock(&hba->lock);
289
290 set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
291 }
292 read_unlock(&bnx2i_dev_lock);
293
294 write_lock(&bnx2i_dev_lock);
295 list_add_tail(&hba->link, &adapter_list);
296 adapter_count++;
297 write_unlock(&bnx2i_dev_lock);
298 return 0;
299}
300
301
302/**
303 * bnx2i_ulp_init - initialize an adapter instance
304 * @dev: cnic device handle
305 *
306 * Called from cnic_register_driver() context to initialize all enumerated
307 * cnic devices. This routine allocates the adapter structure and other
308 * device-specific resources.
309 */
310void bnx2i_ulp_init(struct cnic_dev *dev)
311{
312 struct bnx2i_hba *hba;
313
314 /* Allocate a HBA structure for this device */
315 hba = bnx2i_alloc_hba(dev);
316 if (!hba) {
317 printk(KERN_ERR "bnx2i init: hba initialization failed\n");
318 return;
319 }
320
321 /* Get PCI related information and update hba struct members */
322 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
323 if (bnx2i_init_one(hba, dev)) {
324 printk(KERN_ERR "bnx2i - hba %p init failed\n", hba);
325 bnx2i_free_hba(hba);
326 } else
327 hba->cnic = dev;
328}
329
330
331/**
332 * bnx2i_ulp_exit - shuts down adapter instance and frees all resources
333 * @dev: cnic device handle
334 *
335 */
336void bnx2i_ulp_exit(struct cnic_dev *dev)
337{
338 struct bnx2i_hba *hba;
339
340 hba = bnx2i_find_hba_for_cnic(dev);
341 if (!hba) {
342 printk(KERN_INFO "bnx2i_ulp_exit: hba not "
343 "found, dev 0x%p\n", dev);
344 return;
345 }
346 write_lock(&bnx2i_dev_lock);
347 list_del_init(&hba->link);
348 adapter_count--;
349
350 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
351 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
352 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
353
354 spin_lock(&hba->lock);
355 bnx2i_reg_device--;
356 spin_unlock(&hba->lock);
357 }
358 write_unlock(&bnx2i_dev_lock);
359
360 bnx2i_free_hba(hba);
361}
362
363
364/**
365 * bnx2i_mod_init - module init entry point
366 *
367 * Initializes driver-wide global data structures such as the endpoint pool,
368 * TCP port manager/queue and sysfs entries. Finally the driver registers
369 * itself with the cnic module
370 */
371static int __init bnx2i_mod_init(void)
372{
373 int err;
374
375 printk(KERN_INFO "%s", version);
376
377 if (!is_power_of_2(sq_size))
378 sq_size = roundup_pow_of_two(sq_size);
379
380 bnx2i_scsi_xport_template =
381 iscsi_register_transport(&bnx2i_iscsi_transport);
382 if (!bnx2i_scsi_xport_template) {
383 printk(KERN_ERR "Could not register bnx2i transport.\n");
384 err = -ENOMEM;
385 goto out;
386 }
387
388 err = cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb);
389 if (err) {
390 printk(KERN_ERR "Could not register bnx2i cnic driver.\n");
391 goto unreg_xport;
392 }
393
394 return 0;
395
396unreg_xport:
397 iscsi_unregister_transport(&bnx2i_iscsi_transport);
398out:
399 return err;
400}
401
402
403/**
404 * bnx2i_mod_exit - module cleanup/exit entry point
405 *
406 * Global resource lock and host adapter lock are held during critical sections
407 * in this function. The driver walks the adapter list, cleans up each
408 * instance, unregisters the iscsi transport name and finally unregisters
409 * itself with the cnic module
410 */
411static void __exit bnx2i_mod_exit(void)
412{
413 struct bnx2i_hba *hba;
414
415 write_lock(&bnx2i_dev_lock);
416 while (!list_empty(&adapter_list)) {
417 hba = list_entry(adapter_list.next, struct bnx2i_hba, link);
418 list_del(&hba->link);
419 adapter_count--;
420
421 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
422 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
423 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
424 bnx2i_reg_device--;
425 }
426
427 write_unlock(&bnx2i_dev_lock);
428 bnx2i_free_hba(hba);
429 write_lock(&bnx2i_dev_lock);
430 }
431 write_unlock(&bnx2i_dev_lock);
432
433 iscsi_unregister_transport(&bnx2i_iscsi_transport);
434 cnic_unregister_driver(CNIC_ULP_ISCSI);
435}
436
437module_init(bnx2i_mod_init);
438module_exit(bnx2i_mod_exit);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
new file mode 100644
index 000000000000..f7412196f2f8
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -0,0 +1,2064 @@
1/*
2 * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
3 *
4 * Copyright (c) 2006 - 2009 Broadcom Corporation
5 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
6 * Copyright (c) 2007, 2008 Mike Christie
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation.
11 *
12 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
13 */
14
15#include <scsi/scsi_tcq.h>
16#include <scsi/libiscsi.h>
17#include "bnx2i.h"
18
19struct scsi_transport_template *bnx2i_scsi_xport_template;
20struct iscsi_transport bnx2i_iscsi_transport;
21static struct scsi_host_template bnx2i_host_template;
22
23/*
24 * Global endpoint resource info
25 */
26static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */
27
28
29static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
30{
31 int retval = 0;
32
33 if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
34 test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
35 test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
36 retval = -EPERM;
37 return retval;
38}
39
40/**
41 * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks
42 * @cmd: iscsi cmd struct pointer
43 * @buf_off: absolute buffer offset
44 * @start_bd_off: u32 pointer to return the offset within the BD
45 * indicated by 'start_bd_idx' on which 'buf_off' falls
46 * @start_bd_idx: index of the BD on which 'buf_off' falls
47 *
48 * identifies & marks various bd info for scsi command's imm data,
49 * unsolicited data and the first solicited data seq.
50 */
51static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
52 u32 *start_bd_off, u32 *start_bd_idx)
53{
54 struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl;
55 u32 cur_offset = 0;
56 u32 cur_bd_idx = 0;
57
58 if (buf_off) {
59 while (buf_off >= (cur_offset + bd_tbl->buffer_length)) {
60 cur_offset += bd_tbl->buffer_length;
61 cur_bd_idx++;
62 bd_tbl++;
63 }
64 }
65
66 *start_bd_off = buf_off - cur_offset;
67 *start_bd_idx = cur_bd_idx;
68}
69
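A worked example of the scan above may help; the BD lengths here are made up purely for illustration and are not taken from the driver:

/*
 * Hypothetical BD table: lengths 4096, 4096, 2048 bytes.
 *
 *   buf_off = 5000:
 *     BD0 covers [0, 4096)     -> skipped (5000 >= 0 + 4096)
 *     BD1 covers [4096, 8192)  -> loop stops (5000 < 4096 + 4096)
 *
 *   result: *start_bd_idx = 1, *start_bd_off = 5000 - 4096 = 904
 */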
70/**
71 * bnx2i_setup_write_cmd_bd_info - sets up BD various information
72 * @task: transport layer's cmd struct pointer
73 *
74 * identifies & marks various bd info for scsi command's immediate data,
75 * unsolicited data and first solicited data seq which includes BD start
76 * index & BD buf off. This function takes into account iscsi parameters such
77 * as whether immediate data and unsolicited data are supported on this connection.
78 */
79static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
80{
81 struct bnx2i_cmd *cmd = task->dd_data;
82 u32 start_bd_offset;
83 u32 start_bd_idx;
84 u32 buffer_offset = 0;
85 u32 cmd_len = cmd->req.total_data_transfer_length;
86
87 /* if ImmediateData is turned off & InitialR2T is turned on,
88 * there will be no immediate or unsolicited data, just return.
89 */
90 if (!iscsi_task_has_unsol_data(task) && !task->imm_count)
91 return;
92
93 /* Immediate data */
94 buffer_offset += task->imm_count;
95 if (task->imm_count == cmd_len)
96 return;
97
98 if (iscsi_task_has_unsol_data(task)) {
99 bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
100 &start_bd_offset, &start_bd_idx);
101 cmd->req.ud_buffer_offset = start_bd_offset;
102 cmd->req.ud_start_bd_index = start_bd_idx;
103 buffer_offset += task->unsol_r2t.data_length;
104 }
105
106 if (buffer_offset != cmd_len) {
107 bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
108 &start_bd_offset, &start_bd_idx);
109 if ((start_bd_offset > task->conn->session->first_burst) ||
110 (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) {
111 int i = 0;
112
113 iscsi_conn_printk(KERN_ALERT, task->conn,
114 "bnx2i- error, buf offset 0x%x "
115 "bd_valid %d use_sg %d\n",
116 buffer_offset, cmd->io_tbl.bd_valid,
117 scsi_sg_count(cmd->scsi_cmd));
118 for (i = 0; i < cmd->io_tbl.bd_valid; i++)
119 iscsi_conn_printk(KERN_ALERT, task->conn,
120 "bnx2i err, bd[%d]: len %x\n",
121 i, cmd->io_tbl.bd_tbl[i].\
122 buffer_length);
123 }
124 cmd->req.sd_buffer_offset = start_bd_offset;
125 cmd->req.sd_start_bd_index = start_bd_idx;
126 }
127}
128
129
130
131/**
132 * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table
133 * @hba: adapter instance
134 * @cmd: iscsi cmd struct pointer
135 *
136 * map SG list
137 */
138static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
139{
140 struct scsi_cmnd *sc = cmd->scsi_cmd;
141 struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
142 struct scatterlist *sg;
143 int byte_count = 0;
144 int bd_count = 0;
145 int sg_count;
146 int sg_len;
147 u64 addr;
148 int i;
149
150 BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD);
151
152 sg_count = scsi_dma_map(sc);
153
154 scsi_for_each_sg(sc, sg, sg_count, i) {
155 sg_len = sg_dma_len(sg);
156 addr = (u64) sg_dma_address(sg);
157 bd[bd_count].buffer_addr_lo = addr & 0xffffffff;
158 bd[bd_count].buffer_addr_hi = addr >> 32;
159 bd[bd_count].buffer_length = sg_len;
160 bd[bd_count].flags = 0;
161 if (bd_count == 0)
162 bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN;
163
164 byte_count += sg_len;
165 bd_count++;
166 }
167
168 if (bd_count)
169 bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;
170
171 BUG_ON(byte_count != scsi_bufflen(sc));
172 return bd_count;
173}
174
175/**
176 * bnx2i_iscsi_map_sg_list - maps SG list
177 * @cmd: iscsi cmd struct pointer
178 *
179 * creates BD list table for the command
180 */
181static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd)
182{
183 int bd_count;
184
185 bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd);
186 if (!bd_count) {
187 struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
188
189 bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0;
190 bd[0].buffer_length = bd[0].flags = 0;
191 }
192 cmd->io_tbl.bd_valid = bd_count;
193}
194
195
196/**
197 * bnx2i_iscsi_unmap_sg_list - unmaps SG list
198 * @cmd: iscsi cmd struct pointer
199 *
200 * unmap IO buffers and invalidate the BD table
201 */
202void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd)
203{
204 struct scsi_cmnd *sc = cmd->scsi_cmd;
205
206 if (cmd->io_tbl.bd_valid && sc) {
207 scsi_dma_unmap(sc);
208 cmd->io_tbl.bd_valid = 0;
209 }
210}
211
212static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
213{
214 memset(&cmd->req, 0x00, sizeof(cmd->req));
215 cmd->req.op_code = 0xFF;
216 cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma;
217 cmd->req.bd_list_addr_hi =
218 (u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32);
219
220}
221
222
223/**
224 * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid'
225 * @hba: pointer to adapter instance
226 * @bnx2i_conn: pointer to bnx2i connection structure
227 * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
228 *
229 * update iscsi cid table entry with connection pointer. This enables
230 * driver to quickly get hold of connection structure pointer in
231 * completion/interrupt thread using iscsi context ID
232 */
233static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba,
234 struct bnx2i_conn *bnx2i_conn,
235 u32 iscsi_cid)
236{
237 if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) {
238 iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
239 "conn bind - entry #%d not free\n", iscsi_cid);
240 return -EBUSY;
241 }
242
243 hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn;
244 return 0;
245}
246
247
248/**
249 * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr
250 * @hba: pointer to adapter instance
251 * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
252 */
253struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
254 u16 iscsi_cid)
255{
256 if (!hba->cid_que.conn_cid_tbl) {
257 printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n");
258 return NULL;
259
260 } else if (iscsi_cid >= hba->max_active_conns) {
261 printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid);
262 return NULL;
263 }
264 return hba->cid_que.conn_cid_tbl[iscsi_cid];
265}
266
267
268/**
269 * bnx2i_alloc_iscsi_cid - allocates an iscsi_cid from the free pool
270 * @hba: pointer to adapter instance
271 */
272static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba)
273{
274 int idx;
275
276 if (!hba->cid_que.cid_free_cnt)
277 return -1;
278
279 idx = hba->cid_que.cid_q_cons_idx;
280 hba->cid_que.cid_q_cons_idx++;
281 if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx)
282 hba->cid_que.cid_q_cons_idx = 0;
283
284 hba->cid_que.cid_free_cnt--;
285 return hba->cid_que.cid_que[idx];
286}
287
288
289/**
290 * bnx2i_free_iscsi_cid - returns an iscsi_cid to the free list
291 * @hba: pointer to adapter instance
292 * @iscsi_cid: iscsi context ID to free
293 */
294static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid)
295{
296 int idx;
297
298 if (iscsi_cid == (u16) -1)
299 return;
300
301 hba->cid_que.cid_free_cnt++;
302
303 idx = hba->cid_que.cid_q_prod_idx;
304 hba->cid_que.cid_que[idx] = iscsi_cid;
305 hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL;
306 hba->cid_que.cid_q_prod_idx++;
307 if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx)
308 hba->cid_que.cid_q_prod_idx = 0;
309}
310
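Taken together, the two routines above treat cid_que as a simple circular FIFO; the note below only restates what the code already does, no new behaviour is assumed:

/*
 * cid_que[] free-list invariants (as implemented above and initialised in
 * bnx2i_setup_free_cid_que() below):
 *
 *   - bnx2i_alloc_iscsi_cid() consumes at cid_q_cons_idx and decrements
 *     cid_free_cnt; it returns -1 (as a u32) when the pool is empty.
 *   - bnx2i_free_iscsi_cid() produces at cid_q_prod_idx, increments
 *     cid_free_cnt and clears the conn_cid_tbl[] back-pointer.
 *   - both indices wrap at cid_q_max_idx (== max_active_conns).
 */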
311
312/**
313 * bnx2i_setup_free_cid_que - sets up free iscsi cid queue
314 * @hba: pointer to adapter instance
315 *
316 * allocates memory for iscsi cid queue & 'cid - conn ptr' mapping table,
317 * and initializes table attributes
318 */
319static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba)
320{
321 int mem_size;
322 int i;
323
324 mem_size = hba->max_active_conns * sizeof(u32);
325 mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
326
327 hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL);
328 if (!hba->cid_que.cid_que_base)
329 return -ENOMEM;
330
331 mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *);
332 mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
333 hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL);
334 if (!hba->cid_que.conn_cid_tbl) {
335 kfree(hba->cid_que.cid_que_base);
336 hba->cid_que.cid_que_base = NULL;
337 return -ENOMEM;
338 }
339
340 hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base;
341 hba->cid_que.cid_q_prod_idx = 0;
342 hba->cid_que.cid_q_cons_idx = 0;
343 hba->cid_que.cid_q_max_idx = hba->max_active_conns;
344 hba->cid_que.cid_free_cnt = hba->max_active_conns;
345
346 for (i = 0; i < hba->max_active_conns; i++) {
347 hba->cid_que.cid_que[i] = i;
348 hba->cid_que.conn_cid_tbl[i] = NULL;
349 }
350 return 0;
351}
352
353
354/**
355 * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources
356 * @hba: pointer to adapter instance
357 */
358static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba)
359{
360 kfree(hba->cid_que.cid_que_base);
361 hba->cid_que.cid_que_base = NULL;
362
363 kfree(hba->cid_que.conn_cid_tbl);
364 hba->cid_que.conn_cid_tbl = NULL;
365}
366
367
368/**
369 * bnx2i_alloc_ep - allocates ep structure from global pool
370 * @hba: pointer to adapter instance
371 *
372 * routine allocates a free endpoint structure from global pool and
373 * a tcp port to be used for this connection. Global resource lock,
374 * 'bnx2i_resc_lock' is held while accessing shared global data structures
375 */
376static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
377{
378 struct iscsi_endpoint *ep;
379 struct bnx2i_endpoint *bnx2i_ep;
380
381 ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
382 if (!ep) {
383 printk(KERN_ERR "bnx2i: Could not allocate ep\n");
384 return NULL;
385 }
386
387 bnx2i_ep = ep->dd_data;
388 INIT_LIST_HEAD(&bnx2i_ep->link);
389 bnx2i_ep->state = EP_STATE_IDLE;
390 bnx2i_ep->hba = hba;
391 bnx2i_ep->hba_age = hba->age;
392 hba->ofld_conns_active++;
393 init_waitqueue_head(&bnx2i_ep->ofld_wait);
394 return ep;
395}
396
397
398/**
399 * bnx2i_free_ep - free endpoint
400 * @ep: pointer to iscsi endpoint structure
401 */
402static void bnx2i_free_ep(struct iscsi_endpoint *ep)
403{
404 struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
405 unsigned long flags;
406
407 spin_lock_irqsave(&bnx2i_resc_lock, flags);
408 bnx2i_ep->state = EP_STATE_IDLE;
409 bnx2i_ep->hba->ofld_conns_active--;
410
411 bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);
412 if (bnx2i_ep->conn) {
413 bnx2i_ep->conn->ep = NULL;
414 bnx2i_ep->conn = NULL;
415 }
416
417 bnx2i_ep->hba = NULL;
418 spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
419 iscsi_destroy_endpoint(ep);
420}
421
422
423/**
424 * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command
425 * @hba: adapter instance pointer
426 * @session: iscsi session pointer
427 * @cmd: iscsi command structure
428 */
429static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session,
430 struct bnx2i_cmd *cmd)
431{
432 struct io_bdt *io = &cmd->io_tbl;
433 struct iscsi_bd *bd;
434
435 io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
436 ISCSI_MAX_BDS_PER_CMD * sizeof(*bd),
437 &io->bd_tbl_dma, GFP_KERNEL);
438 if (!io->bd_tbl) {
439 iscsi_session_printk(KERN_ERR, session, "Could not "
440 "allocate bdt.\n");
441 return -ENOMEM;
442 }
443 io->bd_valid = 0;
444 return 0;
445}
446
447/**
448 * bnx2i_destroy_cmd_pool - destroys iscsi command pool and release BD table
449 * @hba: adapter instance pointer
450 * @session: iscsi session pointer
451 * @cmd: iscsi command structure
452 */
453static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba,
454 struct iscsi_session *session)
455{
456 int i;
457
458 for (i = 0; i < session->cmds_max; i++) {
459 struct iscsi_task *task = session->cmds[i];
460 struct bnx2i_cmd *cmd = task->dd_data;
461
462 if (cmd->io_tbl.bd_tbl)
463 dma_free_coherent(&hba->pcidev->dev,
464 ISCSI_MAX_BDS_PER_CMD *
465 sizeof(struct iscsi_bd),
466 cmd->io_tbl.bd_tbl,
467 cmd->io_tbl.bd_tbl_dma);
468 }
469
470}
471
472
473/**
474 * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session
475 * @hba: adapter instance pointer
476 * @session: iscsi session pointer
477 */
478static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
479 struct iscsi_session *session)
480{
481 int i;
482
483 for (i = 0; i < session->cmds_max; i++) {
484 struct iscsi_task *task = session->cmds[i];
485 struct bnx2i_cmd *cmd = task->dd_data;
486
487 /* Anil */
488 task->hdr = &cmd->hdr;
489 task->hdr_max = sizeof(struct iscsi_hdr);
490
491 if (bnx2i_alloc_bdt(hba, session, cmd))
492 goto free_bdts;
493 }
494
495 return 0;
496
497free_bdts:
498 bnx2i_destroy_cmd_pool(hba, session);
499 return -ENOMEM;
500}
501
502
503/**
504 * bnx2i_setup_mp_bdt - allocate BD table resources
505 * @hba: pointer to adapter structure
506 *
507 * Allocate memory for dummy buffer and associated BD
508 * table to be used by middle path (MP) requests
509 */
510static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
511{
512 int rc = 0;
513 struct iscsi_bd *mp_bdt;
514 u64 addr;
515
516 hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
517 &hba->mp_bd_dma, GFP_KERNEL);
518 if (!hba->mp_bd_tbl) {
519 printk(KERN_ERR "unable to allocate Middle Path BDT\n");
520 rc = -1;
521 goto out;
522 }
523
524 hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
525 &hba->dummy_buf_dma, GFP_KERNEL);
526 if (!hba->dummy_buffer) {
527 printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
528 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
529 hba->mp_bd_tbl, hba->mp_bd_dma);
530 hba->mp_bd_tbl = NULL;
531 rc = -1;
532 goto out;
533 }
534
535 mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl;
536 addr = (unsigned long) hba->dummy_buf_dma;
537 mp_bdt->buffer_addr_lo = addr & 0xffffffff;
538 mp_bdt->buffer_addr_hi = addr >> 32;
539 mp_bdt->buffer_length = PAGE_SIZE;
540 mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
541 ISCSI_BD_FIRST_IN_BD_CHAIN;
542out:
543 return rc;
544}
545
546
547/**
548 * bnx2i_free_mp_bdt - releases middle path (MP) BD table resources
549 * @hba: pointer to adapter instance
550 *
551 * free MP dummy buffer and associated BD table
552 */
553static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
554{
555 if (hba->mp_bd_tbl) {
556 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
557 hba->mp_bd_tbl, hba->mp_bd_dma);
558 hba->mp_bd_tbl = NULL;
559 }
560 if (hba->dummy_buffer) {
561 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
562 hba->dummy_buffer, hba->dummy_buf_dma);
563 hba->dummy_buffer = NULL;
564 }
565 return;
566}
567
568/**
569 * bnx2i_drop_session - notifies iscsid of connection error.
570 * @hba: adapter instance pointer
571 * @session: iscsi session pointer
572 *
573 * This notifies iscsid that there is an error, so it can initiate
574 * recovery.
575 *
576 * This relies on the caller using the iscsi class iterator so the object
577 * is refcounted and does not disappear from under us.
578 */
579void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
580{
581 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
582}
583
584/**
585 * bnx2i_ep_destroy_list_add - add an entry to EP destroy list
586 * @hba: pointer to adapter instance
587 * @ep: pointer to endpoint (transport identifier) structure
588 *
589 * EP destroy queue manager
590 */
591static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
592 struct bnx2i_endpoint *ep)
593{
594 write_lock_bh(&hba->ep_rdwr_lock);
595 list_add_tail(&ep->link, &hba->ep_destroy_list);
596 write_unlock_bh(&hba->ep_rdwr_lock);
597 return 0;
598}
599
600/**
601 * bnx2i_ep_destroy_list_del - removes an entry from the EP destroy list
602 *
603 * @hba: pointer to adapter instance
604 * @ep: pointer to endpoint (transport identifier) structure
605 *
606 * EP destroy queue manager
607 */
608static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
609 struct bnx2i_endpoint *ep)
610{
611 write_lock_bh(&hba->ep_rdwr_lock);
612 list_del_init(&ep->link);
613 write_unlock_bh(&hba->ep_rdwr_lock);
614
615 return 0;
616}
617
618/**
619 * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
620 * @hba: pointer to adapter instance
621 * @ep: pointer to endpoint (transport identifier) structure
622 *
623 * pending conn offload completion queue manager
624 */
625static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
626 struct bnx2i_endpoint *ep)
627{
628 write_lock_bh(&hba->ep_rdwr_lock);
629 list_add_tail(&ep->link, &hba->ep_ofld_list);
630 write_unlock_bh(&hba->ep_rdwr_lock);
631 return 0;
632}
633
634/**
635 * bnx2i_ep_ofld_list_del - removes an entry from the ep offload pending list
636 * @hba: pointer to adapter instance
637 * @ep: pointer to endpoint (transport identifier) structure
638 *
639 * pending conn offload completion queue manager
640 */
641static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
642 struct bnx2i_endpoint *ep)
643{
644 write_lock_bh(&hba->ep_rdwr_lock);
645 list_del_init(&ep->link);
646 write_unlock_bh(&hba->ep_rdwr_lock);
647 return 0;
648}
649
650
651/**
652 * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints
653 *
654 * @hba: pointer to adapter instance
655 * @iscsi_cid: iscsi context ID to find
656 *
657 */
658struct bnx2i_endpoint *
659bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
660{
661 struct list_head *list;
662 struct list_head *tmp;
663 struct bnx2i_endpoint *ep;
664
665 read_lock_bh(&hba->ep_rdwr_lock);
666 list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
667 ep = (struct bnx2i_endpoint *)list;
668
669 if (ep->ep_iscsi_cid == iscsi_cid)
670 break;
671 ep = NULL;
672 }
673 read_unlock_bh(&hba->ep_rdwr_lock);
674
675 if (!ep)
676 printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
677 return ep;
678}
679
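The raw (struct bnx2i_endpoint *)list cast above (and in the destroy-list variant below) works only if 'link' is the first member of struct bnx2i_endpoint, which the driver's bnx2i.h presumably guarantees. An equivalent, cast-free sketch using list_for_each_entry() would look like this (illustrative only, not part of the driver):

static struct bnx2i_endpoint *
example_find_ep(struct bnx2i_hba *hba, u32 iscsi_cid)
{
	struct bnx2i_endpoint *ep, *found = NULL;

	read_lock_bh(&hba->ep_rdwr_lock);
	list_for_each_entry(ep, &hba->ep_ofld_list, link) {
		if (ep->ep_iscsi_cid == iscsi_cid) {
			found = ep;
			break;
		}
	}
	read_unlock_bh(&hba->ep_rdwr_lock);
	return found;
}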
680
681/**
682 * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list
683 * @hba: pointer to adapter instance
684 * @iscsi_cid: iscsi context ID to find
685 *
686 */
687struct bnx2i_endpoint *
688bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
689{
690 struct list_head *list;
691 struct list_head *tmp;
692 struct bnx2i_endpoint *ep;
693
694 read_lock_bh(&hba->ep_rdwr_lock);
695 list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
696 ep = (struct bnx2i_endpoint *)list;
697
698 if (ep->ep_iscsi_cid == iscsi_cid)
699 break;
700 ep = NULL;
701 }
702 read_unlock_bh(&hba->ep_rdwr_lock);
703
704 if (!ep)
705 printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
706
707 return ep;
708}
709
710/**
711 * bnx2i_setup_host_queue_size - assigns shost->can_queue param
712 * @hba: pointer to adapter instance
713 * @shost: scsi host pointer
714 *
715 * Initializes the 'can_queue' parameter based on how many outstanding commands
716 * the device can handle. Each device type (5708/5709/57710) has different
717 * capabilities
718 */
719static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
720 struct Scsi_Host *shost)
721{
722 if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type))
723 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
724 else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
725 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709;
726 else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
727 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710;
728 else
729 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
730}
731
732
733/**
734 * bnx2i_alloc_hba - allocate and init adapter instance
735 * @cnic: cnic device pointer
736 *
737 * allocate & initialize adapter structure and call other
738 * support routines to do per adapter initialization
739 */
740struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
741{
742 struct Scsi_Host *shost;
743 struct bnx2i_hba *hba;
744
745 shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
746 if (!shost)
747 return NULL;
748 shost->dma_boundary = cnic->pcidev->dma_mask;
749 shost->transportt = bnx2i_scsi_xport_template;
750 shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
751 shost->max_channel = 0;
752 shost->max_lun = 512;
753 shost->max_cmd_len = 16;
754
755 hba = iscsi_host_priv(shost);
756 hba->shost = shost;
757 hba->netdev = cnic->netdev;
758 /* Get PCI related information and update hba struct members */
759 hba->pcidev = cnic->pcidev;
760 pci_dev_get(hba->pcidev);
761 hba->pci_did = hba->pcidev->device;
762 hba->pci_vid = hba->pcidev->vendor;
763 hba->pci_sdid = hba->pcidev->subsystem_device;
764 hba->pci_svid = hba->pcidev->subsystem_vendor;
765 hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
766 hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
767 bnx2i_identify_device(hba);
768
769 bnx2i_identify_device(hba);
770 bnx2i_setup_host_queue_size(hba, shost);
771
772 if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
773 hba->regview = ioremap_nocache(hba->netdev->base_addr,
774 BNX2_MQ_CONFIG2);
775 if (!hba->regview)
776 goto ioreg_map_err;
777 } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
778 hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
779 if (!hba->regview)
780 goto ioreg_map_err;
781 }
782
783 if (bnx2i_setup_mp_bdt(hba))
784 goto mp_bdt_mem_err;
785
786 INIT_LIST_HEAD(&hba->ep_ofld_list);
787 INIT_LIST_HEAD(&hba->ep_destroy_list);
788 rwlock_init(&hba->ep_rdwr_lock);
789
790 hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;
791
792 /* different values for 5708/5709/57710 */
793 hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;
794
795 if (bnx2i_setup_free_cid_que(hba))
796 goto cid_que_err;
797
798 /* SQ/RQ/CQ size can be changed via sysfs interface */
799 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
800 if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX)
801 hba->max_sqes = sq_size;
802 else
803 hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT;
804 } else { /* 5706/5708/5709 */
805 if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX)
806 hba->max_sqes = sq_size;
807 else
808 hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT;
809 }
810
811 hba->max_rqes = rq_size;
812 hba->max_cqes = hba->max_sqes + rq_size;
813 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
814 if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX)
815 hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX;
816 } else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX)
817 hba->max_cqes = BNX2I_570X_CQ_WQES_MAX;
818
819 hba->num_ccell = hba->max_sqes / 2;
820
821 spin_lock_init(&hba->lock);
822 mutex_init(&hba->net_dev_lock);
823
824 if (iscsi_host_add(shost, &hba->pcidev->dev))
825 goto free_dump_mem;
826 return hba;
827
828free_dump_mem:
829 bnx2i_release_free_cid_que(hba);
830cid_que_err:
831 bnx2i_free_mp_bdt(hba);
832mp_bdt_mem_err:
833 if (hba->regview) {
834 iounmap(hba->regview);
835 hba->regview = NULL;
836 }
837ioreg_map_err:
838 pci_dev_put(hba->pcidev);
839 scsi_host_put(shost);
840 return NULL;
841}
842
843/**
844 * bnx2i_free_hba - releases hba structure and resources held by the adapter
845 * @hba: pointer to adapter instance
846 *
847 * free adapter structure and call various cleanup routines.
848 */
849void bnx2i_free_hba(struct bnx2i_hba *hba)
850{
851 struct Scsi_Host *shost = hba->shost;
852
853 iscsi_host_remove(shost);
854 INIT_LIST_HEAD(&hba->ep_ofld_list);
855 INIT_LIST_HEAD(&hba->ep_destroy_list);
856 pci_dev_put(hba->pcidev);
857
858 if (hba->regview) {
859 iounmap(hba->regview);
860 hba->regview = NULL;
861 }
862 bnx2i_free_mp_bdt(hba);
863 bnx2i_release_free_cid_que(hba);
864 iscsi_host_free(shost);
865}
866
867/**
868 * bnx2i_conn_free_login_resources - free DMA resources used for login process
869 * @hba: pointer to adapter instance
870 * @bnx2i_conn: iscsi connection pointer
871 *
872 * Login related resources, mostly BDT & payload DMA memory, are freed
873 */
874static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
875 struct bnx2i_conn *bnx2i_conn)
876{
877 if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
878 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
879 bnx2i_conn->gen_pdu.resp_bd_tbl,
880 bnx2i_conn->gen_pdu.resp_bd_dma);
881 bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
882 }
883
884 if (bnx2i_conn->gen_pdu.req_bd_tbl) {
885 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
886 bnx2i_conn->gen_pdu.req_bd_tbl,
887 bnx2i_conn->gen_pdu.req_bd_dma);
888 bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
889 }
890
891 if (bnx2i_conn->gen_pdu.resp_buf) {
892 dma_free_coherent(&hba->pcidev->dev,
893 ISCSI_DEF_MAX_RECV_SEG_LEN,
894 bnx2i_conn->gen_pdu.resp_buf,
895 bnx2i_conn->gen_pdu.resp_dma_addr);
896 bnx2i_conn->gen_pdu.resp_buf = NULL;
897 }
898
899 if (bnx2i_conn->gen_pdu.req_buf) {
900 dma_free_coherent(&hba->pcidev->dev,
901 ISCSI_DEF_MAX_RECV_SEG_LEN,
902 bnx2i_conn->gen_pdu.req_buf,
903 bnx2i_conn->gen_pdu.req_dma_addr);
904 bnx2i_conn->gen_pdu.req_buf = NULL;
905 }
906}
907
908/**
909 * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop.
910 * @hba: pointer to adapter instance
911 * @bnx2i_conn: iscsi connection pointer
912 *
913 * Mgmt task DMA resources are allocated in this routine.
914 */
915static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
916 struct bnx2i_conn *bnx2i_conn)
917{
918 /* Allocate memory for login request/response buffers */
919 bnx2i_conn->gen_pdu.req_buf =
920 dma_alloc_coherent(&hba->pcidev->dev,
921 ISCSI_DEF_MAX_RECV_SEG_LEN,
922 &bnx2i_conn->gen_pdu.req_dma_addr,
923 GFP_KERNEL);
924 if (bnx2i_conn->gen_pdu.req_buf == NULL)
925 goto login_req_buf_failure;
926
927 bnx2i_conn->gen_pdu.req_buf_size = 0;
928 bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf;
929
930 bnx2i_conn->gen_pdu.resp_buf =
931 dma_alloc_coherent(&hba->pcidev->dev,
932 ISCSI_DEF_MAX_RECV_SEG_LEN,
933 &bnx2i_conn->gen_pdu.resp_dma_addr,
934 GFP_KERNEL);
935 if (bnx2i_conn->gen_pdu.resp_buf == NULL)
936 goto login_resp_buf_failure;
937
938 bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
939 bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
940
941 bnx2i_conn->gen_pdu.req_bd_tbl =
942 dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
943 &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
944 if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
945 goto login_req_bd_tbl_failure;
946
947 bnx2i_conn->gen_pdu.resp_bd_tbl =
948 dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
949 &bnx2i_conn->gen_pdu.resp_bd_dma,
950 GFP_KERNEL);
951 if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
952 goto login_resp_bd_tbl_failure;
953
954 return 0;
955
956login_resp_bd_tbl_failure:
957 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
958 bnx2i_conn->gen_pdu.req_bd_tbl,
959 bnx2i_conn->gen_pdu.req_bd_dma);
960 bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
961
962login_req_bd_tbl_failure:
963 dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
964 bnx2i_conn->gen_pdu.resp_buf,
965 bnx2i_conn->gen_pdu.resp_dma_addr);
966 bnx2i_conn->gen_pdu.resp_buf = NULL;
967login_resp_buf_failure:
968 dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
969 bnx2i_conn->gen_pdu.req_buf,
970 bnx2i_conn->gen_pdu.req_dma_addr);
971 bnx2i_conn->gen_pdu.req_buf = NULL;
972login_req_buf_failure:
973 iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data,
974 "login resource alloc failed!!\n");
975 return -ENOMEM;
976
977}
978
979
980/**
981 * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table.
982 * @bnx2i_conn: iscsi connection pointer
983 *
984 * Prepares the previously allocated buffers and BD tables before shipping
985 * requests to cnic for PDUs prepared by the 'iscsid' daemon
986 */
987static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
988{
989 struct iscsi_bd *bd_tbl;
990
991 bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;
992
993 bd_tbl->buffer_addr_hi =
994 (u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
995 bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
996 bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
997 bnx2i_conn->gen_pdu.req_buf;
998 bd_tbl->reserved0 = 0;
999 bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
1000 ISCSI_BD_FIRST_IN_BD_CHAIN;
1001
1002 bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.resp_bd_tbl;
1003 bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32;
1004 bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
1005 bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
1006 bd_tbl->reserved0 = 0;
1007 bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
1008 ISCSI_BD_FIRST_IN_BD_CHAIN;
1009}
1010
1011
1012/**
1013 * bnx2i_iscsi_send_generic_request - called to send mgmt tasks.
1014 * @task: transport layer task pointer
1015 *
1016 * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI login,
1017 * Nop-out and Logout requests flow through this path.
1018 */
1019static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
1020{
1021 struct bnx2i_cmd *cmd = task->dd_data;
1022 struct bnx2i_conn *bnx2i_conn = cmd->conn;
1023 int rc = 0;
1024 char *buf;
1025 int data_len;
1026
1027 bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn);
1028 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
1029 case ISCSI_OP_LOGIN:
1030 bnx2i_send_iscsi_login(bnx2i_conn, task);
1031 break;
1032 case ISCSI_OP_NOOP_OUT:
1033 data_len = bnx2i_conn->gen_pdu.req_buf_size;
1034 buf = bnx2i_conn->gen_pdu.req_buf;
1035 if (data_len)
1036 rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
1037 RESERVED_ITT,
1038 buf, data_len, 1);
1039 else
1040 rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
1041 RESERVED_ITT,
1042 NULL, 0, 1);
1043 break;
1044 case ISCSI_OP_LOGOUT:
1045 rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
1046 break;
1047 case ISCSI_OP_SCSI_TMFUNC:
1048 rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
1049 break;
1050 default:
1051 iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
1052 "send_gen: unsupported op 0x%x\n",
1053 task->hdr->opcode);
1054 }
1055 return rc;
1056}
1057
1058
1059/**********************************************************************
1060 * SCSI-ML Interface
1061 **********************************************************************/
1062
1063/**
1064 * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe
1065 * @sc: SCSI-ML command pointer
1066 * @cmd: iscsi cmd pointer
1067 */
1068static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd)
1069{
1070 u32 dword;
1071 int lpcnt;
1072 u8 *srcp;
1073 u32 *dstp;
1074 u32 scsi_lun[2];
1075
1076 int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
1077 cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
1078 cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);
1079
1080 lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
1081 srcp = (u8 *) sc->cmnd;
1082 dstp = (u32 *) cmd->req.cdb;
1083 while (lpcnt--) {
1084 memcpy(&dword, (const void *) srcp, 4);
1085 *dstp = cpu_to_be32(dword);
1086 srcp += 4;
1087 dstp++;
1088 }
1089 if (sc->cmd_len & 0x3) {
1090 dword = (u32) srcp[0] | ((u32) srcp[1] << 8);
1091 *dstp = cpu_to_be32(dword);
1092 }
1093}
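As a concrete illustration of the copy above (the CDB length is just an example and not taken from the driver):

/*
 * Example: a 10-byte CDB (e.g. READ(10)).
 *
 *   lpcnt = 10 / 4 = 2  -> two full dwords are byte-swapped into the first
 *                          two dwords of cmd->req.cdb
 *   10 & 0x3 = 2        -> the remaining two CDB bytes are packed into a
 *                          third dword and stored after them
 */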
1094
1095static void bnx2i_cleanup_task(struct iscsi_task *task)
1096{
1097 struct iscsi_conn *conn = task->conn;
1098 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1099 struct bnx2i_hba *hba = bnx2i_conn->hba;
1100
1101 /*
1102 * mgmt task or cmd was never sent to us to transmit.
1103 */
1104 if (!task->sc || task->state == ISCSI_TASK_PENDING)
1105 return;
1106 /*
1107 * need to clean-up task context to claim dma buffers
1108 */
1109 if (task->state == ISCSI_TASK_ABRT_TMF) {
1110 bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
1111
1112 spin_unlock_bh(&conn->session->lock);
1113 wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
1114 msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
1115 spin_lock_bh(&conn->session->lock);
1116 }
1117 bnx2i_iscsi_unmap_sg_list(task->dd_data);
1118}
1119
1120/**
1121 * bnx2i_mtask_xmit - transmit mtask to chip for further processing
1122 * @conn: transport layer conn structure pointer
1123 * @task: transport layer command structure pointer
1124 */
1125static int
1126bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
1127{
1128 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1129 struct bnx2i_cmd *cmd = task->dd_data;
1130
1131 memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
1132
1133 bnx2i_setup_cmd_wqe_template(cmd);
1134 bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
1135 if (task->data_count) {
1136 memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
1137 task->data_count);
1138 bnx2i_conn->gen_pdu.req_wr_ptr =
1139 bnx2i_conn->gen_pdu.req_buf + task->data_count;
1140 }
1141 cmd->conn = conn->dd_data;
1142 cmd->scsi_cmd = NULL;
1143 return bnx2i_iscsi_send_generic_request(task);
1144}
1145
1146/**
1147 * bnx2i_task_xmit - transmit iscsi command to chip for further processing
1148 * @task: transport layer command structure pointer
1149 *
1150 * maps SG buffers and send request to chip/firmware in the form of SQ WQE
1151 */
1152static int bnx2i_task_xmit(struct iscsi_task *task)
1153{
1154 struct iscsi_conn *conn = task->conn;
1155 struct iscsi_session *session = conn->session;
1156 struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
1157 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1158 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1159 struct scsi_cmnd *sc = task->sc;
1160 struct bnx2i_cmd *cmd = task->dd_data;
1161 struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
1162
1163 if (test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
1164 return -ENOTCONN;
1165
1166 if (!bnx2i_conn->is_bound)
1167 return -ENOTCONN;
1168
1169 /*
1170 * If there is no scsi_cmnd this must be a mgmt task
1171 */
1172 if (!sc)
1173 return bnx2i_mtask_xmit(conn, task);
1174
1175 bnx2i_setup_cmd_wqe_template(cmd);
1176 cmd->req.op_code = ISCSI_OP_SCSI_CMD;
1177 cmd->conn = bnx2i_conn;
1178 cmd->scsi_cmd = sc;
1179 cmd->req.total_data_transfer_length = scsi_bufflen(sc);
1180 cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);
1181
1182 bnx2i_iscsi_map_sg_list(cmd);
1183 bnx2i_cpy_scsi_cdb(sc, cmd);
1184
1185 cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
1186 if (sc->sc_data_direction == DMA_TO_DEVICE) {
1187 cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
1188 cmd->req.itt = task->itt |
1189 (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
1190 bnx2i_setup_write_cmd_bd_info(task);
1191 } else {
1192 if (scsi_bufflen(sc))
1193 cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
1194 cmd->req.itt = task->itt |
1195 (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
1196 }
1197
1198 cmd->req.num_bds = cmd->io_tbl.bd_valid;
1199 if (!cmd->io_tbl.bd_valid) {
1200 cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
1201 cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
1202 cmd->req.num_bds = 1;
1203 }
1204
1205 bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd);
1206 return 0;
1207}
1208
1209/**
1210 * bnx2i_session_create - create a new iscsi session
1211 * @cmds_max: max commands supported
1212 * @qdepth: scsi queue depth to support
1213 * @initial_cmdsn: initial iscsi CMDSN to be used for this session
1214 *
1215 * Creates a new iSCSI session instance on given device.
1216 */
1217static struct iscsi_cls_session *
1218bnx2i_session_create(struct iscsi_endpoint *ep,
1219 uint16_t cmds_max, uint16_t qdepth,
1220 uint32_t initial_cmdsn)
1221{
1222 struct Scsi_Host *shost;
1223 struct iscsi_cls_session *cls_session;
1224 struct bnx2i_hba *hba;
1225 struct bnx2i_endpoint *bnx2i_ep;
1226
1227 if (!ep) {
1228 printk(KERN_ERR "bnx2i: missing ep.\n");
1229 return NULL;
1230 }
1231
1232 bnx2i_ep = ep->dd_data;
1233 shost = bnx2i_ep->hba->shost;
1234 hba = iscsi_host_priv(shost);
1235 if (bnx2i_adapter_ready(hba))
1236 return NULL;
1237
1238 /*
1239 * user can override hw limit as long as it is within
1240 * the min/max.
1241 */
1242 if (cmds_max > hba->max_sqes)
1243 cmds_max = hba->max_sqes;
1244 else if (cmds_max < BNX2I_SQ_WQES_MIN)
1245 cmds_max = BNX2I_SQ_WQES_MIN;
1246
1247 cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
1248 cmds_max, sizeof(struct bnx2i_cmd),
1249 initial_cmdsn, ISCSI_MAX_TARGET);
1250 if (!cls_session)
1251 return NULL;
1252
1253 if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data))
1254 goto session_teardown;
1255 return cls_session;
1256
1257session_teardown:
1258 iscsi_session_teardown(cls_session);
1259 return NULL;
1260}
1261
1262
1263/**
1264 * bnx2i_session_destroy - destroys iscsi session
1265 * @cls_session: pointer to iscsi cls session
1266 *
1267 * Destroys previously created iSCSI session instance and releases
1268 * all resources held by it
1269 */
1270static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
1271{
1272 struct iscsi_session *session = cls_session->dd_data;
1273 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1274 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1275
1276 bnx2i_destroy_cmd_pool(hba, session);
1277 iscsi_session_teardown(cls_session);
1278}
1279
1280
1281/**
1282 * bnx2i_conn_create - create iscsi connection instance
1283 * @cls_session: pointer to iscsi cls session
1284 * @cid: iscsi cid as per rfc (not NX2's CID terminology)
1285 *
1286 * Creates a new iSCSI connection instance for a given session
1287 */
1288static struct iscsi_cls_conn *
1289bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
1290{
1291 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1292 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1293 struct bnx2i_conn *bnx2i_conn;
1294 struct iscsi_cls_conn *cls_conn;
1295 struct iscsi_conn *conn;
1296
1297 cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn),
1298 cid);
1299 if (!cls_conn)
1300 return NULL;
1301 conn = cls_conn->dd_data;
1302
1303 bnx2i_conn = conn->dd_data;
1304 bnx2i_conn->cls_conn = cls_conn;
1305 bnx2i_conn->hba = hba;
1306 /* 'ep' ptr will be assigned in bind() call */
1307 bnx2i_conn->ep = NULL;
1308 init_completion(&bnx2i_conn->cmd_cleanup_cmpl);
1309
1310 if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) {
1311 iscsi_conn_printk(KERN_ALERT, conn,
1312 "conn_new: login resc alloc failed!!\n");
1313 goto free_conn;
1314 }
1315
1316 return cls_conn;
1317
1318free_conn:
1319 iscsi_conn_teardown(cls_conn);
1320 return NULL;
1321}
1322
1323/**
1324 * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together
1325 * @cls_session: pointer to iscsi cls session
1326 * @cls_conn: pointer to iscsi cls conn
1327 * @transport_fd: 64-bit EP handle
1328 * @is_leading: leading connection on this session?
1329 *
1330 * Binds together iSCSI session instance, iSCSI connection instance
1331 * and the TCP connection. This routine returns an error code if the
1332 * TCP connection does not belong to the device the iSCSI sess/conn
1333 * is bound to
1334 */
1335static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
1336 struct iscsi_cls_conn *cls_conn,
1337 uint64_t transport_fd, int is_leading)
1338{
1339 struct iscsi_conn *conn = cls_conn->dd_data;
1340 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1341 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1342 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1343 struct bnx2i_endpoint *bnx2i_ep;
1344 struct iscsi_endpoint *ep;
1345 int ret_code;
1346
1347 ep = iscsi_lookup_endpoint(transport_fd);
1348 if (!ep)
1349 return -EINVAL;
1350
1351 bnx2i_ep = ep->dd_data;
1352 if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
1353 (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
1354 /* Peer disconnect via FIN or RST */
1355 return -EINVAL;
1356
1357 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1358 return -EINVAL;
1359
1360 if (bnx2i_ep->hba != hba) {
1361 /* Error - TCP connection does not belong to this device
1362 */
1363 iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
1364 "conn bind, ep=0x%p (%s) does not",
1365 bnx2i_ep, bnx2i_ep->hba->netdev->name);
1366 iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
1367 "belong to hba (%s)\n",
1368 hba->netdev->name);
1369 return -EEXIST;
1370 }
1371
1372 bnx2i_ep->conn = bnx2i_conn;
1373 bnx2i_conn->ep = bnx2i_ep;
1374 bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
1375 bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;
1376 bnx2i_conn->is_bound = 1;
1377
1378 ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
1379 bnx2i_ep->ep_iscsi_cid);
1380
1381 /* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710
1382 * driver needs to explicitly replenish RQ index during setup.
1383 */
1384 if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
1385 bnx2i_put_rq_buf(bnx2i_conn, 0);
1386
1387 bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
1388 return ret_code;
1389}
1390
1391
1392/**
1393 * bnx2i_conn_destroy - destroy iscsi connection instance & release resources
1394 * @cls_conn: pointer to iscsi cls conn
1395 *
1396 * Destroy an iSCSI connection instance and release memory resources held by
1397 * this connection
1398 */
1399static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
1400{
1401 struct iscsi_conn *conn = cls_conn->dd_data;
1402 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1403 struct Scsi_Host *shost;
1404 struct bnx2i_hba *hba;
1405
1406 shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
1407 hba = iscsi_host_priv(shost);
1408
1409 bnx2i_conn_free_login_resources(hba, bnx2i_conn);
1410 iscsi_conn_teardown(cls_conn);
1411}
1412
1413
1414/**
1415 * bnx2i_conn_get_param - return iscsi connection parameter to caller
1416 * @cls_conn: pointer to iscsi cls conn
1417 * @param: parameter type identifier
1418 * @buf: buffer pointer
1419 *
1420 * returns iSCSI connection parameters
1421 */
1422static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
1423 enum iscsi_param param, char *buf)
1424{
1425 struct iscsi_conn *conn = cls_conn->dd_data;
1426 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1427 int len = 0;
1428
1429 switch (param) {
1430 case ISCSI_PARAM_CONN_PORT:
1431 if (bnx2i_conn->ep)
1432 len = sprintf(buf, "%hu\n",
1433 bnx2i_conn->ep->cm_sk->dst_port);
1434 break;
1435 case ISCSI_PARAM_CONN_ADDRESS:
1436 if (bnx2i_conn->ep)
1437 len = sprintf(buf, NIPQUAD_FMT "\n",
1438 NIPQUAD(bnx2i_conn->ep->cm_sk->dst_ip));
1439 break;
1440 default:
1441 return iscsi_conn_get_param(cls_conn, param, buf);
1442 }
1443
1444 return len;
1445}
1446
1447/**
1448 * bnx2i_host_get_param - returns host (adapter) related parameters
1449 * @shost: scsi host pointer
1450 * @param: parameter type identifier
1451 * @buf: buffer pointer
1452 */
1453static int bnx2i_host_get_param(struct Scsi_Host *shost,
1454 enum iscsi_host_param param, char *buf)
1455{
1456 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1457 int len = 0;
1458
1459 switch (param) {
1460 case ISCSI_HOST_PARAM_HWADDRESS:
1461 len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6);
1462 break;
1463 case ISCSI_HOST_PARAM_NETDEV_NAME:
1464 len = sprintf(buf, "%s\n", hba->netdev->name);
1465 break;
1466 default:
1467 return iscsi_host_get_param(shost, param, buf);
1468 }
1469 return len;
1470}
1471
1472/**
1473 * bnx2i_conn_start - completes iscsi connection migration to FFP
1474 * @cls_conn: pointer to iscsi cls conn
1475 *
1476 * last call in FFP migration to handover iscsi conn to the driver
1477 */
1478static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
1479{
1480 struct iscsi_conn *conn = cls_conn->dd_data;
1481 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1482
1483 bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START;
1484 bnx2i_update_iscsi_conn(conn);
1485
1486 /*
1487 * this should normally not sleep for a long time so it should
1488 * not disrupt the caller.
1489 */
1490 bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
1491 bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1492 bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep;
1493 add_timer(&bnx2i_conn->ep->ofld_timer);
1494 /* update iSCSI context for this conn, wait for CNIC to complete */
1495 wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
1496 bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START);
1497
1498 if (signal_pending(current))
1499 flush_signals(current);
1500 del_timer_sync(&bnx2i_conn->ep->ofld_timer);
1501
1502 iscsi_conn_start(cls_conn);
1503 return 0;
1504}
1505
1506
1507/**
1508 * bnx2i_conn_get_stats - returns iSCSI stats
1509 * @cls_conn: pointer to iscsi cls conn
1510 * @stats: pointer to iscsi statistic struct
1511 */
1512static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
1513 struct iscsi_stats *stats)
1514{
1515 struct iscsi_conn *conn = cls_conn->dd_data;
1516
1517 stats->txdata_octets = conn->txdata_octets;
1518 stats->rxdata_octets = conn->rxdata_octets;
1519 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
1520 stats->dataout_pdus = conn->dataout_pdus_cnt;
1521 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
1522 stats->datain_pdus = conn->datain_pdus_cnt;
1523 stats->r2t_pdus = conn->r2t_pdus_cnt;
1524 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
1525 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
1526 stats->digest_err = 0;
1527 stats->timeout_err = 0;
1528 /* single custom counter: number of SCSI eh_abort requests seen */
1529 stats->custom_length = 1;
1530 strcpy(stats->custom[0].desc, "eh_abort_cnt");
1531 stats->custom[0].value = conn->eh_abort_cnt;
1532}
1533
1534
1535/**
1536 * bnx2i_check_route - checks if target IP route belongs to one of the NX2 devices
1537 * @dst_addr: target IP address
1538 *
1539 * check if the route to the given target resolves to a BNX2 device
1540 */
1541static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
1542{
1543 struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
1544 struct bnx2i_hba *hba;
1545 struct cnic_dev *cnic = NULL;
1546
1547 bnx2i_reg_dev_all();
1548
1549 hba = get_adapter_list_head();
1550 if (hba && hba->cnic)
1551 cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
1552 if (!cnic) {
1553 printk(KERN_ALERT "bnx2i: no route, "
1554 "can't connect using cnic\n");
1555 goto no_nx2_route;
1556 }
1557 hba = bnx2i_find_hba_for_cnic(cnic);
1558 if (!hba)
1559 goto no_nx2_route;
1560
1561 if (bnx2i_adapter_ready(hba)) {
1562 printk(KERN_ALERT "bnx2i: check route, hba not ready\n");
1563 goto no_nx2_route;
1564 }
1565 if (hba->netdev->mtu > hba->mtu_supported) {
1566 printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
1567 hba->netdev->name, hba->netdev->mtu);
1568 printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
1569 hba->mtu_supported);
1570 goto no_nx2_route;
1571 }
1572 return hba;
1573no_nx2_route:
1574 return NULL;
1575}
1576
1577
1578/**
1579 * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
1580 * @hba: pointer to adapter instance
1581 * @ep: endpoint (transport identifier) structure
1582 *
1583 * destroys the cm_sock structure and the on-chip iscsi context
1584 */
1585static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
1586 struct bnx2i_endpoint *ep)
1587{
1588 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
1589 hba->cnic->cm_destroy(ep->cm_sk);
1590
1591 if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
1592 ep->state = EP_STATE_DISCONN_COMPL;
1593
1594 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
1595 ep->state == EP_STATE_DISCONN_TIMEDOUT) {
1596 printk(KERN_ALERT "bnx2i - ERROR - please submit GRC Dump,"
1597 " NW/PCIe trace, driver msgs to developers"
1598 " for analysis\n");
1599 return 1;
1600 }
1601
1602 ep->state = EP_STATE_CLEANUP_START;
1603 init_timer(&ep->ofld_timer);
1604 ep->ofld_timer.expires = 10*HZ + jiffies;
1605 ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1606 ep->ofld_timer.data = (unsigned long) ep;
1607 add_timer(&ep->ofld_timer);
1608
1609 bnx2i_ep_destroy_list_add(hba, ep);
1610
1611 /* destroy iSCSI context, wait for it to complete */
1612 bnx2i_send_conn_destroy(hba, ep);
1613 wait_event_interruptible(ep->ofld_wait,
1614 (ep->state != EP_STATE_CLEANUP_START));
1615
1616 if (signal_pending(current))
1617 flush_signals(current);
1618 del_timer_sync(&ep->ofld_timer);
1619
1620 bnx2i_ep_destroy_list_del(hba, ep);
1621
1622 if (ep->state != EP_STATE_CLEANUP_CMPL)
1623 /* should never happen */
1624 printk(KERN_ALERT "bnx2i - conn destroy failed\n");
1625
1626 return 0;
1627}
1628
1629
1630/**
1631 * bnx2i_ep_connect - establish TCP connection to target portal
1632 * @shost: scsi host
1633 * @dst_addr: target IP address
1634 * @non_blocking: blocking or non-blocking call
1635 *
1636 * this routine initiates the TCP/IP connection by invoking the Option-2
1637 * interface with l5_core and the CNIC. This is a multi-step process of
1638 * resolving the route to the target, creating an iSCSI connection context,
1639 * handshaking with the CNIC module to create/initialize the socket struct,
1640 * and finally sending down an option-2 request to complete the TCP 3-way handshake
1641 */
1642static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
1643 struct sockaddr *dst_addr,
1644 int non_blocking)
1645{
1646 u32 iscsi_cid = BNX2I_CID_RESERVED;
1647 struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
1648 struct sockaddr_in6 *desti6;
1649 struct bnx2i_endpoint *bnx2i_ep;
1650 struct bnx2i_hba *hba;
1651 struct cnic_dev *cnic;
1652 struct cnic_sockaddr saddr;
1653 struct iscsi_endpoint *ep;
1654 int rc = 0;
1655
1656 if (shost)
1657 /* driver is given scsi host to work with */
1658 hba = iscsi_host_priv(shost);
1659 else
1660 /*
1661 * check if the given destination can be reached through
1662 * an iSCSI-capable NetXtreme II device
1663 */
1664 hba = bnx2i_check_route(dst_addr);
1665 if (!hba) {
1666 rc = -ENOMEM;
1667 goto check_busy;
1668 }
1669
1670 cnic = hba->cnic;
1671 ep = bnx2i_alloc_ep(hba);
1672 if (!ep) {
1673 rc = -ENOMEM;
1674 goto check_busy;
1675 }
1676 bnx2i_ep = ep->dd_data;
1677
1678 mutex_lock(&hba->net_dev_lock);
1679 if (bnx2i_adapter_ready(hba)) {
1680 rc = -EPERM;
1681 goto net_if_down;
1682 }
1683
1684 bnx2i_ep->state = EP_STATE_IDLE;
1685 bnx2i_ep->ep_iscsi_cid = (u16) -1;
1686 bnx2i_ep->num_active_cmds = 0;
1687 iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
1688 if (iscsi_cid == -1) {
1689 printk(KERN_ALERT "alloc_ep: unable to allocate iscsi cid\n");
1690 rc = -ENOMEM;
1691 goto iscsi_cid_err;
1692 }
1693 bnx2i_ep->hba_age = hba->age;
1694
1695 rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
1696 if (rc != 0) {
1697 printk(KERN_ALERT "bnx2i: ep_conn, alloc QP resc error\n");
1698 rc = -ENOMEM;
1699 goto qp_resc_err;
1700 }
1701
1702 bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid;
1703 bnx2i_ep->state = EP_STATE_OFLD_START;
1704 bnx2i_ep_ofld_list_add(hba, bnx2i_ep);
1705
1706 init_timer(&bnx2i_ep->ofld_timer);
1707 bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies;
1708 bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1709 bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
1710 add_timer(&bnx2i_ep->ofld_timer);
1711
1712 bnx2i_send_conn_ofld_req(hba, bnx2i_ep);
1713
1714 /* Wait for CNIC hardware to setup conn context and return 'cid' */
1715 wait_event_interruptible(bnx2i_ep->ofld_wait,
1716 bnx2i_ep->state != EP_STATE_OFLD_START);
1717
1718 if (signal_pending(current))
1719 flush_signals(current);
1720 del_timer_sync(&bnx2i_ep->ofld_timer);
1721
1722 bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
1723
1724 if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
1725 rc = -ENOSPC;
1726 goto conn_failed;
1727 }
1728
1729 rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid,
1730 iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
1731 if (rc) {
1732 rc = -EINVAL;
1733 goto conn_failed;
1734 }
1735
1736 bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
1737 bnx2i_ep->cm_sk->snd_buf = 256 * 1024;
1738 clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags);
1739
1740 memset(&saddr, 0, sizeof(saddr));
1741 if (dst_addr->sa_family == AF_INET) {
1742 desti = (struct sockaddr_in *) dst_addr;
1743 saddr.remote.v4 = *desti;
1744 saddr.local.v4.sin_family = desti->sin_family;
1745 } else if (dst_addr->sa_family == AF_INET6) {
1746 desti6 = (struct sockaddr_in6 *) dst_addr;
1747 saddr.remote.v6 = *desti6;
1748 saddr.local.v6.sin6_family = desti6->sin6_family;
1749 }
1750
1751 bnx2i_ep->timestamp = jiffies;
1752 bnx2i_ep->state = EP_STATE_CONNECT_START;
1753 if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
1754 rc = -EINVAL;
1755 goto conn_failed;
1756 } else
1757 rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr);
1758
1759 if (rc)
1760 goto release_ep;
1761
1762 if (bnx2i_map_ep_dbell_regs(bnx2i_ep))
1763 goto release_ep;
1764 mutex_unlock(&hba->net_dev_lock);
1765 return ep;
1766
1767release_ep:
1768 if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
1769 mutex_unlock(&hba->net_dev_lock);
1770 return ERR_PTR(rc);
1771 }
1772conn_failed:
1773net_if_down:
1774iscsi_cid_err:
1775 bnx2i_free_qp_resc(hba, bnx2i_ep);
1776qp_resc_err:
1777 bnx2i_free_ep(ep);
1778 mutex_unlock(&hba->net_dev_lock);
1779check_busy:
1780 bnx2i_unreg_dev_all();
1781 return ERR_PTR(rc);
1782}
1783
1784
1785/**
1786 * bnx2i_ep_poll - polls for TCP connection establishment
1787 * @ep: TCP connection (endpoint) handle
1788 * @timeout_ms: timeout value in milliseconds
1789 *
1790 * polls for TCP connect request to complete
1791 */
1792static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
1793{
1794 struct bnx2i_endpoint *bnx2i_ep;
1795 int rc = 0;
1796
1797 bnx2i_ep = ep->dd_data;
1798 if ((bnx2i_ep->state == EP_STATE_IDLE) ||
1799 (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) ||
1800 (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
1801 return -1;
1802 if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)
1803 return 1;
1804
1805 rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait,
1806 ((bnx2i_ep->state ==
1807 EP_STATE_OFLD_FAILED) ||
1808 (bnx2i_ep->state ==
1809 EP_STATE_CONNECT_FAILED) ||
1810 (bnx2i_ep->state ==
1811 EP_STATE_CONNECT_COMPL)),
1812 msecs_to_jiffies(timeout_ms));
1813 if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
1814 rc = -1;
1815
1816 if (rc > 0)
1817 return 1;
1818 else if (!rc)
1819 return 0; /* timeout */
1820 else
1821 return rc;
1822}
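/*
 * Illustrative call flow (editor's sketch, not code from this driver): the
 * iscsi transport layer drives the endpoint ops registered further below
 * roughly as follows, assuming the usual open-iscsi connect sequence:
 *
 *	ep = bnx2i_ep_connect(shost, dst_addr, non_blocking);
 *	if (!IS_ERR(ep)) {
 *		rc = bnx2i_ep_poll(ep, timeout_ms);
 *		if (rc <= 0)
 *			bnx2i_ep_disconnect(ep);    (connect failed or timed out)
 *		else
 *			...                         (bind a new conn to this endpoint)
 *	}
 */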
1823
1824
1825/**
1826 * bnx2i_ep_tcp_conn_active - check EP state transition
1827 * @bnx2i_ep: endpoint pointer
1828 *
1829 * check if underlying TCP connection is active
1830 */
1831static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
1832{
1833 int ret;
1834 int cnic_dev_10g = 0;
1835
1836 if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
1837 cnic_dev_10g = 1;
1838
1839 switch (bnx2i_ep->state) {
1840 case EP_STATE_CONNECT_START:
1841 case EP_STATE_CLEANUP_FAILED:
1842 case EP_STATE_OFLD_FAILED:
1843 case EP_STATE_DISCONN_TIMEDOUT:
1844 ret = 0;
1845 break;
1846 case EP_STATE_CONNECT_COMPL:
1847 case EP_STATE_ULP_UPDATE_START:
1848 case EP_STATE_ULP_UPDATE_COMPL:
1849 case EP_STATE_TCP_FIN_RCVD:
1850 case EP_STATE_ULP_UPDATE_FAILED:
1851 ret = 1;
1852 break;
1853 case EP_STATE_TCP_RST_RCVD:
1854 ret = 0;
1855 break;
1856 case EP_STATE_CONNECT_FAILED:
1857 if (cnic_dev_10g)
1858 ret = 1;
1859 else
1860 ret = 0;
1861 break;
1862 default:
1863 ret = 0;
1864 }
1865
1866 return ret;
1867}
1868
1869
1870/**
1871 * bnx2i_ep_disconnect - executes TCP connection teardown process
1872 * @ep: TCP connection (endpoint) handle
1873 *
1874 * executes TCP connection teardown process
1875 */
1876static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
1877{
1878 struct bnx2i_endpoint *bnx2i_ep;
1879 struct bnx2i_conn *bnx2i_conn = NULL;
1880 struct iscsi_session *session = NULL;
1881 struct iscsi_conn *conn;
1882 struct cnic_dev *cnic;
1883 struct bnx2i_hba *hba;
1884
1885 bnx2i_ep = ep->dd_data;
1886
1887 /* driver should not attempt connection cleanup until TCP_CONNECT
1888 * completes either successfully or fails. The connect attempt itself is
1889 * bounded (roughly 9 secs), so wait up to 12 secs for it to resolve
1890 */
1891 while ((bnx2i_ep->state == EP_STATE_CONNECT_START) &&
1892 !time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
1893 msleep(250);
1894
1895 if (bnx2i_ep->conn) {
1896 bnx2i_conn = bnx2i_ep->conn;
1897 conn = bnx2i_conn->cls_conn->dd_data;
1898 session = conn->session;
1899
1900 spin_lock_bh(&session->lock);
1901 bnx2i_conn->is_bound = 0;
1902 spin_unlock_bh(&session->lock);
1903 }
1904
1905 hba = bnx2i_ep->hba;
1906 if (bnx2i_ep->state == EP_STATE_IDLE)
1907 goto return_bnx2i_ep;
1908 cnic = hba->cnic;
1909
1910 mutex_lock(&hba->net_dev_lock);
1911
1912 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
1913 goto free_resc;
1914 if (bnx2i_ep->hba_age != hba->age)
1915 goto free_resc;
1916
1917 if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
1918 goto destory_conn;
1919
1920 bnx2i_ep->state = EP_STATE_DISCONN_START;
1921
1922 init_timer(&bnx2i_ep->ofld_timer);
1923 bnx2i_ep->ofld_timer.expires = 10*HZ + jiffies;
1924 bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1925 bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
1926 add_timer(&bnx2i_ep->ofld_timer);
1927
1928 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
1929 int close = 0;
1930
1931 if (session) {
1932 spin_lock_bh(&session->lock);
1933 if (session->state == ISCSI_STATE_LOGGING_OUT)
1934 close = 1;
1935 spin_unlock_bh(&session->lock);
1936 }
1937 if (close)
1938 cnic->cm_close(bnx2i_ep->cm_sk);
1939 else
1940 cnic->cm_abort(bnx2i_ep->cm_sk);
1941 } else
1942 goto free_resc;
1943
1944 /* wait for option-2 conn teardown */
1945 wait_event_interruptible(bnx2i_ep->ofld_wait,
1946 bnx2i_ep->state != EP_STATE_DISCONN_START);
1947
1948 if (signal_pending(current))
1949 flush_signals(current);
1950 del_timer_sync(&bnx2i_ep->ofld_timer);
1951
1952destory_conn:
1953 if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
1954 mutex_unlock(&hba->net_dev_lock);
1955 return;
1956 }
1957free_resc:
1958 mutex_unlock(&hba->net_dev_lock);
1959 bnx2i_free_qp_resc(hba, bnx2i_ep);
1960return_bnx2i_ep:
1961 if (bnx2i_conn)
1962 bnx2i_conn->ep = NULL;
1963
1964 bnx2i_free_ep(ep);
1965
1966 if (!hba->ofld_conns_active)
1967 bnx2i_unreg_dev_all();
1968}
1969
1970
1971/**
1972 * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler
1973 * @shost: scsi host pointer
1974 * @params: pointer to iscsi path message to be forwarded to the cnic driver
1975 */
1976static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
1977{
1978 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1979 char *buf = (char *) params;
1980 u16 len = sizeof(*params);
1981
1982 /* handled by cnic driver */
1983 hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf,
1984 len);
1985
1986 return 0;
1987}
1988
1989
1990/*
1991 * 'scsi_host_template' structure and 'iscsi_transport' structure template
1992 * used while registering with the scsi host and iSCSI transport module.
1993 */
1994static struct scsi_host_template bnx2i_host_template = {
1995 .module = THIS_MODULE,
1996 .name = "Broadcom Offload iSCSI Initiator",
1997 .proc_name = "bnx2i",
1998 .queuecommand = iscsi_queuecommand,
1999 .eh_abort_handler = iscsi_eh_abort,
2000 .eh_device_reset_handler = iscsi_eh_device_reset,
2001 .eh_target_reset_handler = iscsi_eh_target_reset,
2002 .can_queue = 1024,
2003 .max_sectors = 127,
2004 .cmd_per_lun = 32,
2005 .this_id = -1,
2006 .use_clustering = ENABLE_CLUSTERING,
2007 .sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
2008 .shost_attrs = bnx2i_dev_attributes,
2009};
2010
2011struct iscsi_transport bnx2i_iscsi_transport = {
2012 .owner = THIS_MODULE,
2013 .name = "bnx2i",
2014 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
2015 CAP_MULTI_R2T | CAP_DATADGST |
2016 CAP_DATA_PATH_OFFLOAD,
2017 .param_mask = ISCSI_MAX_RECV_DLENGTH |
2018 ISCSI_MAX_XMIT_DLENGTH |
2019 ISCSI_HDRDGST_EN |
2020 ISCSI_DATADGST_EN |
2021 ISCSI_INITIAL_R2T_EN |
2022 ISCSI_MAX_R2T |
2023 ISCSI_IMM_DATA_EN |
2024 ISCSI_FIRST_BURST |
2025 ISCSI_MAX_BURST |
2026 ISCSI_PDU_INORDER_EN |
2027 ISCSI_DATASEQ_INORDER_EN |
2028 ISCSI_ERL |
2029 ISCSI_CONN_PORT |
2030 ISCSI_CONN_ADDRESS |
2031 ISCSI_EXP_STATSN |
2032 ISCSI_PERSISTENT_PORT |
2033 ISCSI_PERSISTENT_ADDRESS |
2034 ISCSI_TARGET_NAME | ISCSI_TPGT |
2035 ISCSI_USERNAME | ISCSI_PASSWORD |
2036 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
2037 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
2038 ISCSI_LU_RESET_TMO |
2039 ISCSI_PING_TMO | ISCSI_RECV_TMO |
2040 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
2041 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME,
2042 .create_session = bnx2i_session_create,
2043 .destroy_session = bnx2i_session_destroy,
2044 .create_conn = bnx2i_conn_create,
2045 .bind_conn = bnx2i_conn_bind,
2046 .destroy_conn = bnx2i_conn_destroy,
2047 .set_param = iscsi_set_param,
2048 .get_conn_param = bnx2i_conn_get_param,
2049 .get_session_param = iscsi_session_get_param,
2050 .get_host_param = bnx2i_host_get_param,
2051 .start_conn = bnx2i_conn_start,
2052 .stop_conn = iscsi_conn_stop,
2053 .send_pdu = iscsi_conn_send_pdu,
2054 .xmit_task = bnx2i_task_xmit,
2055 .get_stats = bnx2i_conn_get_stats,
2056 /* TCP connect - disconnect - option-2 interface calls */
2057 .ep_connect = bnx2i_ep_connect,
2058 .ep_poll = bnx2i_ep_poll,
2059 .ep_disconnect = bnx2i_ep_disconnect,
2060 .set_path = bnx2i_nl_set_path,
2061 /* Error recovery timeout call */
2062 .session_recovery_timedout = iscsi_session_recovery_timedout,
2063 .cleanup_task = bnx2i_cleanup_task,
2064};
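/*
 * Editor's note: this template is expected to be registered with the iscsi
 * transport class via iscsi_register_transport() from the module init path
 * (see bnx2i_init.c), which is what makes the session/connection and
 * ep_connect/ep_poll/ep_disconnect callbacks above reachable from userspace.
 */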
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
new file mode 100644
index 000000000000..96426b751eb2
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -0,0 +1,142 @@
1/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
2 *
3 * Copyright (c) 2004 - 2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
10 */
11
12#include "bnx2i.h"
13
14/**
15 * bnx2i_dev_to_hba - maps dev pointer to adapter struct
16 * @dev: device pointer
17 *
18 * Map device to hba structure
19 */
20static inline struct bnx2i_hba *bnx2i_dev_to_hba(struct device *dev)
21{
22 struct Scsi_Host *shost = class_to_shost(dev);
23 return iscsi_host_priv(shost);
24}
25
26
27/**
28 * bnx2i_show_sq_info - returns the currently configured send queue (SQ) size
29 * @dev: device pointer
30 * @buf: buffer to return current SQ size parameter
31 *
32 * Returns the current SQ size parameter; this parameter determines the number
33 * of outstanding iSCSI commands supported on a connection
34 */
35static ssize_t bnx2i_show_sq_info(struct device *dev,
36 struct device_attribute *attr, char *buf)
37{
38 struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
39
40 return sprintf(buf, "0x%x\n", hba->max_sqes);
41}
42
43
44/**
45 * bnx2i_set_sq_info - update send queue (SQ) size parameter
46 * @dev: device pointer
47 * @buf: buffer containing the new SQ size parameter
48 * @count: parameter buffer size
49 *
50 * Interface for user to change the send queue (SQ) size allocated for each conn
51 * Must be within SQ limits and a power of 2. For the latter this is needed
52 * because of how libiscsi preallocates tasks.
53 */
54static ssize_t bnx2i_set_sq_info(struct device *dev,
55 struct device_attribute *attr,
56 const char *buf, size_t count)
57{
58 struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
59 u32 val;
60 int max_sq_size;
61
62 if (hba->ofld_conns_active)
63 goto skip_config;
64
65 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
66 max_sq_size = BNX2I_5770X_SQ_WQES_MAX;
67 else
68 max_sq_size = BNX2I_570X_SQ_WQES_MAX;
69
70 if (sscanf(buf, " 0x%x ", &val) > 0) {
71 if ((val >= BNX2I_SQ_WQES_MIN) && (val <= max_sq_size) &&
72 (is_power_of_2(val)))
73 hba->max_sqes = val;
74 }
75
76 return count;
77
78skip_config:
79 printk(KERN_ERR "bnx2i: device busy, cannot change SQ size\n");
80 return 0;
81}
82
83
84/**
85 * bnx2i_show_ccell_info - returns command cell (HQ) size
86 * @dev: device pointer
87 * @buf: buffer to return current command cell (HQ) size parameter
88 *
89 * returns per-connection TCP history queue size parameter
90 */
91static ssize_t bnx2i_show_ccell_info(struct device *dev,
92 struct device_attribute *attr, char *buf)
93{
94 struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
95
96 return sprintf(buf, "0x%x\n", hba->num_ccell);
97}
98
99
100/**
101 * bnx2i_set_ccell_info - set command cell (HQ) size
102 * @dev: device pointer
103 * @buf: buffer containing the new command cell (HQ) size parameter
104 * @count: parameter buffer size
105 *
106 * updates per-connection TCP history queue size parameter
107 */
108static ssize_t bnx2i_set_ccell_info(struct device *dev,
109 struct device_attribute *attr,
110 const char *buf, size_t count)
111{
112 u32 val;
113 struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
114
115 if (hba->ofld_conns_active)
116 goto skip_config;
117
118 if (sscanf(buf, " 0x%x ", &val) > 0) {
119 if ((val >= BNX2I_CCELLS_MIN) &&
120 (val <= BNX2I_CCELLS_MAX)) {
121 hba->num_ccell = val;
122 }
123 }
124
125 return count;
126
127skip_config:
128 printk(KERN_ERR "bnx2i: device busy, cannot change CCELL size\n");
129 return 0;
130}
131
132
133static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR,
134 bnx2i_show_sq_info, bnx2i_set_sq_info);
135static DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR,
136 bnx2i_show_ccell_info, bnx2i_set_ccell_info);
137
138struct device_attribute *bnx2i_dev_attributes[] = {
139 &dev_attr_sq_size,
140 &dev_attr_num_ccell,
141 NULL
142};
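/*
 * Illustrative usage sketch (editor's note, not part of the driver): once the
 * host is registered these attributes appear under the SCSI host's sysfs
 * directory; the host number <N> depends on the running system.
 *
 *	# cat /sys/class/scsi_host/host<N>/sq_size
 *	# echo 0x100 > /sys/class/scsi_host/host<N>/sq_size	(power of 2, within SQ limits)
 *	# echo 0x40 > /sys/class/scsi_host/host<N>/num_ccell
 *
 * Values are parsed as hexadecimal (see the " 0x%x " sscanf above) and the
 * driver refuses changes while offloaded connections are active.
 */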
diff --git a/drivers/scsi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgb3i/cxgb3i.h
index 59b0958d2d11..e3133b58e594 100644
--- a/drivers/scsi/cxgb3i/cxgb3i.h
+++ b/drivers/scsi/cxgb3i/cxgb3i.h
@@ -144,7 +144,6 @@ struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *);
144void cxgb3i_adapter_open(struct t3cdev *); 144void cxgb3i_adapter_open(struct t3cdev *);
145void cxgb3i_adapter_close(struct t3cdev *); 145void cxgb3i_adapter_close(struct t3cdev *);
146 146
147struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *);
148struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *, 147struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *,
149 struct net_device *); 148 struct net_device *);
150void cxgb3i_hba_host_remove(struct cxgb3i_hba *); 149void cxgb3i_hba_host_remove(struct cxgb3i_hba *);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
index 99c912547902..344fd53b9954 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
@@ -206,6 +206,31 @@ int cxgb3i_ddp_find_page_index(unsigned long pgsz)
206 return DDP_PGIDX_MAX; 206 return DDP_PGIDX_MAX;
207} 207}
208 208
209/**
210 * cxgb3i_ddp_adjust_page_table - adjust page table with PAGE_SIZE
211 * Returns 0 if the page table was adjusted, -EINVAL if the system PAGE_SIZE is too small.
212 */
213int cxgb3i_ddp_adjust_page_table(void)
214{
215 int i;
216 unsigned int base_order, order;
217
218 if (PAGE_SIZE < (1UL << ddp_page_shift[0])) {
219 ddp_log_info("PAGE_SIZE 0x%lx too small, min. 0x%lx.\n",
220 PAGE_SIZE, 1UL << ddp_page_shift[0]);
221 return -EINVAL;
222 }
223
224 base_order = get_order(1UL << ddp_page_shift[0]);
225 order = get_order(1 << PAGE_SHIFT);
226 for (i = 0; i < DDP_PGIDX_MAX; i++) {
227 /* first is the kernel page size, then just doubling the size */
228 ddp_page_order[i] = order - base_order + i;
229 ddp_page_shift[i] = PAGE_SHIFT + i;
230 }
231 return 0;
232}
233
209static inline void ddp_gl_unmap(struct pci_dev *pdev, 234static inline void ddp_gl_unmap(struct pci_dev *pdev,
210 struct cxgb3i_gather_list *gl) 235 struct cxgb3i_gather_list *gl)
211{ 236{
@@ -598,30 +623,40 @@ int cxgb3i_adapter_ddp_info(struct t3cdev *tdev,
598 * release all the resource held by the ddp pagepod manager for a given 623 * release all the resource held by the ddp pagepod manager for a given
599 * adapter if needed 624 * adapter if needed
600 */ 625 */
601void cxgb3i_ddp_cleanup(struct t3cdev *tdev) 626
627static void ddp_cleanup(struct kref *kref)
602{ 628{
629 struct cxgb3i_ddp_info *ddp = container_of(kref,
630 struct cxgb3i_ddp_info,
631 refcnt);
603 int i = 0; 632 int i = 0;
633
634 ddp_log_info("kref release ddp 0x%p, t3dev 0x%p.\n", ddp, ddp->tdev);
635
636 ddp->tdev->ulp_iscsi = NULL;
637 while (i < ddp->nppods) {
638 struct cxgb3i_gather_list *gl = ddp->gl_map[i];
639 if (gl) {
640 int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
641 >> PPOD_PAGES_SHIFT;
642 ddp_log_info("t3dev 0x%p, ddp %d + %d.\n",
643 ddp->tdev, i, npods);
644 kfree(gl);
645 ddp_free_gl_skb(ddp, i, npods);
646 i += npods;
647 } else
648 i++;
649 }
650 cxgb3i_free_big_mem(ddp);
651}
652
653void cxgb3i_ddp_cleanup(struct t3cdev *tdev)
654{
604 struct cxgb3i_ddp_info *ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi; 655 struct cxgb3i_ddp_info *ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;
605 656
606 ddp_log_info("t3dev 0x%p, release ddp 0x%p.\n", tdev, ddp); 657 ddp_log_info("t3dev 0x%p, release ddp 0x%p.\n", tdev, ddp);
607 658 if (ddp)
608 if (ddp) { 659 kref_put(&ddp->refcnt, ddp_cleanup);
609 tdev->ulp_iscsi = NULL;
610 while (i < ddp->nppods) {
611 struct cxgb3i_gather_list *gl = ddp->gl_map[i];
612 if (gl) {
613 int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
614 >> PPOD_PAGES_SHIFT;
615 ddp_log_info("t3dev 0x%p, ddp %d + %d.\n",
616 tdev, i, npods);
617 kfree(gl);
618 ddp_free_gl_skb(ddp, i, npods);
619 i += npods;
620 } else
621 i++;
622 }
623 cxgb3i_free_big_mem(ddp);
624 }
625} 660}
626 661
627/** 662/**
@@ -631,12 +666,13 @@ void cxgb3i_ddp_cleanup(struct t3cdev *tdev)
631 */ 666 */
632static void ddp_init(struct t3cdev *tdev) 667static void ddp_init(struct t3cdev *tdev)
633{ 668{
634 struct cxgb3i_ddp_info *ddp; 669 struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
635 struct ulp_iscsi_info uinfo; 670 struct ulp_iscsi_info uinfo;
636 unsigned int ppmax, bits; 671 unsigned int ppmax, bits;
637 int i, err; 672 int i, err;
638 673
639 if (tdev->ulp_iscsi) { 674 if (ddp) {
675 kref_get(&ddp->refcnt);
640 ddp_log_warn("t3dev 0x%p, ddp 0x%p already set up.\n", 676 ddp_log_warn("t3dev 0x%p, ddp 0x%p already set up.\n",
641 tdev, tdev->ulp_iscsi); 677 tdev, tdev->ulp_iscsi);
642 return; 678 return;
@@ -670,6 +706,7 @@ static void ddp_init(struct t3cdev *tdev)
670 ppmax * 706 ppmax *
671 sizeof(struct cxgb3i_gather_list *)); 707 sizeof(struct cxgb3i_gather_list *));
672 spin_lock_init(&ddp->map_lock); 708 spin_lock_init(&ddp->map_lock);
709 kref_init(&ddp->refcnt);
673 710
674 ddp->tdev = tdev; 711 ddp->tdev = tdev;
675 ddp->pdev = uinfo.pdev; 712 ddp->pdev = uinfo.pdev;
@@ -715,6 +752,17 @@ void cxgb3i_ddp_init(struct t3cdev *tdev)
715{ 752{
716 if (page_idx == DDP_PGIDX_MAX) { 753 if (page_idx == DDP_PGIDX_MAX) {
717 page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE); 754 page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
755
756 if (page_idx == DDP_PGIDX_MAX) {
757 ddp_log_info("system PAGE_SIZE %lu, update hw.\n",
758 PAGE_SIZE);
759 if (cxgb3i_ddp_adjust_page_table() < 0) {
760 ddp_log_info("PAGE_SIZE %lu, ddp disabled.\n",
761 PAGE_SIZE);
762 return;
763 }
764 page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
765 }
718 ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n", 766 ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n",
719 PAGE_SIZE, page_idx); 767 PAGE_SIZE, page_idx);
720 } 768 }
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
index 0d296de7cf32..87dd56b422bf 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
@@ -54,6 +54,7 @@ struct cxgb3i_gather_list {
54 * struct cxgb3i_ddp_info - cxgb3i direct data placement for pdu payload 54 * struct cxgb3i_ddp_info - cxgb3i direct data placement for pdu payload
55 * 55 *
56 * @list: list head to link elements 56 * @list: list head to link elements
57 * @refcnt: ref. count
57 * @tdev: pointer to t3cdev used by cxgb3 driver 58 * @tdev: pointer to t3cdev used by cxgb3 driver
58 * @max_txsz: max tx packet size for ddp 59 * @max_txsz: max tx packet size for ddp
59 * @max_rxsz: max rx packet size for ddp 60 * @max_rxsz: max rx packet size for ddp
@@ -70,6 +71,7 @@ struct cxgb3i_gather_list {
70 */ 71 */
71struct cxgb3i_ddp_info { 72struct cxgb3i_ddp_info {
72 struct list_head list; 73 struct list_head list;
74 struct kref refcnt;
73 struct t3cdev *tdev; 75 struct t3cdev *tdev;
74 struct pci_dev *pdev; 76 struct pci_dev *pdev;
75 unsigned int max_txsz; 77 unsigned int max_txsz;
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index 9212400b9b13..74369a3f963b 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/inet.h> 14#include <linux/inet.h>
15#include <linux/crypto.h> 15#include <linux/crypto.h>
16#include <net/dst.h>
16#include <net/tcp.h> 17#include <net/tcp.h>
17#include <scsi/scsi_cmnd.h> 18#include <scsi/scsi_cmnd.h>
18#include <scsi/scsi_device.h> 19#include <scsi/scsi_device.h>
@@ -178,7 +179,7 @@ void cxgb3i_adapter_close(struct t3cdev *t3dev)
178 * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device 179 * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device
179 * @t3dev: t3cdev adapter 180 * @t3dev: t3cdev adapter
180 */ 181 */
181struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev) 182static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
182{ 183{
183 struct cxgb3i_adapter *snic; 184 struct cxgb3i_adapter *snic;
184 int i; 185 int i;
@@ -261,20 +262,27 @@ void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba)
261 262
262/** 263/**
263 * cxgb3i_ep_connect - establish TCP connection to target portal 264 * cxgb3i_ep_connect - establish TCP connection to target portal
265 * @shost: scsi host to use
264 * @dst_addr: target IP address 266 * @dst_addr: target IP address
265 * @non_blocking: blocking or non-blocking call 267 * @non_blocking: blocking or non-blocking call
266 * 268 *
267 * Initiates a TCP/IP connection to the dst_addr 269 * Initiates a TCP/IP connection to the dst_addr
268 */ 270 */
269static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr, 271static struct iscsi_endpoint *cxgb3i_ep_connect(struct Scsi_Host *shost,
272 struct sockaddr *dst_addr,
270 int non_blocking) 273 int non_blocking)
271{ 274{
272 struct iscsi_endpoint *ep; 275 struct iscsi_endpoint *ep;
273 struct cxgb3i_endpoint *cep; 276 struct cxgb3i_endpoint *cep;
274 struct cxgb3i_hba *hba; 277 struct cxgb3i_hba *hba = NULL;
275 struct s3_conn *c3cn = NULL; 278 struct s3_conn *c3cn = NULL;
276 int err = 0; 279 int err = 0;
277 280
281 if (shost)
282 hba = iscsi_host_priv(shost);
283
284 cxgb3i_api_debug("shost 0x%p, hba 0x%p.\n", shost, hba);
285
278 c3cn = cxgb3i_c3cn_create(); 286 c3cn = cxgb3i_c3cn_create();
279 if (!c3cn) { 287 if (!c3cn) {
280 cxgb3i_log_info("ep connect OOM.\n"); 288 cxgb3i_log_info("ep connect OOM.\n");
@@ -282,17 +290,27 @@ static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr,
282 goto release_conn; 290 goto release_conn;
283 } 291 }
284 292
285 err = cxgb3i_c3cn_connect(c3cn, (struct sockaddr_in *)dst_addr); 293 err = cxgb3i_c3cn_connect(hba ? hba->ndev : NULL, c3cn,
294 (struct sockaddr_in *)dst_addr);
286 if (err < 0) { 295 if (err < 0) {
287 cxgb3i_log_info("ep connect failed.\n"); 296 cxgb3i_log_info("ep connect failed.\n");
288 goto release_conn; 297 goto release_conn;
289 } 298 }
299
290 hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev); 300 hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev);
291 if (!hba) { 301 if (!hba) {
292 err = -ENOSPC; 302 err = -ENOSPC;
293 cxgb3i_log_info("NOT going through cxgbi device.\n"); 303 cxgb3i_log_info("NOT going through cxgbi device.\n");
294 goto release_conn; 304 goto release_conn;
295 } 305 }
306
307 if (shost && hba != iscsi_host_priv(shost)) {
308 err = -ENOSPC;
309 cxgb3i_log_info("Could not connect through request host%u\n",
310 shost->host_no);
311 goto release_conn;
312 }
313
296 if (c3cn_is_closing(c3cn)) { 314 if (c3cn_is_closing(c3cn)) {
297 err = -ENOSPC; 315 err = -ENOSPC;
298 cxgb3i_log_info("ep connect unable to connect.\n"); 316 cxgb3i_log_info("ep connect unable to connect.\n");
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index e11c9c180f39..c1d5be4adf9c 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -1479,12 +1479,13 @@ static struct net_device *cxgb3_egress_dev(struct net_device *root_dev,
1479 return NULL; 1479 return NULL;
1480} 1480}
1481 1481
1482static struct rtable *find_route(__be32 saddr, __be32 daddr, 1482static struct rtable *find_route(struct net_device *dev,
1483 __be32 saddr, __be32 daddr,
1483 __be16 sport, __be16 dport) 1484 __be16 sport, __be16 dport)
1484{ 1485{
1485 struct rtable *rt; 1486 struct rtable *rt;
1486 struct flowi fl = { 1487 struct flowi fl = {
1487 .oif = 0, 1488 .oif = dev ? dev->ifindex : 0,
1488 .nl_u = { 1489 .nl_u = {
1489 .ip4_u = { 1490 .ip4_u = {
1490 .daddr = daddr, 1491 .daddr = daddr,
@@ -1573,36 +1574,40 @@ out_err:
1573 * 1574 *
1574 * return 0 if active open request is sent, < 0 otherwise. 1575 * return 0 if active open request is sent, < 0 otherwise.
1575 */ 1576 */
1576int cxgb3i_c3cn_connect(struct s3_conn *c3cn, struct sockaddr_in *usin) 1577int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
1578 struct sockaddr_in *usin)
1577{ 1579{
1578 struct rtable *rt; 1580 struct rtable *rt;
1579 struct net_device *dev;
1580 struct cxgb3i_sdev_data *cdata; 1581 struct cxgb3i_sdev_data *cdata;
1581 struct t3cdev *cdev; 1582 struct t3cdev *cdev;
1582 __be32 sipv4; 1583 __be32 sipv4;
1583 int err; 1584 int err;
1584 1585
1586 c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev);
1587
1585 if (usin->sin_family != AF_INET) 1588 if (usin->sin_family != AF_INET)
1586 return -EAFNOSUPPORT; 1589 return -EAFNOSUPPORT;
1587 1590
1588 c3cn->daddr.sin_port = usin->sin_port; 1591 c3cn->daddr.sin_port = usin->sin_port;
1589 c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr; 1592 c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;
1590 1593
1591 rt = find_route(c3cn->saddr.sin_addr.s_addr, 1594 rt = find_route(dev, c3cn->saddr.sin_addr.s_addr,
1592 c3cn->daddr.sin_addr.s_addr, 1595 c3cn->daddr.sin_addr.s_addr,
1593 c3cn->saddr.sin_port, 1596 c3cn->saddr.sin_port,
1594 c3cn->daddr.sin_port); 1597 c3cn->daddr.sin_port);
1595 if (rt == NULL) { 1598 if (rt == NULL) {
1596 c3cn_conn_debug("NO route to 0x%x, port %u.\n", 1599 c3cn_conn_debug("NO route to 0x%x, port %u, dev %s.\n",
1597 c3cn->daddr.sin_addr.s_addr, 1600 c3cn->daddr.sin_addr.s_addr,
1598 ntohs(c3cn->daddr.sin_port)); 1601 ntohs(c3cn->daddr.sin_port),
1602 dev ? dev->name : "any");
1599 return -ENETUNREACH; 1603 return -ENETUNREACH;
1600 } 1604 }
1601 1605
1602 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { 1606 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
1603 c3cn_conn_debug("multi-cast route to 0x%x, port %u.\n", 1607 c3cn_conn_debug("multi-cast route to 0x%x, port %u, dev %s.\n",
1604 c3cn->daddr.sin_addr.s_addr, 1608 c3cn->daddr.sin_addr.s_addr,
1605 ntohs(c3cn->daddr.sin_port)); 1609 ntohs(c3cn->daddr.sin_port),
1610 dev ? dev->name : "any");
1606 ip_rt_put(rt); 1611 ip_rt_put(rt);
1607 return -ENETUNREACH; 1612 return -ENETUNREACH;
1608 } 1613 }
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.h b/drivers/scsi/cxgb3i/cxgb3i_offload.h
index ebfca960c0a9..6a1d86b1fafe 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.h
@@ -169,7 +169,8 @@ void cxgb3i_sdev_add(struct t3cdev *, struct cxgb3_client *);
169void cxgb3i_sdev_remove(struct t3cdev *); 169void cxgb3i_sdev_remove(struct t3cdev *);
170 170
171struct s3_conn *cxgb3i_c3cn_create(void); 171struct s3_conn *cxgb3i_c3cn_create(void);
172int cxgb3i_c3cn_connect(struct s3_conn *, struct sockaddr_in *); 172int cxgb3i_c3cn_connect(struct net_device *, struct s3_conn *,
173 struct sockaddr_in *);
173void cxgb3i_c3cn_rx_credits(struct s3_conn *, int); 174void cxgb3i_c3cn_rx_credits(struct s3_conn *, int);
174int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *); 175int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *);
175void cxgb3i_c3cn_release(struct s3_conn *); 176void cxgb3i_c3cn_release(struct s3_conn *);
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 43b8c51e98d0..fd0544f7da81 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -561,6 +561,12 @@ static int rdac_check_sense(struct scsi_device *sdev,
561 struct rdac_dh_data *h = get_rdac_data(sdev); 561 struct rdac_dh_data *h = get_rdac_data(sdev);
562 switch (sense_hdr->sense_key) { 562 switch (sense_hdr->sense_key) {
563 case NOT_READY: 563 case NOT_READY:
564 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
565 /* LUN Not Ready - Logical Unit Not Ready and is in
566 * the process of becoming ready
567 * Just retry.
568 */
569 return ADD_TO_MLQUEUE;
564 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81) 570 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
565 /* LUN Not Ready - Storage firmware incompatible 571 /* LUN Not Ready - Storage firmware incompatible
566 * Manual code synchronisation required. 572
diff --git a/drivers/scsi/dpt/osd_util.h b/drivers/scsi/dpt/osd_util.h
index 4b56c0436ba2..b2613c2eaac7 100644
--- a/drivers/scsi/dpt/osd_util.h
+++ b/drivers/scsi/dpt/osd_util.h
@@ -342,7 +342,7 @@ uLONG osdGetThreadID(void);
342/* wakes up the specified thread */ 342/* wakes up the specified thread */
343void osdWakeThread(uLONG); 343void osdWakeThread(uLONG);
344 344
345/* osd sleep for x miliseconds */ 345/* osd sleep for x milliseconds */
346void osdSleep(uLONG); 346void osdSleep(uLONG);
347 347
348#define DPT_THREAD_PRIORITY_LOWEST 0x00 348#define DPT_THREAD_PRIORITY_LOWEST 0x00
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index be5099dd94b5..c7076ce25e21 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1825,7 +1825,7 @@ static int eata2x_queuecommand(struct scsi_cmnd *SCpnt,
1825 if (linked_comm && SCpnt->device->queue_depth > 2 1825 if (linked_comm && SCpnt->device->queue_depth > 2
1826 && TLDEV(SCpnt->device->type)) { 1826 && TLDEV(SCpnt->device->type)) {
1827 ha->cp_stat[i] = READY; 1827 ha->cp_stat[i] = READY;
1828 flush_dev(SCpnt->device, SCpnt->request->sector, ha, 0); 1828 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 0);
1829 return 0; 1829 return 0;
1830 } 1830 }
1831 1831
@@ -2144,13 +2144,13 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2144 if (!cpp->din) 2144 if (!cpp->din)
2145 input_only = 0; 2145 input_only = 0;
2146 2146
2147 if (SCpnt->request->sector < minsec) 2147 if (blk_rq_pos(SCpnt->request) < minsec)
2148 minsec = SCpnt->request->sector; 2148 minsec = blk_rq_pos(SCpnt->request);
2149 if (SCpnt->request->sector > maxsec) 2149 if (blk_rq_pos(SCpnt->request) > maxsec)
2150 maxsec = SCpnt->request->sector; 2150 maxsec = blk_rq_pos(SCpnt->request);
2151 2151
2152 sl[n] = SCpnt->request->sector; 2152 sl[n] = blk_rq_pos(SCpnt->request);
2153 ioseek += SCpnt->request->nr_sectors; 2153 ioseek += blk_rq_sectors(SCpnt->request);
2154 2154
2155 if (!n) 2155 if (!n)
2156 continue; 2156 continue;
@@ -2190,7 +2190,7 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2190 k = il[n]; 2190 k = il[n];
2191 cpp = &ha->cp[k]; 2191 cpp = &ha->cp[k];
2192 SCpnt = cpp->SCpnt; 2192 SCpnt = cpp->SCpnt;
2193 ll[n] = SCpnt->request->nr_sectors; 2193 ll[n] = blk_rq_sectors(SCpnt->request);
2194 pl[n] = SCpnt->serial_number; 2194 pl[n] = SCpnt->serial_number;
2195 2195
2196 if (!n) 2196 if (!n)
@@ -2236,12 +2236,12 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2236 cpp = &ha->cp[k]; 2236 cpp = &ha->cp[k];
2237 SCpnt = cpp->SCpnt; 2237 SCpnt = cpp->SCpnt;
2238 scmd_printk(KERN_INFO, SCpnt, 2238 scmd_printk(KERN_INFO, SCpnt,
2239 "%s pid %ld mb %d fc %d nr %d sec %ld ns %ld" 2239 "%s pid %ld mb %d fc %d nr %d sec %ld ns %u"
2240 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", 2240 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
2241 (ihdlr ? "ihdlr" : "qcomm"), 2241 (ihdlr ? "ihdlr" : "qcomm"),
2242 SCpnt->serial_number, k, flushcount, 2242 SCpnt->serial_number, k, flushcount,
2243 n_ready, SCpnt->request->sector, 2243 n_ready, blk_rq_pos(SCpnt->request),
2244 SCpnt->request->nr_sectors, cursec, YESNO(s), 2244 blk_rq_sectors(SCpnt->request), cursec, YESNO(s),
2245 YESNO(r), YESNO(rev), YESNO(input_only), 2245 YESNO(r), YESNO(rev), YESNO(input_only),
2246 YESNO(overlap), cpp->din); 2246 YESNO(overlap), cpp->din);
2247 } 2247 }
@@ -2408,7 +2408,7 @@ static irqreturn_t ihdlr(struct Scsi_Host *shost)
2408 2408
2409 if (linked_comm && SCpnt->device->queue_depth > 2 2409 if (linked_comm && SCpnt->device->queue_depth > 2
2410 && TLDEV(SCpnt->device->type)) 2410 && TLDEV(SCpnt->device->type))
2411 flush_dev(SCpnt->device, SCpnt->request->sector, ha, 1); 2411 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 1);
2412 2412
2413 tstatus = status_byte(spp->target_status); 2413 tstatus = status_byte(spp->target_status);
2414 2414
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 03e1926f40b5..0a5609bb5817 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -45,8 +45,6 @@
45 45
46#include "fcoe.h" 46#include "fcoe.h"
47 47
48static int debug_fcoe;
49
50MODULE_AUTHOR("Open-FCoE.org"); 48MODULE_AUTHOR("Open-FCoE.org");
51MODULE_DESCRIPTION("FCoE"); 49MODULE_DESCRIPTION("FCoE");
52MODULE_LICENSE("GPL v2"); 50MODULE_LICENSE("GPL v2");
@@ -54,7 +52,6 @@ MODULE_LICENSE("GPL v2");
54/* fcoe host list */ 52/* fcoe host list */
55LIST_HEAD(fcoe_hostlist); 53LIST_HEAD(fcoe_hostlist);
56DEFINE_RWLOCK(fcoe_hostlist_lock); 54DEFINE_RWLOCK(fcoe_hostlist_lock);
57DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
58DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu); 55DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
59 56
60/* Function Prototypes */ 57/* Function Prototypes */
@@ -71,7 +68,7 @@ static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
71static int fcoe_hostlist_add(const struct fc_lport *); 68static int fcoe_hostlist_add(const struct fc_lport *);
72static int fcoe_hostlist_remove(const struct fc_lport *); 69static int fcoe_hostlist_remove(const struct fc_lport *);
73 70
74static int fcoe_check_wait_queue(struct fc_lport *); 71static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
75static int fcoe_device_notification(struct notifier_block *, ulong, void *); 72static int fcoe_device_notification(struct notifier_block *, ulong, void *);
76static void fcoe_dev_setup(void); 73static void fcoe_dev_setup(void);
77static void fcoe_dev_cleanup(void); 74static void fcoe_dev_cleanup(void);
@@ -136,6 +133,58 @@ static struct scsi_host_template fcoe_shost_template = {
136}; 133};
137 134
138/** 135/**
136 * fcoe_fip_recv - handle a received FIP frame.
137 * @skb: the receive skb
138 * @dev: associated &net_device
139 * @ptype: the &packet_type structure which was used to register this handler.
140 * @orig_dev: original receive &net_device, in case @dev is a bond.
141 *
142 * Returns: 0 for success
143 */
144static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
145 struct packet_type *ptype,
146 struct net_device *orig_dev)
147{
148 struct fcoe_softc *fc;
149
150 fc = container_of(ptype, struct fcoe_softc, fip_packet_type);
151 fcoe_ctlr_recv(&fc->ctlr, skb);
152 return 0;
153}
154
155/**
156 * fcoe_fip_send() - send an Ethernet-encapsulated FIP frame.
157 * @fip: FCoE controller.
158 * @skb: FIP Packet.
159 */
160static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
161{
162 skb->dev = fcoe_from_ctlr(fip)->real_dev;
163 dev_queue_xmit(skb);
164}
165
166/**
167 * fcoe_update_src_mac() - Update Ethernet MAC filters.
168 * @fip: FCoE controller.
169 * @old: Unicast MAC address to delete if the MAC is non-zero.
170 * @new: Unicast MAC address to add.
171 *
172 * Remove any previously-set unicast MAC filter.
173 * Add secondary FCoE MAC address filter for our OUI.
174 */
175static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new)
176{
177 struct fcoe_softc *fc;
178
179 fc = fcoe_from_ctlr(fip);
180 rtnl_lock();
181 if (!is_zero_ether_addr(old))
182 dev_unicast_delete(fc->real_dev, old);
183 dev_unicast_add(fc->real_dev, new);
184 rtnl_unlock();
185}
186
187/**
139 * fcoe_lport_config() - sets up the fc_lport 188 * fcoe_lport_config() - sets up the fc_lport
140 * @lp: ptr to the fc_lport 189 * @lp: ptr to the fc_lport
141 * 190 *
@@ -146,6 +195,7 @@ static int fcoe_lport_config(struct fc_lport *lp)
146 lp->link_up = 0; 195 lp->link_up = 0;
147 lp->qfull = 0; 196 lp->qfull = 0;
148 lp->max_retry_count = 3; 197 lp->max_retry_count = 3;
198 lp->max_rport_retry_count = 3;
149 lp->e_d_tov = 2 * 1000; /* FC-FS default */ 199 lp->e_d_tov = 2 * 1000; /* FC-FS default */
150 lp->r_a_tov = 2 * 2 * 1000; 200 lp->r_a_tov = 2 * 2 * 1000;
151 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | 201 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
@@ -167,6 +217,42 @@ static int fcoe_lport_config(struct fc_lport *lp)
167} 217}
168 218
169/** 219/**
220 * fcoe_netdev_cleanup() - clean up netdev configurations
221 * @fc: ptr to the fcoe_softc
222 */
223void fcoe_netdev_cleanup(struct fcoe_softc *fc)
224{
225 u8 flogi_maddr[ETH_ALEN];
226
227 /* Don't listen for Ethernet packets anymore */
228 dev_remove_pack(&fc->fcoe_packet_type);
229 dev_remove_pack(&fc->fip_packet_type);
230
231 /* Delete secondary MAC addresses */
232 rtnl_lock();
233 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
234 dev_unicast_delete(fc->real_dev, flogi_maddr);
235 if (!is_zero_ether_addr(fc->ctlr.data_src_addr))
236 dev_unicast_delete(fc->real_dev, fc->ctlr.data_src_addr);
237 if (fc->ctlr.spma)
238 dev_unicast_delete(fc->real_dev, fc->ctlr.ctl_src_addr);
239 dev_mc_delete(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
240 rtnl_unlock();
241}
242
243/**
244 * fcoe_queue_timer() - fcoe queue timer
245 * @lp: the fc_lport pointer
246 *
247 * Calls fcoe_check_wait_queue on timeout
248 *
249 */
250static void fcoe_queue_timer(ulong lp)
251{
252 fcoe_check_wait_queue((struct fc_lport *)lp, NULL);
253}
254
255/**
170 * fcoe_netdev_config() - Set up netdev for SW FCoE 256 * fcoe_netdev_config() - Set up netdev for SW FCoE
171 * @lp : ptr to the fc_lport 257 * @lp : ptr to the fc_lport
172 * @netdev : ptr to the associated netdevice struct 258 * @netdev : ptr to the associated netdevice struct
@@ -181,6 +267,7 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
181 u64 wwnn, wwpn; 267 u64 wwnn, wwpn;
182 struct fcoe_softc *fc; 268 struct fcoe_softc *fc;
183 u8 flogi_maddr[ETH_ALEN]; 269 u8 flogi_maddr[ETH_ALEN];
270 struct netdev_hw_addr *ha;
184 271
185 /* Setup lport private data to point to fcoe softc */ 272 /* Setup lport private data to point to fcoe softc */
186 fc = lport_priv(lp); 273 fc = lport_priv(lp);
@@ -216,30 +303,44 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
216#ifdef NETIF_F_FCOE_CRC 303#ifdef NETIF_F_FCOE_CRC
217 if (netdev->features & NETIF_F_FCOE_CRC) { 304 if (netdev->features & NETIF_F_FCOE_CRC) {
218 lp->crc_offload = 1; 305 lp->crc_offload = 1;
219 printk(KERN_DEBUG "fcoe:%s supports FCCRC offload\n", 306 FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
220 netdev->name);
221 } 307 }
222#endif 308#endif
223#ifdef NETIF_F_FSO 309#ifdef NETIF_F_FSO
224 if (netdev->features & NETIF_F_FSO) { 310 if (netdev->features & NETIF_F_FSO) {
225 lp->seq_offload = 1; 311 lp->seq_offload = 1;
226 lp->lso_max = netdev->gso_max_size; 312 lp->lso_max = netdev->gso_max_size;
227 printk(KERN_DEBUG "fcoe:%s supports LSO for max len 0x%x\n", 313 FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
228 netdev->name, lp->lso_max); 314 lp->lso_max);
229 } 315 }
230#endif 316#endif
231 if (netdev->fcoe_ddp_xid) { 317 if (netdev->fcoe_ddp_xid) {
232 lp->lro_enabled = 1; 318 lp->lro_enabled = 1;
233 lp->lro_xid = netdev->fcoe_ddp_xid; 319 lp->lro_xid = netdev->fcoe_ddp_xid;
234 printk(KERN_DEBUG "fcoe:%s supports LRO for max xid 0x%x\n", 320 FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
235 netdev->name, lp->lro_xid); 321 lp->lro_xid);
236 } 322 }
237 skb_queue_head_init(&fc->fcoe_pending_queue); 323 skb_queue_head_init(&fc->fcoe_pending_queue);
238 fc->fcoe_pending_queue_active = 0; 324 fc->fcoe_pending_queue_active = 0;
325 setup_timer(&fc->timer, fcoe_queue_timer, (unsigned long)lp);
326
327 /* look for SAN MAC address, if multiple SAN MACs exist, only
328 * use the first one for SPMA */
329 rcu_read_lock();
330 for_each_dev_addr(netdev, ha) {
331 if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
332 (is_valid_ether_addr(ha->addr))) {
333 memcpy(fc->ctlr.ctl_src_addr, ha->addr, ETH_ALEN);
334 fc->ctlr.spma = 1;
335 break;
336 }
337 }
338 rcu_read_unlock();
239 339
240 /* setup Source Mac Address */ 340 /* setup Source Mac Address */
241 memcpy(fc->ctlr.ctl_src_addr, fc->real_dev->dev_addr, 341 if (!fc->ctlr.spma)
242 fc->real_dev->addr_len); 342 memcpy(fc->ctlr.ctl_src_addr, fc->real_dev->dev_addr,
343 fc->real_dev->addr_len);
243 344
244 wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0); 345 wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
245 fc_set_wwnn(lp, wwnn); 346 fc_set_wwnn(lp, wwnn);
@@ -254,7 +355,9 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
254 */ 355 */
255 rtnl_lock(); 356 rtnl_lock();
256 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); 357 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
257 dev_unicast_add(fc->real_dev, flogi_maddr, ETH_ALEN); 358 dev_unicast_add(fc->real_dev, flogi_maddr);
359 if (fc->ctlr.spma)
360 dev_unicast_add(fc->real_dev, fc->ctlr.ctl_src_addr);
258 dev_mc_add(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); 361 dev_mc_add(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
259 rtnl_unlock(); 362 rtnl_unlock();
260 363
@@ -267,6 +370,11 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
267 fc->fcoe_packet_type.dev = fc->real_dev; 370 fc->fcoe_packet_type.dev = fc->real_dev;
268 dev_add_pack(&fc->fcoe_packet_type); 371 dev_add_pack(&fc->fcoe_packet_type);
269 372
373 fc->fip_packet_type.func = fcoe_fip_recv;
374 fc->fip_packet_type.type = htons(ETH_P_FIP);
375 fc->fip_packet_type.dev = fc->real_dev;
376 dev_add_pack(&fc->fip_packet_type);
377
270 return 0; 378 return 0;
271} 379}
272 380
@@ -296,7 +404,8 @@ static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
296 /* add the new host to the SCSI-ml */ 404 /* add the new host to the SCSI-ml */
297 rc = scsi_add_host(lp->host, dev); 405 rc = scsi_add_host(lp->host, dev);
298 if (rc) { 406 if (rc) {
299 FC_DBG("fcoe_shost_config:error on scsi_add_host\n"); 407 FCOE_NETDEV_DBG(fcoe_netdev(lp), "fcoe_shost_config: "
408 "error on scsi_add_host\n");
300 return rc; 409 return rc;
301 } 410 }
302 sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s", 411 sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
@@ -334,12 +443,10 @@ static int fcoe_if_destroy(struct net_device *netdev)
334{ 443{
335 struct fc_lport *lp = NULL; 444 struct fc_lport *lp = NULL;
336 struct fcoe_softc *fc; 445 struct fcoe_softc *fc;
337 u8 flogi_maddr[ETH_ALEN];
338 446
339 BUG_ON(!netdev); 447 BUG_ON(!netdev);
340 448
341 printk(KERN_DEBUG "fcoe_if_destroy:interface on %s\n", 449 FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
342 netdev->name);
343 450
344 lp = fcoe_hostlist_lookup(netdev); 451 lp = fcoe_hostlist_lookup(netdev);
345 if (!lp) 452 if (!lp)
@@ -353,9 +460,10 @@ static int fcoe_if_destroy(struct net_device *netdev)
353 /* Remove the instance from fcoe's list */ 460 /* Remove the instance from fcoe's list */
354 fcoe_hostlist_remove(lp); 461 fcoe_hostlist_remove(lp);
355 462
356 /* Don't listen for Ethernet packets anymore */ 463 /* clean up netdev configurations */
357 dev_remove_pack(&fc->fcoe_packet_type); 464 fcoe_netdev_cleanup(fc);
358 dev_remove_pack(&fc->fip_packet_type); 465
466 /* tear-down the FCoE controller */
359 fcoe_ctlr_destroy(&fc->ctlr); 467 fcoe_ctlr_destroy(&fc->ctlr);
360 468
361 /* Cleanup the fc_lport */ 469 /* Cleanup the fc_lport */
@@ -370,22 +478,15 @@ static int fcoe_if_destroy(struct net_device *netdev)
370 if (lp->emp) 478 if (lp->emp)
371 fc_exch_mgr_free(lp->emp); 479 fc_exch_mgr_free(lp->emp);
372 480
373 /* Delete secondary MAC addresses */
374 rtnl_lock();
375 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
376 dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN);
377 if (!is_zero_ether_addr(fc->ctlr.data_src_addr))
378 dev_unicast_delete(fc->real_dev,
379 fc->ctlr.data_src_addr, ETH_ALEN);
380 dev_mc_delete(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
381 rtnl_unlock();
382
383 /* Free the per-CPU receive threads */ 481 /* Free the per-CPU receive threads */
384 fcoe_percpu_clean(lp); 482 fcoe_percpu_clean(lp);
385 483
386 /* Free existing skbs */ 484 /* Free existing skbs */
387 fcoe_clean_pending_queue(lp); 485 fcoe_clean_pending_queue(lp);
388 486
487 /* Stop the timer */
488 del_timer_sync(&fc->timer);
489
389 /* Free memory used by statistical counters */ 490 /* Free memory used by statistical counters */
390 fc_lport_free_stats(lp); 491 fc_lport_free_stats(lp);
391 492
@@ -439,58 +540,6 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
439}; 540};
440 541
441/** 542/**
442 * fcoe_fip_recv - handle a received FIP frame.
443 * @skb: the receive skb
444 * @dev: associated &net_device
445 * @ptype: the &packet_type structure which was used to register this handler.
446 * @orig_dev: original receive &net_device, in case @dev is a bond.
447 *
448 * Returns: 0 for success
449 */
450static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
451 struct packet_type *ptype,
452 struct net_device *orig_dev)
453{
454 struct fcoe_softc *fc;
455
456 fc = container_of(ptype, struct fcoe_softc, fip_packet_type);
457 fcoe_ctlr_recv(&fc->ctlr, skb);
458 return 0;
459}
460
461/**
462 * fcoe_fip_send() - send an Ethernet-encapsulated FIP frame.
463 * @fip: FCoE controller.
464 * @skb: FIP Packet.
465 */
466static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
467{
468 skb->dev = fcoe_from_ctlr(fip)->real_dev;
469 dev_queue_xmit(skb);
470}
471
472/**
473 * fcoe_update_src_mac() - Update Ethernet MAC filters.
474 * @fip: FCoE controller.
475 * @old: Unicast MAC address to delete if the MAC is non-zero.
476 * @new: Unicast MAC address to add.
477 *
478 * Remove any previously-set unicast MAC filter.
479 * Add secondary FCoE MAC address filter for our OUI.
480 */
481static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new)
482{
483 struct fcoe_softc *fc;
484
485 fc = fcoe_from_ctlr(fip);
486 rtnl_lock();
487 if (!is_zero_ether_addr(old))
488 dev_unicast_delete(fc->real_dev, old, ETH_ALEN);
489 dev_unicast_add(fc->real_dev, new, ETH_ALEN);
490 rtnl_unlock();
491}
492
493/**
494 * fcoe_if_create() - this function creates the fcoe interface 543 * fcoe_if_create() - this function creates the fcoe interface
495 * @netdev: pointer the associated netdevice 544 * @netdev: pointer the associated netdevice
496 * 545 *
@@ -508,8 +557,7 @@ static int fcoe_if_create(struct net_device *netdev)
508 557
509 BUG_ON(!netdev); 558 BUG_ON(!netdev);
510 559
511 printk(KERN_DEBUG "fcoe_if_create:interface on %s\n", 560 FCOE_NETDEV_DBG(netdev, "Create Interface\n");
512 netdev->name);
513 561
514 lp = fcoe_hostlist_lookup(netdev); 562 lp = fcoe_hostlist_lookup(netdev);
515 if (lp) 563 if (lp)
@@ -518,7 +566,7 @@ static int fcoe_if_create(struct net_device *netdev)
518 shost = libfc_host_alloc(&fcoe_shost_template, 566 shost = libfc_host_alloc(&fcoe_shost_template,
519 sizeof(struct fcoe_softc)); 567 sizeof(struct fcoe_softc));
520 if (!shost) { 568 if (!shost) {
521 FC_DBG("Could not allocate host structure\n"); 569 FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
522 return -ENOMEM; 570 return -ENOMEM;
523 } 571 }
524 lp = shost_priv(shost); 572 lp = shost_priv(shost);
@@ -527,14 +575,8 @@ static int fcoe_if_create(struct net_device *netdev)
527 /* configure fc_lport, e.g., em */ 575 /* configure fc_lport, e.g., em */
528 rc = fcoe_lport_config(lp); 576 rc = fcoe_lport_config(lp);
529 if (rc) { 577 if (rc) {
530 FC_DBG("Could not configure lport\n"); 578 FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
531 goto out_host_put; 579 "interface\n");
532 }
533
534 /* configure lport network properties */
535 rc = fcoe_netdev_config(lp, netdev);
536 if (rc) {
537 FC_DBG("Could not configure netdev for lport\n");
538 goto out_host_put; 580 goto out_host_put;
539 } 581 }
540 582
@@ -545,29 +587,35 @@ static int fcoe_if_create(struct net_device *netdev)
545 fc->ctlr.send = fcoe_fip_send; 587 fc->ctlr.send = fcoe_fip_send;
546 fc->ctlr.update_mac = fcoe_update_src_mac; 588 fc->ctlr.update_mac = fcoe_update_src_mac;
547 589
548 fc->fip_packet_type.func = fcoe_fip_recv; 590 /* configure lport network properties */
549 fc->fip_packet_type.type = htons(ETH_P_FIP); 591 rc = fcoe_netdev_config(lp, netdev);
550 fc->fip_packet_type.dev = fc->real_dev; 592 if (rc) {
551 dev_add_pack(&fc->fip_packet_type); 593 FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the "
594 "interface\n");
595 goto out_netdev_cleanup;
596 }
552 597
553 /* configure lport scsi host properties */ 598 /* configure lport scsi host properties */
554 rc = fcoe_shost_config(lp, shost, &netdev->dev); 599 rc = fcoe_shost_config(lp, shost, &netdev->dev);
555 if (rc) { 600 if (rc) {
556 FC_DBG("Could not configure shost for lport\n"); 601 FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
557 goto out_host_put; 602 "interface\n");
603 goto out_netdev_cleanup;
558 } 604 }
559 605
560 /* lport exch manager allocation */ 606 /* lport exch manager allocation */
561 rc = fcoe_em_config(lp); 607 rc = fcoe_em_config(lp);
562 if (rc) { 608 if (rc) {
563 FC_DBG("Could not configure em for lport\n"); 609 FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the "
564 goto out_host_put; 610 "interface\n");
611 goto out_netdev_cleanup;
565 } 612 }
566 613
567 /* Initialize the library */ 614 /* Initialize the library */
568 rc = fcoe_libfc_config(lp, &fcoe_libfc_fcn_templ); 615 rc = fcoe_libfc_config(lp, &fcoe_libfc_fcn_templ);
569 if (rc) { 616 if (rc) {
570 FC_DBG("Could not configure libfc for lport!\n"); 617 FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
618 "interface\n");
571 goto out_lp_destroy; 619 goto out_lp_destroy;
572 } 620 }
573 621
@@ -587,6 +635,8 @@ static int fcoe_if_create(struct net_device *netdev)
587 635
588out_lp_destroy: 636out_lp_destroy:
589 fc_exch_mgr_free(lp->emp); /* Free the EM */ 637 fc_exch_mgr_free(lp->emp); /* Free the EM */
638out_netdev_cleanup:
639 fcoe_netdev_cleanup(fc);
590out_host_put: 640out_host_put:
591 scsi_host_put(lp->host); 641 scsi_host_put(lp->host);
592 return rc; 642 return rc;
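
A minimal user-space sketch of the staged goto-unwind pattern this hunk extends with the new out_netdev_cleanup label; the setup_*/teardown_* helpers below are hypothetical stand-ins, not the driver's functions:

    /* Staged configuration with layered error unwinding, as in fcoe_if_create(). */
    #include <stdio.h>

    static int setup_lport(void)  { return 0; }   /* pretend configuration steps */
    static int setup_netdev(void) { return 0; }
    static int setup_shost(void)  { return -1; }  /* force a mid-path failure */
    static void teardown_netdev(void) { puts("netdev cleanup"); }
    static void teardown_lport(void)  { puts("lport cleanup"); }

    static int create_interface(void)
    {
        int rc;

        rc = setup_lport();
        if (rc)
            goto out;            /* nothing configured yet, nothing to unwind */

        rc = setup_netdev();
        if (rc)
            goto out_lport;

        rc = setup_shost();
        if (rc)
            goto out_netdev;     /* later failures unwind the earlier stages */

        return 0;

    out_netdev:
        teardown_netdev();
    out_lport:
        teardown_lport();
    out:
        return rc;
    }

    int main(void)
    {
        return create_interface() ? 1 : 0;
    }
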
@@ -604,7 +654,7 @@ static int __init fcoe_if_init(void)
604 fc_attach_transport(&fcoe_transport_function); 654 fc_attach_transport(&fcoe_transport_function);
605 655
606 if (!scsi_transport_fcoe_sw) { 656 if (!scsi_transport_fcoe_sw) {
607 printk(KERN_ERR "fcoe_init:fc_attach_transport() failed\n"); 657 printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
608 return -ENODEV; 658 return -ENODEV;
609 } 659 }
610 660
@@ -665,7 +715,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
665 unsigned targ_cpu = smp_processor_id(); 715 unsigned targ_cpu = smp_processor_id();
666#endif /* CONFIG_SMP */ 716#endif /* CONFIG_SMP */
667 717
668 printk(KERN_DEBUG "fcoe: Destroying receive thread for CPU %d\n", cpu); 718 FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
669 719
670 /* Prevent any new skbs from being queued for this CPU. */ 720 /* Prevent any new skbs from being queued for this CPU. */
671 p = &per_cpu(fcoe_percpu, cpu); 721 p = &per_cpu(fcoe_percpu, cpu);
@@ -687,8 +737,8 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
687 p0 = &per_cpu(fcoe_percpu, targ_cpu); 737 p0 = &per_cpu(fcoe_percpu, targ_cpu);
688 spin_lock_bh(&p0->fcoe_rx_list.lock); 738 spin_lock_bh(&p0->fcoe_rx_list.lock);
689 if (p0->thread) { 739 if (p0->thread) {
690 FC_DBG("Moving frames from CPU %d to CPU %d\n", 740 FCOE_DBG("Moving frames from CPU %d to CPU %d\n",
691 cpu, targ_cpu); 741 cpu, targ_cpu);
692 742
693 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) 743 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
694 __skb_queue_tail(&p0->fcoe_rx_list, skb); 744 __skb_queue_tail(&p0->fcoe_rx_list, skb);
@@ -754,12 +804,12 @@ static int fcoe_cpu_callback(struct notifier_block *nfb,
754 switch (action) { 804 switch (action) {
755 case CPU_ONLINE: 805 case CPU_ONLINE:
756 case CPU_ONLINE_FROZEN: 806 case CPU_ONLINE_FROZEN:
757 FC_DBG("CPU %x online: Create Rx thread\n", cpu); 807 FCOE_DBG("CPU %x online: Create Rx thread\n", cpu);
758 fcoe_percpu_thread_create(cpu); 808 fcoe_percpu_thread_create(cpu);
759 break; 809 break;
760 case CPU_DEAD: 810 case CPU_DEAD:
761 case CPU_DEAD_FROZEN: 811 case CPU_DEAD_FROZEN:
762 FC_DBG("CPU %x offline: Remove Rx thread\n", cpu); 812 FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu);
763 fcoe_percpu_thread_destroy(cpu); 813 fcoe_percpu_thread_destroy(cpu);
764 break; 814 break;
765 default: 815 default:
@@ -797,24 +847,21 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
797 fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type); 847 fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
798 lp = fc->ctlr.lp; 848 lp = fc->ctlr.lp;
799 if (unlikely(lp == NULL)) { 849 if (unlikely(lp == NULL)) {
800 FC_DBG("cannot find hba structure"); 850 FCOE_NETDEV_DBG(dev, "Cannot find hba structure");
801 goto err2; 851 goto err2;
802 } 852 }
803 if (!lp->link_up) 853 if (!lp->link_up)
804 goto err2; 854 goto err2;
805 855
806 if (unlikely(debug_fcoe)) { 856 FCOE_NETDEV_DBG(dev, "skb_info: len:%d data_len:%d head:%p "
807 FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p " 857 "data:%p tail:%p end:%p sum:%d dev:%s",
808 "end:%p sum:%d dev:%s", skb->len, skb->data_len, 858 skb->len, skb->data_len, skb->head, skb->data,
809 skb->head, skb->data, skb_tail_pointer(skb), 859 skb_tail_pointer(skb), skb_end_pointer(skb),
810 skb_end_pointer(skb), skb->csum, 860 skb->csum, skb->dev ? skb->dev->name : "<NULL>");
811 skb->dev ? skb->dev->name : "<NULL>");
812
813 }
814 861
815 /* check for FCOE packet type */ 862 /* check for FCOE packet type */
816 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { 863 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
817 FC_DBG("wrong FC type frame"); 864 FCOE_NETDEV_DBG(dev, "Wrong FC type frame");
818 goto err; 865 goto err;
819 } 866 }
820 867
@@ -852,8 +899,9 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
852 * the first CPU now. For non-SMP systems this 899 * the first CPU now. For non-SMP systems this
853 * will check the same CPU twice. 900 * will check the same CPU twice.
854 */ 901 */
855 FC_DBG("CPU is online, but no receive thread ready " 902 FCOE_NETDEV_DBG(dev, "CPU is online, but no receive thread "
856 "for incoming skb- using first online CPU.\n"); 903 "ready for incoming skb- using first online "
904 "CPU.\n");
857 905
858 spin_unlock_bh(&fps->fcoe_rx_list.lock); 906 spin_unlock_bh(&fps->fcoe_rx_list.lock);
859 cpu = first_cpu(cpu_online_map); 907 cpu = first_cpu(cpu_online_map);
@@ -988,7 +1036,7 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
988 */ 1036 */
989int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) 1037int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
990{ 1038{
991 int wlen, rc = 0; 1039 int wlen;
992 u32 crc; 1040 u32 crc;
993 struct ethhdr *eh; 1041 struct ethhdr *eh;
994 struct fcoe_crc_eof *cp; 1042 struct fcoe_crc_eof *cp;
@@ -1021,8 +1069,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1021 sof = fr_sof(fp); 1069 sof = fr_sof(fp);
1022 eof = fr_eof(fp); 1070 eof = fr_eof(fp);
1023 1071
1024 elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ? 1072 elen = sizeof(struct ethhdr);
1025 sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
1026 hlen = sizeof(struct fcoe_hdr); 1073 hlen = sizeof(struct fcoe_hdr);
1027 tlen = sizeof(struct fcoe_crc_eof); 1074 tlen = sizeof(struct fcoe_crc_eof);
1028 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; 1075 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
@@ -1107,18 +1154,9 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1107 /* send down to lld */ 1154 /* send down to lld */
1108 fr_dev(fp) = lp; 1155 fr_dev(fp) = lp;
1109 if (fc->fcoe_pending_queue.qlen) 1156 if (fc->fcoe_pending_queue.qlen)
1110 rc = fcoe_check_wait_queue(lp); 1157 fcoe_check_wait_queue(lp, skb);
1111 1158 else if (fcoe_start_io(skb))
1112 if (rc == 0) 1159 fcoe_check_wait_queue(lp, skb);
1113 rc = fcoe_start_io(skb);
1114
1115 if (rc) {
1116 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1117 __skb_queue_tail(&fc->fcoe_pending_queue, skb);
1118 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1119 if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1120 lp->qfull = 1;
1121 }
1122 1160
1123 return 0; 1161 return 0;
1124} 1162}
@@ -1162,19 +1200,17 @@ int fcoe_percpu_receive_thread(void *arg)
1162 fr = fcoe_dev_from_skb(skb); 1200 fr = fcoe_dev_from_skb(skb);
1163 lp = fr->fr_dev; 1201 lp = fr->fr_dev;
1164 if (unlikely(lp == NULL)) { 1202 if (unlikely(lp == NULL)) {
1165 FC_DBG("invalid HBA Structure"); 1203 FCOE_NETDEV_DBG(skb->dev, "Invalid HBA Structure");
1166 kfree_skb(skb); 1204 kfree_skb(skb);
1167 continue; 1205 continue;
1168 } 1206 }
1169 1207
1170 if (unlikely(debug_fcoe)) { 1208 FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
1171 FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p " 1209 "head:%p data:%p tail:%p end:%p sum:%d dev:%s",
1172 "tail:%p end:%p sum:%d dev:%s", 1210 skb->len, skb->data_len,
1173 skb->len, skb->data_len, 1211 skb->head, skb->data, skb_tail_pointer(skb),
1174 skb->head, skb->data, skb_tail_pointer(skb), 1212 skb_end_pointer(skb), skb->csum,
1175 skb_end_pointer(skb), skb->csum, 1213 skb->dev ? skb->dev->name : "<NULL>");
1176 skb->dev ? skb->dev->name : "<NULL>");
1177 }
1178 1214
1179 /* 1215 /*
1180 * Save source MAC address before discarding header. 1216 * Save source MAC address before discarding header.
@@ -1194,7 +1230,7 @@ int fcoe_percpu_receive_thread(void *arg)
1194 stats = fc_lport_get_stats(lp); 1230 stats = fc_lport_get_stats(lp);
1195 if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { 1231 if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
1196 if (stats->ErrorFrames < 5) 1232 if (stats->ErrorFrames < 5)
1197 printk(KERN_WARNING "FCoE version " 1233 printk(KERN_WARNING "fcoe: FCoE version "
1198 "mismatch: The frame has " 1234 "mismatch: The frame has "
1199 "version %x, but the " 1235 "version %x, but the "
1200 "initiator supports version " 1236 "initiator supports version "
@@ -1247,7 +1283,7 @@ int fcoe_percpu_receive_thread(void *arg)
1247 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { 1283 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
1248 if (le32_to_cpu(fr_crc(fp)) != 1284 if (le32_to_cpu(fr_crc(fp)) !=
1249 ~crc32(~0, skb->data, fr_len)) { 1285 ~crc32(~0, skb->data, fr_len)) {
1250 if (debug_fcoe || stats->InvalidCRCCount < 5) 1286 if (stats->InvalidCRCCount < 5)
1251 printk(KERN_WARNING "fcoe: dropping " 1287 printk(KERN_WARNING "fcoe: dropping "
1252 "frame with CRC error\n"); 1288 "frame with CRC error\n");
1253 stats->InvalidCRCCount++; 1289 stats->InvalidCRCCount++;
@@ -1268,32 +1304,6 @@ int fcoe_percpu_receive_thread(void *arg)
1268} 1304}
1269 1305
1270/** 1306/**
1271 * fcoe_watchdog() - fcoe timer callback
1272 * @vp:
1273 *
1274 * This checks the pending queue length for fcoe and set lport qfull
1275 * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
1276 * fcoe_hostlist.
1277 *
1278 * Returns: 0 for success
1279 */
1280void fcoe_watchdog(ulong vp)
1281{
1282 struct fcoe_softc *fc;
1283
1284 read_lock(&fcoe_hostlist_lock);
1285 list_for_each_entry(fc, &fcoe_hostlist, list) {
1286 if (fc->ctlr.lp)
1287 fcoe_check_wait_queue(fc->ctlr.lp);
1288 }
1289 read_unlock(&fcoe_hostlist_lock);
1290
1291 fcoe_timer.expires = jiffies + (1 * HZ);
1292 add_timer(&fcoe_timer);
1293}
1294
1295
1296/**
1297 * fcoe_check_wait_queue() - attempt to clear the transmit backlog 1307 * fcoe_check_wait_queue() - attempt to clear the transmit backlog
1298 * @lp: the fc_lport 1308 * @lp: the fc_lport
1299 * 1309 *
@@ -1305,16 +1315,17 @@ void fcoe_watchdog(ulong vp)
1305 * The wait_queue is used when the skb transmit fails. skb will go 1315 * The wait_queue is used when the skb transmit fails. skb will go
1306 * in the wait_queue which will be emptied by the timer function or 1316 * in the wait_queue which will be emptied by the timer function or
1307 * by the next skb transmit. 1317 * by the next skb transmit.
1308 *
1309 * Returns: 0 for success
1310 */ 1318 */
1311static int fcoe_check_wait_queue(struct fc_lport *lp) 1319static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
1312{ 1320{
1313 struct fcoe_softc *fc = lport_priv(lp); 1321 struct fcoe_softc *fc = lport_priv(lp);
1314 struct sk_buff *skb; 1322 int rc;
1315 int rc = -1;
1316 1323
1317 spin_lock_bh(&fc->fcoe_pending_queue.lock); 1324 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1325
1326 if (skb)
1327 __skb_queue_tail(&fc->fcoe_pending_queue, skb);
1328
1318 if (fc->fcoe_pending_queue_active) 1329 if (fc->fcoe_pending_queue_active)
1319 goto out; 1330 goto out;
1320 fc->fcoe_pending_queue_active = 1; 1331 fc->fcoe_pending_queue_active = 1;
@@ -1340,23 +1351,26 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
1340 1351
1341 if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) 1352 if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
1342 lp->qfull = 0; 1353 lp->qfull = 0;
1354 if (fc->fcoe_pending_queue.qlen && !timer_pending(&fc->timer))
1355 mod_timer(&fc->timer, jiffies + 2);
1343 fc->fcoe_pending_queue_active = 0; 1356 fc->fcoe_pending_queue_active = 0;
1344 rc = fc->fcoe_pending_queue.qlen;
1345out: 1357out:
1358 if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1359 lp->qfull = 1;
1346 spin_unlock_bh(&fc->fcoe_pending_queue.lock); 1360 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1347 return rc; 1361 return;
1348} 1362}
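
A rough user-space model of the reworked backlog handling, where fcoe_xmit now hands a deferred frame straight to the flush helper that owns the qfull thresholds and the retry timer; the depths, try_hw_send() and the counters below are invented for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_DEPTH 256
    #define LOW_DEPTH  32

    static int backlog;        /* stands in for fcoe_pending_queue.qlen */
    static bool qfull;         /* stands in for lp->qfull */
    static bool timer_armed;   /* stands in for the per-interface queue timer */
    static bool flush_active;  /* stands in for fcoe_pending_queue_active */

    static bool try_hw_send(void) { return backlog < 3; } /* pretend the NIC backs up */

    static void check_wait_queue(bool enqueue_one)
    {
        if (enqueue_one)
            backlog++;                    /* queue the frame that could not go out */

        if (!flush_active) {
            flush_active = true;
            while (backlog && try_hw_send())
                backlog--;                /* drain as much as the hardware accepts */
            if (backlog < LOW_DEPTH)
                qfull = false;
            if (backlog && !timer_armed)
                timer_armed = true;       /* retry later instead of busy-waiting */
            flush_active = false;
        }

        if (backlog > MAX_DEPTH)
            qfull = true;                 /* push back on the SCSI midlayer */
    }

    int main(void)
    {
        for (int i = 0; i < 5; i++)
            check_wait_queue(true);
        printf("backlog=%d qfull=%d timer=%d\n", backlog, qfull, timer_armed);
        return 0;
    }
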
1349 1363
1350/** 1364/**
1351 * fcoe_dev_setup() - setup link change notification interface 1365 * fcoe_dev_setup() - setup link change notification interface
1352 */ 1366 */
1353static void fcoe_dev_setup() 1367static void fcoe_dev_setup(void)
1354{ 1368{
1355 register_netdevice_notifier(&fcoe_notifier); 1369 register_netdevice_notifier(&fcoe_notifier);
1356} 1370}
1357 1371
1358/** 1372/**
1359 * fcoe_dev_setup() - cleanup link change notification interface 1373 * fcoe_dev_cleanup() - cleanup link change notification interface
1360 */ 1374 */
1361static void fcoe_dev_cleanup(void) 1375static void fcoe_dev_cleanup(void)
1362{ 1376{
@@ -1415,7 +1429,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
1415 case NETDEV_REGISTER: 1429 case NETDEV_REGISTER:
1416 break; 1430 break;
1417 default: 1431 default:
1418 FC_DBG("Unknown event %ld from netdev netlink\n", event); 1432 FCOE_NETDEV_DBG(real_dev, "Unknown event %ld "
1433 "from netdev netlink\n", event);
1419 } 1434 }
1420 if (link_possible && !fcoe_link_ok(lp)) 1435 if (link_possible && !fcoe_link_ok(lp))
1421 fcoe_ctlr_link_up(&fc->ctlr); 1436 fcoe_ctlr_link_up(&fc->ctlr);
@@ -1488,8 +1503,8 @@ static int fcoe_ethdrv_get(const struct net_device *netdev)
1488 1503
1489 owner = fcoe_netdev_to_module_owner(netdev); 1504 owner = fcoe_netdev_to_module_owner(netdev);
1490 if (owner) { 1505 if (owner) {
1491 printk(KERN_DEBUG "fcoe:hold driver module %s for %s\n", 1506 FCOE_NETDEV_DBG(netdev, "Hold driver module %s\n",
1492 module_name(owner), netdev->name); 1507 module_name(owner));
1493 return try_module_get(owner); 1508 return try_module_get(owner);
1494 } 1509 }
1495 return -ENODEV; 1510 return -ENODEV;
@@ -1510,8 +1525,8 @@ static int fcoe_ethdrv_put(const struct net_device *netdev)
1510 1525
1511 owner = fcoe_netdev_to_module_owner(netdev); 1526 owner = fcoe_netdev_to_module_owner(netdev);
1512 if (owner) { 1527 if (owner) {
1513 printk(KERN_DEBUG "fcoe:release driver module %s for %s\n", 1528 FCOE_NETDEV_DBG(netdev, "Release driver module %s\n",
1514 module_name(owner), netdev->name); 1529 module_name(owner));
1515 module_put(owner); 1530 module_put(owner);
1516 return 0; 1531 return 0;
1517 } 1532 }
@@ -1542,7 +1557,7 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
1542 } 1557 }
1543 rc = fcoe_if_destroy(netdev); 1558 rc = fcoe_if_destroy(netdev);
1544 if (rc) { 1559 if (rc) {
1545 printk(KERN_ERR "fcoe: fcoe_if_destroy(%s) failed\n", 1560 printk(KERN_ERR "fcoe: Failed to destroy interface (%s)\n",
1546 netdev->name); 1561 netdev->name);
1547 rc = -EIO; 1562 rc = -EIO;
1548 goto out_putdev; 1563 goto out_putdev;
@@ -1581,7 +1596,7 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
1581 1596
1582 rc = fcoe_if_create(netdev); 1597 rc = fcoe_if_create(netdev);
1583 if (rc) { 1598 if (rc) {
1584 printk(KERN_ERR "fcoe: fcoe_if_create(%s) failed\n", 1599 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
1585 netdev->name); 1600 netdev->name);
1586 fcoe_ethdrv_put(netdev); 1601 fcoe_ethdrv_put(netdev);
1587 rc = -EIO; 1602 rc = -EIO;
@@ -1815,10 +1830,6 @@ static int __init fcoe_init(void)
1815 /* Setup link change notification */ 1830 /* Setup link change notification */
1816 fcoe_dev_setup(); 1831 fcoe_dev_setup();
1817 1832
1818 setup_timer(&fcoe_timer, fcoe_watchdog, 0);
1819
1820 mod_timer(&fcoe_timer, jiffies + (10 * HZ));
1821
1822 fcoe_if_init(); 1833 fcoe_if_init();
1823 1834
1824 return 0; 1835 return 0;
@@ -1844,9 +1855,6 @@ static void __exit fcoe_exit(void)
1844 1855
1845 fcoe_dev_cleanup(); 1856 fcoe_dev_cleanup();
1846 1857
1847 /* Stop the timer */
1848 del_timer_sync(&fcoe_timer);
1849
1850 /* releases the associated fcoe hosts */ 1858 /* releases the associated fcoe hosts */
1851 list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list) 1859 list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
1852 fcoe_if_destroy(fc->real_dev); 1860 fcoe_if_destroy(fc->real_dev);
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 917aae886897..0d724fa0898f 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -40,6 +40,30 @@
40#define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */ 40#define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */
41#define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */ 41#define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */
42 42
43unsigned int fcoe_debug_logging;
44module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
45MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
46
47#define FCOE_LOGGING 0x01 /* General logging, not categorized */
48#define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */
49
50#define FCOE_CHECK_LOGGING(LEVEL, CMD) \
51do { \
52 if (unlikely(fcoe_debug_logging & LEVEL)) \
53 do { \
54 CMD; \
55 } while (0); \
56} while (0);
57
58#define FCOE_DBG(fmt, args...) \
59 FCOE_CHECK_LOGGING(FCOE_LOGGING, \
60 printk(KERN_INFO "fcoe: " fmt, ##args);)
61
62#define FCOE_NETDEV_DBG(netdev, fmt, args...) \
63 FCOE_CHECK_LOGGING(FCOE_NETDEV_LOGGING, \
64 printk(KERN_INFO "fcoe: %s" fmt, \
65 netdev->name, ##args);)
66
43/* 67/*
44 * this percpu struct for fcoe 68 * this percpu struct for fcoe
45 */ 69 */
@@ -61,6 +85,7 @@ struct fcoe_softc {
61 struct packet_type fip_packet_type; 85 struct packet_type fip_packet_type;
62 struct sk_buff_head fcoe_pending_queue; 86 struct sk_buff_head fcoe_pending_queue;
63 u8 fcoe_pending_queue_active; 87 u8 fcoe_pending_queue_active;
88 struct timer_list timer; /* queue timer */
64 struct fcoe_ctlr ctlr; 89 struct fcoe_ctlr ctlr;
65}; 90};
66 91
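
A stand-alone sketch of the bit-mask logging scheme added to fcoe.h, with made-up mask names and messages; compile with gcc (the ##__VA_ARGS__ form is a GNU extension, mirroring the kernel's named-argument variant):

    #include <stdio.h>

    static unsigned int debug_logging;            /* would be a module parameter */

    #define LOG_GENERAL 0x01
    #define LOG_NETDEV  0x02

    #define CHECK_LOGGING(level, cmd)      \
    do {                                   \
        if (debug_logging & (level))       \
            cmd;                           \
    } while (0)

    #define DBG(fmt, ...) \
        CHECK_LOGGING(LOG_GENERAL, printf("fcoe: " fmt, ##__VA_ARGS__))
    #define NETDEV_DBG(name, fmt, ...) \
        CHECK_LOGGING(LOG_NETDEV, printf("fcoe: %s: " fmt, (name), ##__VA_ARGS__))

    int main(void)
    {
        DBG("suppressed while the mask is zero\n");
        debug_logging = LOG_GENERAL | LOG_NETDEV;  /* e.g. echo 3 > debug_logging */
        DBG("general message, cpu %d\n", 0);
        NETDEV_DBG("eth0", "per-netdev message\n");
        return 0;
    }
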
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index 62ba0f39c6bd..f544340d318b 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -56,15 +56,28 @@ static void fcoe_ctlr_recv_work(struct work_struct *);
56 56
57static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; 57static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
58 58
59static u32 fcoe_ctlr_debug; /* 1 for basic, 2 for noisy debug */ 59unsigned int libfcoe_debug_logging;
60module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR);
61MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
60 62
61#define FIP_DBG_LVL(level, fmt, args...) \ 63#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
64#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
65
66#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \
67do { \
68 if (unlikely(libfcoe_debug_logging & LEVEL)) \
62 do { \ 69 do { \
63 if (fcoe_ctlr_debug >= (level)) \ 70 CMD; \
64 FC_DBG(fmt, ##args); \ 71 } while (0); \
65 } while (0) 72} while (0);
73
74#define LIBFCOE_DBG(fmt, args...) \
75 LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \
76 printk(KERN_INFO "libfcoe: " fmt, ##args);)
66 77
67#define FIP_DBG(fmt, args...) FIP_DBG_LVL(1, fmt, ##args) 78#define LIBFCOE_FIP_DBG(fmt, args...) \
79 LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \
80 printk(KERN_INFO "fip: " fmt, ##args);)
68 81
69/* 82/*
70 * Return non-zero if FCF fcoe_size has been validated. 83 * Return non-zero if FCF fcoe_size has been validated.
@@ -198,6 +211,8 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
198 sol->fip.fip_subcode = FIP_SC_SOL; 211 sol->fip.fip_subcode = FIP_SC_SOL;
199 sol->fip.fip_dl_len = htons(sizeof(sol->desc) / FIP_BPW); 212 sol->fip.fip_dl_len = htons(sizeof(sol->desc) / FIP_BPW);
200 sol->fip.fip_flags = htons(FIP_FL_FPMA); 213 sol->fip.fip_flags = htons(FIP_FL_FPMA);
214 if (fip->spma)
215 sol->fip.fip_flags |= htons(FIP_FL_SPMA);
201 216
202 sol->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC; 217 sol->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
203 sol->desc.mac.fd_desc.fip_dlen = sizeof(sol->desc.mac) / FIP_BPW; 218 sol->desc.mac.fd_desc.fip_dlen = sizeof(sol->desc.mac) / FIP_BPW;
@@ -213,7 +228,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
213 sol->desc.size.fd_size = htons(fcoe_size); 228 sol->desc.size.fd_size = htons(fcoe_size);
214 229
215 skb_put(skb, sizeof(*sol)); 230 skb_put(skb, sizeof(*sol));
216 skb->protocol = htons(ETH_P_802_3); 231 skb->protocol = htons(ETH_P_FIP);
217 skb_reset_mac_header(skb); 232 skb_reset_mac_header(skb);
218 skb_reset_network_header(skb); 233 skb_reset_network_header(skb);
219 fip->send(fip, skb); 234 fip->send(fip, skb);
@@ -241,7 +256,7 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip)
241 fip->last_link = 1; 256 fip->last_link = 1;
242 fip->link = 1; 257 fip->link = 1;
243 spin_unlock_bh(&fip->lock); 258 spin_unlock_bh(&fip->lock);
244 FIP_DBG("%s", "setting AUTO mode.\n"); 259 LIBFCOE_FIP_DBG("%s", "setting AUTO mode.\n");
245 fc_linkup(fip->lp); 260 fc_linkup(fip->lp);
246 fcoe_ctlr_solicit(fip, NULL); 261 fcoe_ctlr_solicit(fip, NULL);
247 } else 262 } else
@@ -350,6 +365,8 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
350 kal->fip.fip_dl_len = htons((sizeof(kal->mac) + 365 kal->fip.fip_dl_len = htons((sizeof(kal->mac) +
351 ports * sizeof(*vn)) / FIP_BPW); 366 ports * sizeof(*vn)) / FIP_BPW);
352 kal->fip.fip_flags = htons(FIP_FL_FPMA); 367 kal->fip.fip_flags = htons(FIP_FL_FPMA);
368 if (fip->spma)
369 kal->fip.fip_flags |= htons(FIP_FL_SPMA);
353 370
354 kal->mac.fd_desc.fip_dtype = FIP_DT_MAC; 371 kal->mac.fd_desc.fip_dtype = FIP_DT_MAC;
355 kal->mac.fd_desc.fip_dlen = sizeof(kal->mac) / FIP_BPW; 372 kal->mac.fd_desc.fip_dlen = sizeof(kal->mac) / FIP_BPW;
@@ -365,7 +382,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
365 } 382 }
366 383
367 skb_put(skb, len); 384 skb_put(skb, len);
368 skb->protocol = htons(ETH_P_802_3); 385 skb->protocol = htons(ETH_P_FIP);
369 skb_reset_mac_header(skb); 386 skb_reset_mac_header(skb);
370 skb_reset_network_header(skb); 387 skb_reset_network_header(skb);
371 fip->send(fip, skb); 388 fip->send(fip, skb);
@@ -413,6 +430,8 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
413 cap->fip.fip_subcode = FIP_SC_REQ; 430 cap->fip.fip_subcode = FIP_SC_REQ;
414 cap->fip.fip_dl_len = htons((dlen + sizeof(*mac)) / FIP_BPW); 431 cap->fip.fip_dl_len = htons((dlen + sizeof(*mac)) / FIP_BPW);
415 cap->fip.fip_flags = htons(FIP_FL_FPMA); 432 cap->fip.fip_flags = htons(FIP_FL_FPMA);
433 if (fip->spma)
434 cap->fip.fip_flags |= htons(FIP_FL_SPMA);
416 435
417 cap->encaps.fd_desc.fip_dtype = dtype; 436 cap->encaps.fd_desc.fip_dtype = dtype;
418 cap->encaps.fd_desc.fip_dlen = dlen / FIP_BPW; 437 cap->encaps.fd_desc.fip_dlen = dlen / FIP_BPW;
@@ -421,10 +440,12 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
421 memset(mac, 0, sizeof(mac)); 440 memset(mac, 0, sizeof(mac));
422 mac->fd_desc.fip_dtype = FIP_DT_MAC; 441 mac->fd_desc.fip_dtype = FIP_DT_MAC;
423 mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW; 442 mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW;
424 if (dtype != ELS_FLOGI) 443 if (dtype != FIP_DT_FLOGI)
425 memcpy(mac->fd_mac, fip->data_src_addr, ETH_ALEN); 444 memcpy(mac->fd_mac, fip->data_src_addr, ETH_ALEN);
445 else if (fip->spma)
446 memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN);
426 447
427 skb->protocol = htons(ETH_P_802_3); 448 skb->protocol = htons(ETH_P_FIP);
428 skb_reset_mac_header(skb); 449 skb_reset_mac_header(skb);
429 skb_reset_network_header(skb); 450 skb_reset_network_header(skb);
430 return 0; 451 return 0;
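
A tiny sketch of how the FPMA/SPMA flag word is assembled in network byte order, as in the three hunks above; the flag values used here are illustrative rather than taken from the FIP header:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FL_FPMA 0x8000    /* fabric-provided MAC addressing supported */
    #define FL_SPMA 0x4000    /* server-provided MAC addressing supported */

    int main(void)
    {
        int spma = 1;                        /* mirrors fip->spma */
        uint16_t flags = htons(FL_FPMA);     /* always advertise FPMA */

        if (spma)
            flags |= htons(FL_SPMA);         /* optionally advertise SPMA too */

        printf("fip_flags on the wire: 0x%04x\n", ntohs(flags));
        return 0;
    }
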
@@ -447,14 +468,10 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
447 u16 old_xid; 468 u16 old_xid;
448 u8 op; 469 u8 op;
449 470
450 if (fip->state == FIP_ST_NON_FIP)
451 return 0;
452
453 fh = (struct fc_frame_header *)skb->data; 471 fh = (struct fc_frame_header *)skb->data;
454 op = *(u8 *)(fh + 1); 472 op = *(u8 *)(fh + 1);
455 473
456 switch (op) { 474 if (op == ELS_FLOGI) {
457 case ELS_FLOGI:
458 old_xid = fip->flogi_oxid; 475 old_xid = fip->flogi_oxid;
459 fip->flogi_oxid = ntohs(fh->fh_ox_id); 476 fip->flogi_oxid = ntohs(fh->fh_ox_id);
460 if (fip->state == FIP_ST_AUTO) { 477 if (fip->state == FIP_ST_AUTO) {
@@ -466,6 +483,15 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
466 fip->map_dest = 1; 483 fip->map_dest = 1;
467 return 0; 484 return 0;
468 } 485 }
486 if (fip->state == FIP_ST_NON_FIP)
487 fip->map_dest = 1;
488 }
489
490 if (fip->state == FIP_ST_NON_FIP)
491 return 0;
492
493 switch (op) {
494 case ELS_FLOGI:
469 op = FIP_DT_FLOGI; 495 op = FIP_DT_FLOGI;
470 break; 496 break;
471 case ELS_FDISC: 497 case ELS_FDISC:
@@ -601,7 +627,8 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf)
601 ((struct fip_mac_desc *)desc)->fd_mac, 627 ((struct fip_mac_desc *)desc)->fd_mac,
602 ETH_ALEN); 628 ETH_ALEN);
603 if (!is_valid_ether_addr(fcf->fcf_mac)) { 629 if (!is_valid_ether_addr(fcf->fcf_mac)) {
604 FIP_DBG("invalid MAC addr in FIP adv\n"); 630 LIBFCOE_FIP_DBG("Invalid MAC address "
631 "in FIP adv\n");
605 return -EINVAL; 632 return -EINVAL;
606 } 633 }
607 break; 634 break;
@@ -634,8 +661,8 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf)
634 case FIP_DT_LOGO: 661 case FIP_DT_LOGO:
635 case FIP_DT_ELP: 662 case FIP_DT_ELP:
636 default: 663 default:
637 FIP_DBG("unexpected descriptor type %x in FIP adv\n", 664 LIBFCOE_FIP_DBG("unexpected descriptor type %x "
638 desc->fip_dtype); 665 "in FIP adv\n", desc->fip_dtype);
639 /* standard says ignore unknown descriptors >= 128 */ 666 /* standard says ignore unknown descriptors >= 128 */
640 if (desc->fip_dtype < FIP_DT_VENDOR_BASE) 667 if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
641 return -EINVAL; 668 return -EINVAL;
@@ -651,8 +678,8 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf)
651 return 0; 678 return 0;
652 679
653len_err: 680len_err:
654 FIP_DBG("FIP length error in descriptor type %x len %zu\n", 681 LIBFCOE_FIP_DBG("FIP length error in descriptor type %x len %zu\n",
655 desc->fip_dtype, dlen); 682 desc->fip_dtype, dlen);
656 return -EINVAL; 683 return -EINVAL;
657} 684}
658 685
@@ -715,9 +742,10 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
715 } 742 }
716 mtu_valid = fcoe_ctlr_mtu_valid(fcf); 743 mtu_valid = fcoe_ctlr_mtu_valid(fcf);
717 fcf->time = jiffies; 744 fcf->time = jiffies;
718 FIP_DBG_LVL(found ? 2 : 1, "%s FCF for fab %llx map %x val %d\n", 745 if (!found) {
719 found ? "old" : "new", 746 LIBFCOE_FIP_DBG("New FCF for fab %llx map %x val %d\n",
720 fcf->fabric_name, fcf->fc_map, mtu_valid); 747 fcf->fabric_name, fcf->fc_map, mtu_valid);
748 }
721 749
722 /* 750 /*
723 * If this advertisement is not solicited and our max receive size 751 * If this advertisement is not solicited and our max receive size
@@ -794,7 +822,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
794 ((struct fip_mac_desc *)desc)->fd_mac, 822 ((struct fip_mac_desc *)desc)->fd_mac,
795 ETH_ALEN); 823 ETH_ALEN);
796 if (!is_valid_ether_addr(granted_mac)) { 824 if (!is_valid_ether_addr(granted_mac)) {
797 FIP_DBG("invalid MAC addrs in FIP ELS\n"); 825 LIBFCOE_FIP_DBG("Invalid MAC address "
826 "in FIP ELS\n");
798 goto drop; 827 goto drop;
799 } 828 }
800 break; 829 break;
@@ -812,8 +841,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
812 els_dtype = desc->fip_dtype; 841 els_dtype = desc->fip_dtype;
813 break; 842 break;
814 default: 843 default:
815 FIP_DBG("unexpected descriptor type %x " 844 LIBFCOE_FIP_DBG("unexpected descriptor type %x "
816 "in FIP adv\n", desc->fip_dtype); 845 "in FIP adv\n", desc->fip_dtype);
817 /* standard says ignore unknown descriptors >= 128 */ 846 /* standard says ignore unknown descriptors >= 128 */
818 if (desc->fip_dtype < FIP_DT_VENDOR_BASE) 847 if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
819 goto drop; 848 goto drop;
@@ -854,8 +883,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
854 return; 883 return;
855 884
856len_err: 885len_err:
857 FIP_DBG("FIP length error in descriptor type %x len %zu\n", 886 LIBFCOE_FIP_DBG("FIP length error in descriptor type %x len %zu\n",
858 desc->fip_dtype, dlen); 887 desc->fip_dtype, dlen);
859drop: 888drop:
860 kfree_skb(skb); 889 kfree_skb(skb);
861} 890}
@@ -881,7 +910,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
881 struct fc_lport *lp = fip->lp; 910 struct fc_lport *lp = fip->lp;
882 u32 desc_mask; 911 u32 desc_mask;
883 912
884 FIP_DBG("Clear Virtual Link received\n"); 913 LIBFCOE_FIP_DBG("Clear Virtual Link received\n");
885 if (!fcf) 914 if (!fcf)
886 return; 915 return;
887 if (!fcf || !fc_host_port_id(lp->host)) 916 if (!fcf || !fc_host_port_id(lp->host))
@@ -939,9 +968,9 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
939 * reset only if all required descriptors were present and valid. 968 * reset only if all required descriptors were present and valid.
940 */ 969 */
941 if (desc_mask) { 970 if (desc_mask) {
942 FIP_DBG("missing descriptors mask %x\n", desc_mask); 971 LIBFCOE_FIP_DBG("missing descriptors mask %x\n", desc_mask);
943 } else { 972 } else {
944 FIP_DBG("performing Clear Virtual Link\n"); 973 LIBFCOE_FIP_DBG("performing Clear Virtual Link\n");
945 fcoe_ctlr_reset(fip, FIP_ST_ENABLED); 974 fcoe_ctlr_reset(fip, FIP_ST_ENABLED);
946 } 975 }
947} 976}
@@ -989,10 +1018,6 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
989 op = ntohs(fiph->fip_op); 1018 op = ntohs(fiph->fip_op);
990 sub = fiph->fip_subcode; 1019 sub = fiph->fip_subcode;
991 1020
992 FIP_DBG_LVL(2, "ver %x op %x/%x dl %x fl %x\n",
993 FIP_VER_DECAPS(fiph->fip_ver), op, sub,
994 ntohs(fiph->fip_dl_len), ntohs(fiph->fip_flags));
995
996 if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER) 1021 if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
997 goto drop; 1022 goto drop;
998 if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) 1023 if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
@@ -1004,7 +1029,7 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
1004 fip->map_dest = 0; 1029 fip->map_dest = 0;
1005 fip->state = FIP_ST_ENABLED; 1030 fip->state = FIP_ST_ENABLED;
1006 state = FIP_ST_ENABLED; 1031 state = FIP_ST_ENABLED;
1007 FIP_DBG("using FIP mode\n"); 1032 LIBFCOE_FIP_DBG("Using FIP mode\n");
1008 } 1033 }
1009 spin_unlock_bh(&fip->lock); 1034 spin_unlock_bh(&fip->lock);
1010 if (state != FIP_ST_ENABLED) 1035 if (state != FIP_ST_ENABLED)
@@ -1039,14 +1064,15 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip)
1039 struct fcoe_fcf *best = NULL; 1064 struct fcoe_fcf *best = NULL;
1040 1065
1041 list_for_each_entry(fcf, &fip->fcfs, list) { 1066 list_for_each_entry(fcf, &fip->fcfs, list) {
1042 FIP_DBG("consider FCF for fab %llx VFID %d map %x val %d\n", 1067 LIBFCOE_FIP_DBG("consider FCF for fab %llx VFID %d map %x "
1043 fcf->fabric_name, fcf->vfid, 1068 "val %d\n", fcf->fabric_name, fcf->vfid,
1044 fcf->fc_map, fcoe_ctlr_mtu_valid(fcf)); 1069 fcf->fc_map, fcoe_ctlr_mtu_valid(fcf));
1045 if (!fcoe_ctlr_fcf_usable(fcf)) { 1070 if (!fcoe_ctlr_fcf_usable(fcf)) {
1046 FIP_DBG("FCF for fab %llx map %x %svalid %savailable\n", 1071 LIBFCOE_FIP_DBG("FCF for fab %llx map %x %svalid "
1047 fcf->fabric_name, fcf->fc_map, 1072 "%savailable\n", fcf->fabric_name,
1048 (fcf->flags & FIP_FL_SOL) ? "" : "in", 1073 fcf->fc_map, (fcf->flags & FIP_FL_SOL)
1049 (fcf->flags & FIP_FL_AVAIL) ? "" : "un"); 1074 ? "" : "in", (fcf->flags & FIP_FL_AVAIL)
1075 ? "" : "un");
1050 continue; 1076 continue;
1051 } 1077 }
1052 if (!best) { 1078 if (!best) {
@@ -1056,7 +1082,8 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip)
1056 if (fcf->fabric_name != best->fabric_name || 1082 if (fcf->fabric_name != best->fabric_name ||
1057 fcf->vfid != best->vfid || 1083 fcf->vfid != best->vfid ||
1058 fcf->fc_map != best->fc_map) { 1084 fcf->fc_map != best->fc_map) {
1059 FIP_DBG("conflicting fabric, VFID, or FC-MAP\n"); 1085 LIBFCOE_FIP_DBG("Conflicting fabric, VFID, "
1086 "or FC-MAP\n");
1060 return; 1087 return;
1061 } 1088 }
1062 if (fcf->pri < best->pri) 1089 if (fcf->pri < best->pri)
@@ -1100,7 +1127,7 @@ static void fcoe_ctlr_timeout(unsigned long arg)
1100 if (sel != fcf) { 1127 if (sel != fcf) {
1101 fcf = sel; /* the old FCF may have been freed */ 1128 fcf = sel; /* the old FCF may have been freed */
1102 if (sel) { 1129 if (sel) {
1103 printk(KERN_INFO "host%d: FIP selected " 1130 printk(KERN_INFO "libfcoe: host%d: FIP selected "
1104 "Fibre-Channel Forwarder MAC %s\n", 1131 "Fibre-Channel Forwarder MAC %s\n",
1105 fip->lp->host->host_no, 1132 fip->lp->host->host_no,
1106 print_mac(buf, sel->fcf_mac)); 1133 print_mac(buf, sel->fcf_mac));
@@ -1110,7 +1137,7 @@ static void fcoe_ctlr_timeout(unsigned long arg)
1110 fip->ctlr_ka_time = jiffies + sel->fka_period; 1137 fip->ctlr_ka_time = jiffies + sel->fka_period;
1111 fip->link = 1; 1138 fip->link = 1;
1112 } else { 1139 } else {
1113 printk(KERN_NOTICE "host%d: " 1140 printk(KERN_NOTICE "libfcoe: host%d: "
1114 "FIP Fibre-Channel Forwarder timed out. " 1141 "FIP Fibre-Channel Forwarder timed out. "
1115 "Starting FCF discovery.\n", 1142 "Starting FCF discovery.\n",
1116 fip->lp->host->host_no); 1143 fip->lp->host->host_no);
@@ -1234,7 +1261,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa)
1234 return -EINVAL; 1261 return -EINVAL;
1235 } 1262 }
1236 fip->state = FIP_ST_NON_FIP; 1263 fip->state = FIP_ST_NON_FIP;
1237 FIP_DBG("received FLOGI LS_ACC using non-FIP mode\n"); 1264 LIBFCOE_FIP_DBG("received FLOGI LS_ACC using non-FIP mode\n");
1238 1265
1239 /* 1266 /*
1240 * FLOGI accepted. 1267 * FLOGI accepted.
@@ -1263,7 +1290,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa)
1263 memcpy(fip->dest_addr, sa, ETH_ALEN); 1290 memcpy(fip->dest_addr, sa, ETH_ALEN);
1264 fip->map_dest = 0; 1291 fip->map_dest = 0;
1265 if (fip->state == FIP_ST_NON_FIP) 1292 if (fip->state == FIP_ST_NON_FIP)
1266 FIP_DBG("received FLOGI REQ, " 1293 LIBFCOE_FIP_DBG("received FLOGI REQ, "
1267 "using non-FIP mode\n"); 1294 "using non-FIP mode\n");
1268 fip->state = FIP_ST_NON_FIP; 1295 fip->state = FIP_ST_NON_FIP;
1269 } 1296 }
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 32ef6b87d895..a84072865fc2 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -680,6 +680,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
680 } 680 }
681 681
682 lp->max_retry_count = fnic->config.flogi_retries; 682 lp->max_retry_count = fnic->config.flogi_retries;
683 lp->max_rport_retry_count = fnic->config.plogi_retries;
683 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | 684 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
684 FCP_SPPF_CONF_COMPL); 685 FCP_SPPF_CONF_COMPL);
685 if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) 686 if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 59349a316e13..1258da34fbc2 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -152,6 +152,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
152 struct Scsi_Host *host, gdth_ha_str *ha) 152 struct Scsi_Host *host, gdth_ha_str *ha)
153{ 153{
154 int size = 0,len = 0; 154 int size = 0,len = 0;
155 int hlen;
155 off_t begin = 0,pos = 0; 156 off_t begin = 0,pos = 0;
156 int id, i, j, k, sec, flag; 157 int id, i, j, k, sec, flag;
157 int no_mdrv = 0, drv_no, is_mirr; 158 int no_mdrv = 0, drv_no, is_mirr;
@@ -192,11 +193,11 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
192 if (reserve_list[0] == 0xff) 193 if (reserve_list[0] == 0xff)
193 strcpy(hrec, "--"); 194 strcpy(hrec, "--");
194 else { 195 else {
195 sprintf(hrec, "%d", reserve_list[0]); 196 hlen = sprintf(hrec, "%d", reserve_list[0]);
196 for (i = 1; i < MAX_RES_ARGS; i++) { 197 for (i = 1; i < MAX_RES_ARGS; i++) {
197 if (reserve_list[i] == 0xff) 198 if (reserve_list[i] == 0xff)
198 break; 199 break;
199 sprintf(hrec,"%s,%d", hrec, reserve_list[i]); 200 hlen += snprintf(hrec + hlen , 161 - hlen, ",%d", reserve_list[i]);
200 } 201 }
201 } 202 }
202 size = sprintf(buffer+len, 203 size = sprintf(buffer+len,
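
A compilable sketch of the safer string building introduced above: the old sprintf(hrec, "%s,%d", hrec, ...) reads and writes the same buffer, so the fix tracks an offset and appends with a bounded snprintf (the patch uses a literal 161 bound; the list below is invented):

    #include <stdio.h>

    int main(void)
    {
        unsigned char reserve_list[] = { 4, 7, 9, 0xff };  /* 0xff terminates */
        char hrec[161];
        int hlen, i;

        hlen = sprintf(hrec, "%d", reserve_list[0]);
        for (i = 1; reserve_list[i] != 0xff; i++)
            hlen += snprintf(hrec + hlen, sizeof(hrec) - hlen,
                             ",%d", reserve_list[i]);

        printf("%s\n", hrec);   /* prints "4,7,9" */
        return 0;
    }
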
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 89d41a424b33..5fd2da494d08 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -40,7 +40,7 @@
40#include "scsi_logging.h" 40#include "scsi_logging.h"
41 41
42 42
43static int scsi_host_next_hn; /* host_no for next new host */ 43static atomic_t scsi_host_next_hn; /* host_no for next new host */
44 44
45 45
46static void scsi_host_cls_release(struct device *dev) 46static void scsi_host_cls_release(struct device *dev)
@@ -333,7 +333,11 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
333 333
334 mutex_init(&shost->scan_mutex); 334 mutex_init(&shost->scan_mutex);
335 335
336 shost->host_no = scsi_host_next_hn++; /* XXX(hch): still racy */ 336 /*
337 * subtract one because we increment first then return, but we need to
338 * know what the next host number was before increment
339 */
340 shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
337 shost->dma_channel = 0xff; 341 shost->dma_channel = 0xff;
338 342
339 /* These three are default values which can be overridden */ 343 /* These three are default values which can be overridden */
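
A user-space illustration of the host-number fix: a plain post-increment is replaced by an atomic fetch-and-add, which is what atomic_inc_return(&x) - 1 amounts to; build with -pthread, thread count arbitrary:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_int next_hn;   /* stands in for scsi_host_next_hn */

    static void *alloc_host(void *arg)
    {
        /* post-increment semantics: return the old value, then advance */
        int host_no = atomic_fetch_add(&next_hn, 1);

        printf("thread %ld got host%d\n", (long)(intptr_t)arg, host_no);
        return NULL;
    }

    int main(void)
    {
        pthread_t t[4];

        for (long i = 0; i < 4; i++)
            pthread_create(&t[i], NULL, alloc_host, (void *)(intptr_t)i);
        for (long i = 0; i < 4; i++)
            pthread_join(t[i], NULL);
        return 0;
    }
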
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index ea4abee7a2a9..166d96450a0e 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -110,7 +110,7 @@ static const struct {
110 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" }, 110 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
111 { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" }, 111 { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
112 { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" }, 112 { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
113 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 0, 0, "link halted" }, 113 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
114 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" }, 114 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
115 115
116 { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" }, 116 { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
@@ -143,6 +143,7 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *);
143static void ibmvfc_tgt_send_prli(struct ibmvfc_target *); 143static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
144static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *); 144static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
145static void ibmvfc_tgt_query_target(struct ibmvfc_target *); 145static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
146static void ibmvfc_npiv_logout(struct ibmvfc_host *);
146 147
147static const char *unknown_error = "unknown error"; 148static const char *unknown_error = "unknown error";
148 149
@@ -275,7 +276,7 @@ static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
275 int fc_rsp_len = rsp->fcp_rsp_len; 276 int fc_rsp_len = rsp->fcp_rsp_len;
276 277
277 if ((rsp->flags & FCP_RSP_LEN_VALID) && 278 if ((rsp->flags & FCP_RSP_LEN_VALID) &&
278 ((!fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) || 279 ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
279 rsp->data.info.rsp_code)) 280 rsp->data.info.rsp_code))
280 return DID_ERROR << 16; 281 return DID_ERROR << 16;
281 282
@@ -431,6 +432,8 @@ static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
431 case IBMVFC_TGT_ACTION_DEL_RPORT: 432 case IBMVFC_TGT_ACTION_DEL_RPORT:
432 break; 433 break;
433 default: 434 default:
435 if (action == IBMVFC_TGT_ACTION_DEL_RPORT)
436 tgt->add_rport = 0;
434 tgt->action = action; 437 tgt->action = action;
435 break; 438 break;
436 } 439 }
@@ -475,6 +478,10 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
475 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) 478 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
476 vhost->action = action; 479 vhost->action = action;
477 break; 480 break;
481 case IBMVFC_HOST_ACTION_LOGO_WAIT:
482 if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
483 vhost->action = action;
484 break;
478 case IBMVFC_HOST_ACTION_INIT_WAIT: 485 case IBMVFC_HOST_ACTION_INIT_WAIT:
479 if (vhost->action == IBMVFC_HOST_ACTION_INIT) 486 if (vhost->action == IBMVFC_HOST_ACTION_INIT)
480 vhost->action = action; 487 vhost->action = action;
@@ -483,7 +490,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
483 switch (vhost->action) { 490 switch (vhost->action) {
484 case IBMVFC_HOST_ACTION_INIT_WAIT: 491 case IBMVFC_HOST_ACTION_INIT_WAIT:
485 case IBMVFC_HOST_ACTION_NONE: 492 case IBMVFC_HOST_ACTION_NONE:
486 case IBMVFC_HOST_ACTION_TGT_ADD: 493 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
487 vhost->action = action; 494 vhost->action = action;
488 break; 495 break;
489 default: 496 default:
@@ -494,11 +501,11 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
494 if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS) 501 if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
495 vhost->action = action; 502 vhost->action = action;
496 break; 503 break;
504 case IBMVFC_HOST_ACTION_LOGO:
497 case IBMVFC_HOST_ACTION_INIT: 505 case IBMVFC_HOST_ACTION_INIT:
498 case IBMVFC_HOST_ACTION_TGT_DEL: 506 case IBMVFC_HOST_ACTION_TGT_DEL:
499 case IBMVFC_HOST_ACTION_QUERY_TGTS: 507 case IBMVFC_HOST_ACTION_QUERY_TGTS:
500 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: 508 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
501 case IBMVFC_HOST_ACTION_TGT_ADD:
502 case IBMVFC_HOST_ACTION_NONE: 509 case IBMVFC_HOST_ACTION_NONE:
503 default: 510 default:
504 vhost->action = action; 511 vhost->action = action;
@@ -576,7 +583,7 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
576 } 583 }
577 584
578 list_for_each_entry(tgt, &vhost->targets, queue) 585 list_for_each_entry(tgt, &vhost->targets, queue)
579 tgt->need_login = 1; 586 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
580 scsi_block_requests(vhost->host); 587 scsi_block_requests(vhost->host);
581 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); 588 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
582 vhost->job_step = ibmvfc_npiv_login; 589 vhost->job_step = ibmvfc_npiv_login;
@@ -646,6 +653,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
646 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 653 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
647 654
648 vhost->state = IBMVFC_NO_CRQ; 655 vhost->state = IBMVFC_NO_CRQ;
656 vhost->logged_in = 0;
649 dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); 657 dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
650 free_page((unsigned long)crq->msgs); 658 free_page((unsigned long)crq->msgs);
651} 659}
@@ -692,6 +700,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
692 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 700 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
693 701
694 vhost->state = IBMVFC_NO_CRQ; 702 vhost->state = IBMVFC_NO_CRQ;
703 vhost->logged_in = 0;
695 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); 704 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
696 705
697 /* Clean out the queue */ 706 /* Clean out the queue */
@@ -807,10 +816,10 @@ static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
807} 816}
808 817
809/** 818/**
810 * __ibmvfc_reset_host - Reset the connection to the server (no locking) 819 * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
811 * @vhost: struct ibmvfc host to reset 820 * @vhost: struct ibmvfc host to reset
812 **/ 821 **/
813static void __ibmvfc_reset_host(struct ibmvfc_host *vhost) 822static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
814{ 823{
815 int rc; 824 int rc;
816 825
@@ -826,9 +835,25 @@ static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
826} 835}
827 836
828/** 837/**
829 * ibmvfc_reset_host - Reset the connection to the server 838 * __ibmvfc_reset_host - Reset the connection to the server (no locking)
830 * @vhost: struct ibmvfc host to reset 839 * @vhost: struct ibmvfc host to reset
831 **/ 840 **/
841static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
842{
843 if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
844 !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
845 scsi_block_requests(vhost->host);
846 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
847 vhost->job_step = ibmvfc_npiv_logout;
848 wake_up(&vhost->work_wait_q);
849 } else
850 ibmvfc_hard_reset_host(vhost);
851}
852
853/**
854 * ibmvfc_reset_host - Reset the connection to the server
855 * @vhost: ibmvfc host struct
856 **/
832static void ibmvfc_reset_host(struct ibmvfc_host *vhost) 857static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
833{ 858{
834 unsigned long flags; 859 unsigned long flags;
@@ -842,9 +867,13 @@ static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
842 * ibmvfc_retry_host_init - Retry host initialization if allowed 867 * ibmvfc_retry_host_init - Retry host initialization if allowed
843 * @vhost: ibmvfc host struct 868 * @vhost: ibmvfc host struct
844 * 869 *
870 * Returns: 1 if init will be retried / 0 if not
871 *
845 **/ 872 **/
846static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost) 873static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
847{ 874{
875 int retry = 0;
876
848 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) { 877 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
849 vhost->delay_init = 1; 878 vhost->delay_init = 1;
850 if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) { 879 if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
@@ -853,11 +882,14 @@ static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
853 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); 882 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
854 } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES) 883 } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
855 __ibmvfc_reset_host(vhost); 884 __ibmvfc_reset_host(vhost);
856 else 885 else {
857 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); 886 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
887 retry = 1;
888 }
858 } 889 }
859 890
860 wake_up(&vhost->work_wait_q); 891 wake_up(&vhost->work_wait_q);
892 return retry;
861} 893}
862 894
863/** 895/**
@@ -1137,8 +1169,9 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1137 login_info->partition_num = vhost->partition_number; 1169 login_info->partition_num = vhost->partition_number;
1138 login_info->vfc_frame_version = 1; 1170 login_info->vfc_frame_version = 1;
1139 login_info->fcp_version = 3; 1171 login_info->fcp_version = 3;
1172 login_info->flags = IBMVFC_FLUSH_ON_HALT;
1140 if (vhost->client_migrated) 1173 if (vhost->client_migrated)
1141 login_info->flags = IBMVFC_CLIENT_MIGRATED; 1174 login_info->flags |= IBMVFC_CLIENT_MIGRATED;
1142 1175
1143 login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ; 1176 login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
1144 login_info->capabilities = IBMVFC_CAN_MIGRATE; 1177 login_info->capabilities = IBMVFC_CAN_MIGRATE;
@@ -1452,6 +1485,27 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
1452} 1485}
1453 1486
1454/** 1487/**
1488 * ibmvfc_relogin - Log back into the specified device
1489 * @sdev: scsi device struct
1490 *
1491 **/
1492static void ibmvfc_relogin(struct scsi_device *sdev)
1493{
1494 struct ibmvfc_host *vhost = shost_priv(sdev->host);
1495 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1496 struct ibmvfc_target *tgt;
1497
1498 list_for_each_entry(tgt, &vhost->targets, queue) {
1499 if (rport == tgt->rport) {
1500 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
1501 break;
1502 }
1503 }
1504
1505 ibmvfc_reinit_host(vhost);
1506}
1507
1508/**
1455 * ibmvfc_scsi_done - Handle responses from commands 1509 * ibmvfc_scsi_done - Handle responses from commands
1456 * @evt: ibmvfc event to be handled 1510 * @evt: ibmvfc event to be handled
1457 * 1511 *
@@ -1483,7 +1537,7 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1483 if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8) 1537 if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
1484 memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len); 1538 memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
1485 if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED)) 1539 if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED))
1486 ibmvfc_reinit_host(evt->vhost); 1540 ibmvfc_relogin(cmnd->device);
1487 1541
1488 if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER))) 1542 if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
1489 cmnd->result = (DID_ERROR << 16); 1543 cmnd->result = (DID_ERROR << 16);
@@ -2148,13 +2202,31 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2148 struct ibmvfc_host *vhost) 2202 struct ibmvfc_host *vhost)
2149{ 2203{
2150 const char *desc = ibmvfc_get_ae_desc(crq->event); 2204 const char *desc = ibmvfc_get_ae_desc(crq->event);
2205 struct ibmvfc_target *tgt;
2151 2206
2152 ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx," 2207 ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx,"
2153 " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name); 2208 " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name);
2154 2209
2155 switch (crq->event) { 2210 switch (crq->event) {
2156 case IBMVFC_AE_LINK_UP:
2157 case IBMVFC_AE_RESUME: 2211 case IBMVFC_AE_RESUME:
2212 switch (crq->link_state) {
2213 case IBMVFC_AE_LS_LINK_DOWN:
2214 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2215 break;
2216 case IBMVFC_AE_LS_LINK_DEAD:
2217 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2218 break;
2219 case IBMVFC_AE_LS_LINK_UP:
2220 case IBMVFC_AE_LS_LINK_BOUNCED:
2221 default:
2222 vhost->events_to_log |= IBMVFC_AE_LINKUP;
2223 vhost->delay_init = 1;
2224 __ibmvfc_reset_host(vhost);
2225 break;
2226 };
2227
2228 break;
2229 case IBMVFC_AE_LINK_UP:
2158 vhost->events_to_log |= IBMVFC_AE_LINKUP; 2230 vhost->events_to_log |= IBMVFC_AE_LINKUP;
2159 vhost->delay_init = 1; 2231 vhost->delay_init = 1;
2160 __ibmvfc_reset_host(vhost); 2232 __ibmvfc_reset_host(vhost);
@@ -2168,10 +2240,27 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2168 case IBMVFC_AE_SCN_NPORT: 2240 case IBMVFC_AE_SCN_NPORT:
2169 case IBMVFC_AE_SCN_GROUP: 2241 case IBMVFC_AE_SCN_GROUP:
2170 vhost->events_to_log |= IBMVFC_AE_RSCN; 2242 vhost->events_to_log |= IBMVFC_AE_RSCN;
2243 ibmvfc_reinit_host(vhost);
2244 break;
2171 case IBMVFC_AE_ELS_LOGO: 2245 case IBMVFC_AE_ELS_LOGO:
2172 case IBMVFC_AE_ELS_PRLO: 2246 case IBMVFC_AE_ELS_PRLO:
2173 case IBMVFC_AE_ELS_PLOGI: 2247 case IBMVFC_AE_ELS_PLOGI:
2174 ibmvfc_reinit_host(vhost); 2248 list_for_each_entry(tgt, &vhost->targets, queue) {
2249 if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
2250 break;
2251 if (crq->scsi_id && tgt->scsi_id != crq->scsi_id)
2252 continue;
2253 if (crq->wwpn && tgt->ids.port_name != crq->wwpn)
2254 continue;
2255 if (crq->node_name && tgt->ids.node_name != crq->node_name)
2256 continue;
2257 if (tgt->need_login && crq->event == IBMVFC_AE_ELS_LOGO)
2258 tgt->logo_rcvd = 1;
2259 if (!tgt->need_login || crq->event == IBMVFC_AE_ELS_PLOGI) {
2260 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2261 ibmvfc_reinit_host(vhost);
2262 }
2263 }
2175 break; 2264 break;
2176 case IBMVFC_AE_LINK_DOWN: 2265 case IBMVFC_AE_LINK_DOWN:
2177 case IBMVFC_AE_ADAPTER_FAILED: 2266 case IBMVFC_AE_ADAPTER_FAILED:
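
A small sketch of the per-field wildcard matching the ELS event handling gains above: a zero scsi_id, wwpn or node_name in the event matches any target; the target table and values are invented:

    #include <stdint.h>
    #include <stdio.h>

    struct tgt { uint64_t scsi_id, wwpn, node_name; };

    static int tgt_matches(const struct tgt *t,
                           uint64_t scsi_id, uint64_t wwpn, uint64_t node_name)
    {
        if (scsi_id && t->scsi_id != scsi_id)
            return 0;
        if (wwpn && t->wwpn != wwpn)
            return 0;
        if (node_name && t->node_name != node_name)
            return 0;
        return 1;               /* zero fields in the event match anything */
    }

    int main(void)
    {
        struct tgt targets[] = {
            { 0x10100, 0x5005076801234567ULL, 0x5005076801234568ULL },
            { 0x10200, 0x50050768012345aaULL, 0x50050768012345abULL },
        };

        /* event that names only a WWPN: the other fields act as wildcards */
        for (unsigned i = 0; i < 2; i++)
            if (tgt_matches(&targets[i], 0, 0x50050768012345aaULL, 0))
                printf("target %u needs re-login\n", i);
        return 0;
    }
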
@@ -2222,6 +2311,7 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
2222 return; 2311 return;
2223 case IBMVFC_CRQ_XPORT_EVENT: 2312 case IBMVFC_CRQ_XPORT_EVENT:
2224 vhost->state = IBMVFC_NO_CRQ; 2313 vhost->state = IBMVFC_NO_CRQ;
2314 vhost->logged_in = 0;
2225 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); 2315 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
2226 if (crq->format == IBMVFC_PARTITION_MIGRATED) { 2316 if (crq->format == IBMVFC_PARTITION_MIGRATED) {
2227 /* We need to re-setup the interpartition connection */ 2317 /* We need to re-setup the interpartition connection */
@@ -2299,7 +2389,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2299 done = 1; 2389 done = 1;
2300 } 2390 }
2301 2391
2302 if (vhost->state != IBMVFC_NO_CRQ && vhost->action == IBMVFC_HOST_ACTION_NONE) 2392 if (vhost->scan_complete)
2303 done = 1; 2393 done = 1;
2304 spin_unlock_irqrestore(shost->host_lock, flags); 2394 spin_unlock_irqrestore(shost->host_lock, flags);
2305 return done; 2395 return done;
@@ -2434,14 +2524,6 @@ static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
2434 vhost->login_buf->resp.partition_name); 2524 vhost->login_buf->resp.partition_name);
2435} 2525}
2436 2526
2437static struct device_attribute ibmvfc_host_partition_name = {
2438 .attr = {
2439 .name = "partition_name",
2440 .mode = S_IRUGO,
2441 },
2442 .show = ibmvfc_show_host_partition_name,
2443};
2444
2445static ssize_t ibmvfc_show_host_device_name(struct device *dev, 2527static ssize_t ibmvfc_show_host_device_name(struct device *dev,
2446 struct device_attribute *attr, char *buf) 2528 struct device_attribute *attr, char *buf)
2447{ 2529{
@@ -2452,14 +2534,6 @@ static ssize_t ibmvfc_show_host_device_name(struct device *dev,
2452 vhost->login_buf->resp.device_name); 2534 vhost->login_buf->resp.device_name);
2453} 2535}
2454 2536
2455static struct device_attribute ibmvfc_host_device_name = {
2456 .attr = {
2457 .name = "device_name",
2458 .mode = S_IRUGO,
2459 },
2460 .show = ibmvfc_show_host_device_name,
2461};
2462
2463static ssize_t ibmvfc_show_host_loc_code(struct device *dev, 2537static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
2464 struct device_attribute *attr, char *buf) 2538 struct device_attribute *attr, char *buf)
2465{ 2539{
@@ -2470,14 +2544,6 @@ static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
2470 vhost->login_buf->resp.port_loc_code); 2544 vhost->login_buf->resp.port_loc_code);
2471} 2545}
2472 2546
2473static struct device_attribute ibmvfc_host_loc_code = {
2474 .attr = {
2475 .name = "port_loc_code",
2476 .mode = S_IRUGO,
2477 },
2478 .show = ibmvfc_show_host_loc_code,
2479};
2480
2481static ssize_t ibmvfc_show_host_drc_name(struct device *dev, 2547static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
2482 struct device_attribute *attr, char *buf) 2548 struct device_attribute *attr, char *buf)
2483{ 2549{
@@ -2488,14 +2554,6 @@ static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
2488 vhost->login_buf->resp.drc_name); 2554 vhost->login_buf->resp.drc_name);
2489} 2555}
2490 2556
2491static struct device_attribute ibmvfc_host_drc_name = {
2492 .attr = {
2493 .name = "drc_name",
2494 .mode = S_IRUGO,
2495 },
2496 .show = ibmvfc_show_host_drc_name,
2497};
2498
2499static ssize_t ibmvfc_show_host_npiv_version(struct device *dev, 2557static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
2500 struct device_attribute *attr, char *buf) 2558 struct device_attribute *attr, char *buf)
2501{ 2559{
@@ -2504,13 +2562,13 @@ static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
2504 return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version); 2562 return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
2505} 2563}
2506 2564
2507static struct device_attribute ibmvfc_host_npiv_version = { 2565static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
2508 .attr = { 2566 struct device_attribute *attr, char *buf)
2509 .name = "npiv_version", 2567{
2510 .mode = S_IRUGO, 2568 struct Scsi_Host *shost = class_to_shost(dev);
2511 }, 2569 struct ibmvfc_host *vhost = shost_priv(shost);
2512 .show = ibmvfc_show_host_npiv_version, 2570 return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities);
2513}; 2571}
2514 2572
2515/** 2573/**
2516 * ibmvfc_show_log_level - Show the adapter's error logging level 2574 * ibmvfc_show_log_level - Show the adapter's error logging level
@@ -2556,14 +2614,14 @@ static ssize_t ibmvfc_store_log_level(struct device *dev,
2556 return strlen(buf); 2614 return strlen(buf);
2557} 2615}
2558 2616
2559static struct device_attribute ibmvfc_log_level_attr = { 2617static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
2560 .attr = { 2618static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
2561 .name = "log_level", 2619static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
2562 .mode = S_IRUGO | S_IWUSR, 2620static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
2563 }, 2621static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
2564 .show = ibmvfc_show_log_level, 2622static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
2565 .store = ibmvfc_store_log_level 2623static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
2566}; 2624 ibmvfc_show_log_level, ibmvfc_store_log_level);
2567 2625
2568#ifdef CONFIG_SCSI_IBMVFC_TRACE 2626#ifdef CONFIG_SCSI_IBMVFC_TRACE
2569/** 2627/**
@@ -2612,12 +2670,13 @@ static struct bin_attribute ibmvfc_trace_attr = {
2612#endif 2670#endif
2613 2671
2614static struct device_attribute *ibmvfc_attrs[] = { 2672static struct device_attribute *ibmvfc_attrs[] = {
2615 &ibmvfc_host_partition_name, 2673 &dev_attr_partition_name,
2616 &ibmvfc_host_device_name, 2674 &dev_attr_device_name,
2617 &ibmvfc_host_loc_code, 2675 &dev_attr_port_loc_code,
2618 &ibmvfc_host_drc_name, 2676 &dev_attr_drc_name,
2619 &ibmvfc_host_npiv_version, 2677 &dev_attr_npiv_version,
2620 &ibmvfc_log_level_attr, 2678 &dev_attr_capabilities,
2679 &dev_attr_log_level,
2621 NULL 2680 NULL
2622}; 2681};
2623 2682
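Side note on the hunk above: the open-coded struct device_attribute definitions are replaced by the DEVICE_ATTR() macro, which generates a struct device_attribute named dev_attr_<name>; that is why the attribute table now lists dev_attr_partition_name and friends. A minimal illustration of the macro follows (the attribute name and show routine are hypothetical, not part of this patch; assumes <linux/device.h> and <linux/stat.h>):

	/* Sketch only, not driver code. */
	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return snprintf(buf, PAGE_SIZE, "example\n");
	}

	/* Expands to: static struct device_attribute dev_attr_example = { ... }; */
	static DEVICE_ATTR(example, S_IRUGO, example_show, NULL);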
@@ -2727,27 +2786,27 @@ static void ibmvfc_tasklet(void *data)
2727 2786
2728 spin_lock_irqsave(vhost->host->host_lock, flags); 2787 spin_lock_irqsave(vhost->host->host_lock, flags);
2729 while (!done) { 2788 while (!done) {
2730 /* Pull all the valid messages off the CRQ */
2731 while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
2732 ibmvfc_handle_crq(crq, vhost);
2733 crq->valid = 0;
2734 }
2735
2736 /* Pull all the valid messages off the async CRQ */ 2789 /* Pull all the valid messages off the async CRQ */
2737 while ((async = ibmvfc_next_async_crq(vhost)) != NULL) { 2790 while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
2738 ibmvfc_handle_async(async, vhost); 2791 ibmvfc_handle_async(async, vhost);
2739 async->valid = 0; 2792 async->valid = 0;
2740 } 2793 }
2741 2794
2742 vio_enable_interrupts(vdev); 2795 /* Pull all the valid messages off the CRQ */
2743 if ((crq = ibmvfc_next_crq(vhost)) != NULL) { 2796 while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
2744 vio_disable_interrupts(vdev);
2745 ibmvfc_handle_crq(crq, vhost); 2797 ibmvfc_handle_crq(crq, vhost);
2746 crq->valid = 0; 2798 crq->valid = 0;
2747 } else if ((async = ibmvfc_next_async_crq(vhost)) != NULL) { 2799 }
2800
2801 vio_enable_interrupts(vdev);
2802 if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
2748 vio_disable_interrupts(vdev); 2803 vio_disable_interrupts(vdev);
2749 ibmvfc_handle_async(async, vhost); 2804 ibmvfc_handle_async(async, vhost);
2750 async->valid = 0; 2805 async->valid = 0;
2806 } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
2807 vio_disable_interrupts(vdev);
2808 ibmvfc_handle_crq(crq, vhost);
2809 crq->valid = 0;
2751 } else 2810 } else
2752 done = 1; 2811 done = 1;
2753 } 2812 }
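The reworked tasklet above drains the async queue first, then the command CRQ, re-enables interrupts, and polls each queue one more time before exiting; the final poll closes the window in which an entry can arrive between the last dequeue and vio_enable_interrupts(). A condensed sketch of that drain/re-check idiom is below; the example_* types and helpers are placeholders, only vio_enable_interrupts()/vio_disable_interrupts() are real VIO calls:

	/* Sketch of the drain/re-check idiom, not driver code. */
	static void example_poll(struct vio_dev *vdev, struct example_host *host)
	{
		struct example_entry *entry;
		int done = 0;

		while (!done) {
			/* Drain everything already queued, interrupts disabled. */
			while ((entry = example_next_entry(host)) != NULL)
				example_handle_entry(host, entry);

			vio_enable_interrupts(vdev);
			/* Re-check once: an entry may have landed just before enabling. */
			if ((entry = example_next_entry(host)) != NULL) {
				vio_disable_interrupts(vdev);
				example_handle_entry(host, entry);
			} else
				done = 1;
		}
	}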
@@ -2774,15 +2833,19 @@ static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
2774 * @tgt: ibmvfc target struct 2833 * @tgt: ibmvfc target struct
2775 * @job_step: initialization job step 2834 * @job_step: initialization job step
2776 * 2835 *
2836 * Returns: 1 if step will be retried / 0 if not
2837 *
2777 **/ 2838 **/
2778static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt, 2839static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
2779 void (*job_step) (struct ibmvfc_target *)) 2840 void (*job_step) (struct ibmvfc_target *))
2780{ 2841{
2781 if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) { 2842 if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
2782 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 2843 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2783 wake_up(&tgt->vhost->work_wait_q); 2844 wake_up(&tgt->vhost->work_wait_q);
2845 return 0;
2784 } else 2846 } else
2785 ibmvfc_init_tgt(tgt, job_step); 2847 ibmvfc_init_tgt(tgt, job_step);
2848 return 1;
2786} 2849}
2787 2850
2788/* Defined in FC-LS */ 2851/* Defined in FC-LS */
@@ -2831,7 +2894,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2831 struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli; 2894 struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
2832 struct ibmvfc_prli_svc_parms *parms = &rsp->parms; 2895 struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
2833 u32 status = rsp->common.status; 2896 u32 status = rsp->common.status;
2834 int index; 2897 int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
2835 2898
2836 vhost->discovery_threads--; 2899 vhost->discovery_threads--;
2837 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 2900 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -2850,7 +2913,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2850 tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET; 2913 tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
2851 if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC) 2914 if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC)
2852 tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; 2915 tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
2853 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT); 2916 tgt->add_rport = 1;
2854 } else 2917 } else
2855 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 2918 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2856 } else if (prli_rsp[index].retry) 2919 } else if (prli_rsp[index].retry)
@@ -2867,13 +2930,18 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2867 break; 2930 break;
2868 case IBMVFC_MAD_FAILED: 2931 case IBMVFC_MAD_FAILED:
2869 default: 2932 default:
2870 tgt_err(tgt, "Process Login failed: %s (%x:%x) rc=0x%02X\n", 2933 if ((rsp->status & IBMVFC_VIOS_FAILURE) && rsp->error == IBMVFC_PLOGI_REQUIRED)
2871 ibmvfc_get_cmd_error(rsp->status, rsp->error), 2934 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
2872 rsp->status, rsp->error, status); 2935 else if (tgt->logo_rcvd)
2873 if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 2936 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
2874 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); 2937 else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
2938 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
2875 else 2939 else
2876 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 2940 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2941
2942 tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
2943 ibmvfc_get_cmd_error(rsp->status, rsp->error),
2944 rsp->status, rsp->error, status);
2877 break; 2945 break;
2878 }; 2946 };
2879 2947
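In the error path above, level starts at IBMVFC_DEFAULT_LOG_LEVEL and is incremented by the return value of ibmvfc_retry_tgt_init() (1 when the step will be retried, 0 when the target is marked for deletion). Combined with the tgt_log() macro added in ibmvfc.h later in this patch, the effect is that failures which will be retried are only printed once log_level is raised one notch above the default, while terminal failures still log at the default level. Roughly, assuming the macro definition shown below:

	/* Sketch: tgt_log(tgt, level, ...) prints only when vhost->log_level >= level,
	 * so a retried step (level == IBMVFC_DEFAULT_LOG_LEVEL + 1) needs one extra
	 * notch of verbosity before its failure message appears. */
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	if (ibmvfc_retry_cmd(rsp->status, rsp->error))
		level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
	tgt_log(tgt, level, "Process Login failed: rc=0x%02X\n", status);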
@@ -2932,6 +3000,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
2932 struct ibmvfc_host *vhost = evt->vhost; 3000 struct ibmvfc_host *vhost = evt->vhost;
2933 struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi; 3001 struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
2934 u32 status = rsp->common.status; 3002 u32 status = rsp->common.status;
3003 int level = IBMVFC_DEFAULT_LOG_LEVEL;
2935 3004
2936 vhost->discovery_threads--; 3005 vhost->discovery_threads--;
2937 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 3006 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -2960,15 +3029,15 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
2960 break; 3029 break;
2961 case IBMVFC_MAD_FAILED: 3030 case IBMVFC_MAD_FAILED:
2962 default: 3031 default:
2963 tgt_err(tgt, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
2964 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
2965 ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
2966 ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
2967
2968 if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 3032 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
2969 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); 3033 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
2970 else 3034 else
2971 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 3035 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3036
3037 tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3038 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
3039 ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
3040 ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
2972 break; 3041 break;
2973 }; 3042 };
2974 3043
@@ -2992,6 +3061,7 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
2992 return; 3061 return;
2993 3062
2994 kref_get(&tgt->kref); 3063 kref_get(&tgt->kref);
3064 tgt->logo_rcvd = 0;
2995 evt = ibmvfc_get_event(vhost); 3065 evt = ibmvfc_get_event(vhost);
2996 vhost->discovery_threads++; 3066 vhost->discovery_threads++;
2997 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); 3067 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
@@ -3129,13 +3199,13 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
3129 case IBMVFC_MAD_SUCCESS: 3199 case IBMVFC_MAD_SUCCESS:
3130 tgt_dbg(tgt, "ADISC succeeded\n"); 3200 tgt_dbg(tgt, "ADISC succeeded\n");
3131 if (ibmvfc_adisc_needs_plogi(mad, tgt)) 3201 if (ibmvfc_adisc_needs_plogi(mad, tgt))
3132 tgt->need_login = 1; 3202 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3133 break; 3203 break;
3134 case IBMVFC_MAD_DRIVER_FAILED: 3204 case IBMVFC_MAD_DRIVER_FAILED:
3135 break; 3205 break;
3136 case IBMVFC_MAD_FAILED: 3206 case IBMVFC_MAD_FAILED:
3137 default: 3207 default:
3138 tgt->need_login = 1; 3208 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3139 fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16; 3209 fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16;
3140 fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8; 3210 fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8;
3141 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", 3211 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
@@ -3322,6 +3392,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
3322 struct ibmvfc_host *vhost = evt->vhost; 3392 struct ibmvfc_host *vhost = evt->vhost;
3323 struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt; 3393 struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
3324 u32 status = rsp->common.status; 3394 u32 status = rsp->common.status;
3395 int level = IBMVFC_DEFAULT_LOG_LEVEL;
3325 3396
3326 vhost->discovery_threads--; 3397 vhost->discovery_threads--;
3327 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 3398 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -3341,19 +3412,19 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
3341 break; 3412 break;
3342 case IBMVFC_MAD_FAILED: 3413 case IBMVFC_MAD_FAILED:
3343 default: 3414 default:
3344 tgt_err(tgt, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3345 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
3346 ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
3347 ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
3348
3349 if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED && 3415 if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
3350 rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ && 3416 rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ &&
3351 rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG) 3417 rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG)
3352 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 3418 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3353 else if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 3419 else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
3354 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); 3420 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
3355 else 3421 else
3356 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 3422 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3423
3424 tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3425 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
3426 ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
3427 ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
3357 break; 3428 break;
3358 }; 3429 };
3359 3430
@@ -3420,7 +3491,7 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
3420 } 3491 }
3421 spin_unlock_irqrestore(vhost->host->host_lock, flags); 3492 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3422 3493
3423 tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL); 3494 tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
3424 if (!tgt) { 3495 if (!tgt) {
3425 dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n", 3496 dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n",
3426 scsi_id); 3497 scsi_id);
@@ -3472,6 +3543,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
3472 struct ibmvfc_host *vhost = evt->vhost; 3543 struct ibmvfc_host *vhost = evt->vhost;
3473 struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets; 3544 struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
3474 u32 mad_status = rsp->common.status; 3545 u32 mad_status = rsp->common.status;
3546 int level = IBMVFC_DEFAULT_LOG_LEVEL;
3475 3547
3476 switch (mad_status) { 3548 switch (mad_status) {
3477 case IBMVFC_MAD_SUCCESS: 3549 case IBMVFC_MAD_SUCCESS:
@@ -3480,9 +3552,9 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
3480 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS); 3552 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
3481 break; 3553 break;
3482 case IBMVFC_MAD_FAILED: 3554 case IBMVFC_MAD_FAILED:
3483 dev_err(vhost->dev, "Discover Targets failed: %s (%x:%x)\n", 3555 level += ibmvfc_retry_host_init(vhost);
3484 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); 3556 ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
3485 ibmvfc_retry_host_init(vhost); 3557 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3486 break; 3558 break;
3487 case IBMVFC_MAD_DRIVER_FAILED: 3559 case IBMVFC_MAD_DRIVER_FAILED:
3488 break; 3560 break;
@@ -3534,18 +3606,19 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
3534 u32 mad_status = evt->xfer_iu->npiv_login.common.status; 3606 u32 mad_status = evt->xfer_iu->npiv_login.common.status;
3535 struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp; 3607 struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
3536 unsigned int npiv_max_sectors; 3608 unsigned int npiv_max_sectors;
3609 int level = IBMVFC_DEFAULT_LOG_LEVEL;
3537 3610
3538 switch (mad_status) { 3611 switch (mad_status) {
3539 case IBMVFC_MAD_SUCCESS: 3612 case IBMVFC_MAD_SUCCESS:
3540 ibmvfc_free_event(evt); 3613 ibmvfc_free_event(evt);
3541 break; 3614 break;
3542 case IBMVFC_MAD_FAILED: 3615 case IBMVFC_MAD_FAILED:
3543 dev_err(vhost->dev, "NPIV Login failed: %s (%x:%x)\n",
3544 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3545 if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 3616 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
3546 ibmvfc_retry_host_init(vhost); 3617 level += ibmvfc_retry_host_init(vhost);
3547 else 3618 else
3548 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); 3619 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3620 ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
3621 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3549 ibmvfc_free_event(evt); 3622 ibmvfc_free_event(evt);
3550 return; 3623 return;
3551 case IBMVFC_MAD_CRQ_ERROR: 3624 case IBMVFC_MAD_CRQ_ERROR:
@@ -3578,6 +3651,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
3578 return; 3651 return;
3579 } 3652 }
3580 3653
3654 vhost->logged_in = 1;
3581 npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS); 3655 npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS);
3582 dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n", 3656 dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
3583 rsp->partition_name, rsp->device_name, rsp->port_loc_code, 3657 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
@@ -3636,6 +3710,65 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
3636}; 3710};
3637 3711
3638/** 3712/**
3713 * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
 3714 * @evt: ibmvfc event struct
 3714 * @evt: ibmvfc event struct
3715 *
3716 **/
3717static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
3718{
3719 struct ibmvfc_host *vhost = evt->vhost;
3720 u32 mad_status = evt->xfer_iu->npiv_logout.common.status;
3721
3722 ibmvfc_free_event(evt);
3723
3724 switch (mad_status) {
3725 case IBMVFC_MAD_SUCCESS:
3726 if (list_empty(&vhost->sent) &&
3727 vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
3728 ibmvfc_init_host(vhost, 0);
3729 return;
3730 }
3731 break;
3732 case IBMVFC_MAD_FAILED:
3733 case IBMVFC_MAD_NOT_SUPPORTED:
3734 case IBMVFC_MAD_CRQ_ERROR:
3735 case IBMVFC_MAD_DRIVER_FAILED:
3736 default:
3737 ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
3738 break;
3739 }
3740
3741 ibmvfc_hard_reset_host(vhost);
3742}
3743
3744/**
3745 * ibmvfc_npiv_logout - Issue an NPIV Logout
3746 * @vhost: ibmvfc host struct
3747 *
3748 **/
3749static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
3750{
3751 struct ibmvfc_npiv_logout_mad *mad;
3752 struct ibmvfc_event *evt;
3753
3754 evt = ibmvfc_get_event(vhost);
3755 ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
3756
3757 mad = &evt->iu.npiv_logout;
3758 memset(mad, 0, sizeof(*mad));
3759 mad->common.version = 1;
3760 mad->common.opcode = IBMVFC_NPIV_LOGOUT;
3761 mad->common.length = sizeof(struct ibmvfc_npiv_logout_mad);
3762
3763 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
3764
3765 if (!ibmvfc_send_event(evt, vhost, default_timeout))
3766 ibmvfc_dbg(vhost, "Sent NPIV logout\n");
3767 else
3768 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3769}
3770
3771/**
3639 * ibmvfc_dev_init_to_do - Is there target initialization work to do? 3772 * ibmvfc_dev_init_to_do - Is there target initialization work to do?
3640 * @vhost: ibmvfc host struct 3773 * @vhost: ibmvfc host struct
3641 * 3774 *
@@ -3671,6 +3804,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
3671 switch (vhost->action) { 3804 switch (vhost->action) {
3672 case IBMVFC_HOST_ACTION_NONE: 3805 case IBMVFC_HOST_ACTION_NONE:
3673 case IBMVFC_HOST_ACTION_INIT_WAIT: 3806 case IBMVFC_HOST_ACTION_INIT_WAIT:
3807 case IBMVFC_HOST_ACTION_LOGO_WAIT:
3674 return 0; 3808 return 0;
3675 case IBMVFC_HOST_ACTION_TGT_INIT: 3809 case IBMVFC_HOST_ACTION_TGT_INIT:
3676 case IBMVFC_HOST_ACTION_QUERY_TGTS: 3810 case IBMVFC_HOST_ACTION_QUERY_TGTS:
@@ -3683,9 +3817,9 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
3683 if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT) 3817 if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
3684 return 0; 3818 return 0;
3685 return 1; 3819 return 1;
3820 case IBMVFC_HOST_ACTION_LOGO:
3686 case IBMVFC_HOST_ACTION_INIT: 3821 case IBMVFC_HOST_ACTION_INIT:
3687 case IBMVFC_HOST_ACTION_ALLOC_TGTS: 3822 case IBMVFC_HOST_ACTION_ALLOC_TGTS:
3688 case IBMVFC_HOST_ACTION_TGT_ADD:
3689 case IBMVFC_HOST_ACTION_TGT_DEL: 3823 case IBMVFC_HOST_ACTION_TGT_DEL:
3690 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: 3824 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
3691 case IBMVFC_HOST_ACTION_QUERY: 3825 case IBMVFC_HOST_ACTION_QUERY:
@@ -3740,25 +3874,26 @@ static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
3740static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) 3874static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
3741{ 3875{
3742 struct ibmvfc_host *vhost = tgt->vhost; 3876 struct ibmvfc_host *vhost = tgt->vhost;
3743 struct fc_rport *rport = tgt->rport; 3877 struct fc_rport *rport;
3744 unsigned long flags; 3878 unsigned long flags;
3745 3879
3746 if (rport) { 3880 tgt_dbg(tgt, "Adding rport\n");
3747 tgt_dbg(tgt, "Setting rport roles\n"); 3881 rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
3748 fc_remote_port_rolechg(rport, tgt->ids.roles); 3882 spin_lock_irqsave(vhost->host->host_lock, flags);
3749 spin_lock_irqsave(vhost->host->host_lock, flags); 3883
3750 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 3884 if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
3885 tgt_dbg(tgt, "Deleting rport\n");
3886 list_del(&tgt->queue);
3751 spin_unlock_irqrestore(vhost->host->host_lock, flags); 3887 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3888 fc_remote_port_delete(rport);
3889 del_timer_sync(&tgt->timer);
3890 kref_put(&tgt->kref, ibmvfc_release_tgt);
3752 return; 3891 return;
3753 } 3892 }
3754 3893
3755 tgt_dbg(tgt, "Adding rport\n");
3756 rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
3757 spin_lock_irqsave(vhost->host->host_lock, flags);
3758 tgt->rport = rport;
3759 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3760 if (rport) { 3894 if (rport) {
3761 tgt_dbg(tgt, "rport add succeeded\n"); 3895 tgt_dbg(tgt, "rport add succeeded\n");
3896 tgt->rport = rport;
3762 rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff; 3897 rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
3763 rport->supported_classes = 0; 3898 rport->supported_classes = 0;
3764 tgt->target_id = rport->scsi_target_id; 3899 tgt->target_id = rport->scsi_target_id;
@@ -3789,8 +3924,12 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3789 vhost->events_to_log = 0; 3924 vhost->events_to_log = 0;
3790 switch (vhost->action) { 3925 switch (vhost->action) {
3791 case IBMVFC_HOST_ACTION_NONE: 3926 case IBMVFC_HOST_ACTION_NONE:
3927 case IBMVFC_HOST_ACTION_LOGO_WAIT:
3792 case IBMVFC_HOST_ACTION_INIT_WAIT: 3928 case IBMVFC_HOST_ACTION_INIT_WAIT:
3793 break; 3929 break;
3930 case IBMVFC_HOST_ACTION_LOGO:
3931 vhost->job_step(vhost);
3932 break;
3794 case IBMVFC_HOST_ACTION_INIT: 3933 case IBMVFC_HOST_ACTION_INIT:
3795 BUG_ON(vhost->state != IBMVFC_INITIALIZING); 3934 BUG_ON(vhost->state != IBMVFC_INITIALIZING);
3796 if (vhost->delay_init) { 3935 if (vhost->delay_init) {
@@ -3836,11 +3975,21 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3836 3975
3837 if (vhost->state == IBMVFC_INITIALIZING) { 3976 if (vhost->state == IBMVFC_INITIALIZING) {
3838 if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) { 3977 if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
3839 ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE); 3978 if (vhost->reinit) {
3840 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD); 3979 vhost->reinit = 0;
3841 vhost->init_retries = 0; 3980 scsi_block_requests(vhost->host);
3842 spin_unlock_irqrestore(vhost->host->host_lock, flags); 3981 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
3843 scsi_unblock_requests(vhost->host); 3982 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3983 } else {
3984 ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
3985 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3986 wake_up(&vhost->init_wait_q);
3987 schedule_work(&vhost->rport_add_work_q);
3988 vhost->init_retries = 0;
3989 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3990 scsi_unblock_requests(vhost->host);
3991 }
3992
3844 return; 3993 return;
3845 } else { 3994 } else {
3846 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); 3995 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
@@ -3871,24 +4020,6 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3871 if (!ibmvfc_dev_init_to_do(vhost)) 4020 if (!ibmvfc_dev_init_to_do(vhost))
3872 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED); 4021 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
3873 break; 4022 break;
3874 case IBMVFC_HOST_ACTION_TGT_ADD:
3875 list_for_each_entry(tgt, &vhost->targets, queue) {
3876 if (tgt->action == IBMVFC_TGT_ACTION_ADD_RPORT) {
3877 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3878 ibmvfc_tgt_add_rport(tgt);
3879 return;
3880 }
3881 }
3882
3883 if (vhost->reinit && !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
3884 vhost->reinit = 0;
3885 scsi_block_requests(vhost->host);
3886 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
3887 } else {
3888 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3889 wake_up(&vhost->init_wait_q);
3890 }
3891 break;
3892 default: 4023 default:
3893 break; 4024 break;
3894 }; 4025 };
@@ -4118,6 +4249,56 @@ nomem:
4118} 4249}
4119 4250
4120/** 4251/**
4252 * ibmvfc_rport_add_thread - Worker thread for rport adds
4253 * @work: work struct
4254 *
4255 **/
4256static void ibmvfc_rport_add_thread(struct work_struct *work)
4257{
4258 struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
4259 rport_add_work_q);
4260 struct ibmvfc_target *tgt;
4261 struct fc_rport *rport;
4262 unsigned long flags;
4263 int did_work;
4264
4265 ENTER;
4266 spin_lock_irqsave(vhost->host->host_lock, flags);
4267 do {
4268 did_work = 0;
4269 if (vhost->state != IBMVFC_ACTIVE)
4270 break;
4271
4272 list_for_each_entry(tgt, &vhost->targets, queue) {
4273 if (tgt->add_rport) {
4274 did_work = 1;
4275 tgt->add_rport = 0;
4276 kref_get(&tgt->kref);
4277 rport = tgt->rport;
4278 if (!rport) {
4279 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4280 ibmvfc_tgt_add_rport(tgt);
4281 } else if (get_device(&rport->dev)) {
4282 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4283 tgt_dbg(tgt, "Setting rport roles\n");
4284 fc_remote_port_rolechg(rport, tgt->ids.roles);
4285 put_device(&rport->dev);
4286 }
4287
4288 kref_put(&tgt->kref, ibmvfc_release_tgt);
4289 spin_lock_irqsave(vhost->host->host_lock, flags);
4290 break;
4291 }
4292 }
4293 } while(did_work);
4294
4295 if (vhost->state == IBMVFC_ACTIVE)
4296 vhost->scan_complete = 1;
4297 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4298 LEAVE;
4299}
4300
4301/**
4121 * ibmvfc_probe - Adapter hot plug add entry point 4302 * ibmvfc_probe - Adapter hot plug add entry point
4122 * @vdev: vio device struct 4303 * @vdev: vio device struct
4123 * @id: vio device id struct 4304 * @id: vio device id struct
@@ -4160,6 +4341,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
4160 strcpy(vhost->partition_name, "UNKNOWN"); 4341 strcpy(vhost->partition_name, "UNKNOWN");
4161 init_waitqueue_head(&vhost->work_wait_q); 4342 init_waitqueue_head(&vhost->work_wait_q);
4162 init_waitqueue_head(&vhost->init_wait_q); 4343 init_waitqueue_head(&vhost->init_wait_q);
4344 INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
4163 4345
4164 if ((rc = ibmvfc_alloc_mem(vhost))) 4346 if ((rc = ibmvfc_alloc_mem(vhost)))
4165 goto free_scsi_host; 4347 goto free_scsi_host;
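The new rport-add path is a standard deferred-work pattern: INIT_WORK() wires rport_add_work_q to ibmvfc_rport_add_thread() at probe time, the discovery code kicks it with schedule_work() once targets have settled, and the handler recovers the host with container_of(). A bare-bones sketch of the same pattern outside this driver (all names here are hypothetical):

	#include <linux/workqueue.h>

	struct example_host {
		struct work_struct add_work;
	};

	static void example_add_worker(struct work_struct *work)
	{
		struct example_host *host = container_of(work, struct example_host, add_work);

		/* ...walk targets and register remote ports in process context... */
		(void)host;
	}

	static void example_setup(struct example_host *host)
	{
		INIT_WORK(&host->add_work, example_add_worker);	/* once, at probe */
	}

	static void example_kick(struct example_host *host)
	{
		schedule_work(&host->add_work);			/* defer the heavy lifting */
	}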
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index ca1dcf7a7568..007fa1c9ef14 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
29#include "viosrp.h" 29#include "viosrp.h"
30 30
31#define IBMVFC_NAME "ibmvfc" 31#define IBMVFC_NAME "ibmvfc"
32#define IBMVFC_DRIVER_VERSION "1.0.5" 32#define IBMVFC_DRIVER_VERSION "1.0.6"
33#define IBMVFC_DRIVER_DATE "(March 19, 2009)" 33#define IBMVFC_DRIVER_DATE "(May 28, 2009)"
34 34
35#define IBMVFC_DEFAULT_TIMEOUT 60 35#define IBMVFC_DEFAULT_TIMEOUT 60
36#define IBMVFC_ADISC_CANCEL_TIMEOUT 45 36#define IBMVFC_ADISC_CANCEL_TIMEOUT 45
@@ -57,9 +57,10 @@
57 * Ensure we have resources for ERP and initialization: 57 * Ensure we have resources for ERP and initialization:
58 * 1 for ERP 58 * 1 for ERP
59 * 1 for initialization 59 * 1 for initialization
60 * 1 for NPIV Logout
60 * 2 for each discovery thread 61 * 2 for each discovery thread
61 */ 62 */
62#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + (disc_threads * 2)) 63#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + 1 + (disc_threads * 2))
63 64
64#define IBMVFC_MAD_SUCCESS 0x00 65#define IBMVFC_MAD_SUCCESS 0x00
65#define IBMVFC_MAD_NOT_SUPPORTED 0xF1 66#define IBMVFC_MAD_NOT_SUPPORTED 0xF1
@@ -127,6 +128,7 @@ enum ibmvfc_mad_types {
127 IBMVFC_IMPLICIT_LOGOUT = 0x0040, 128 IBMVFC_IMPLICIT_LOGOUT = 0x0040,
128 IBMVFC_PASSTHRU = 0x0200, 129 IBMVFC_PASSTHRU = 0x0200,
129 IBMVFC_TMF_MAD = 0x0100, 130 IBMVFC_TMF_MAD = 0x0100,
131 IBMVFC_NPIV_LOGOUT = 0x0800,
130}; 132};
131 133
132struct ibmvfc_mad_common { 134struct ibmvfc_mad_common {
@@ -143,6 +145,10 @@ struct ibmvfc_npiv_login_mad {
143 struct srp_direct_buf buffer; 145 struct srp_direct_buf buffer;
144}__attribute__((packed, aligned (8))); 146}__attribute__((packed, aligned (8)));
145 147
148struct ibmvfc_npiv_logout_mad {
149 struct ibmvfc_mad_common common;
150}__attribute__((packed, aligned (8)));
151
146#define IBMVFC_MAX_NAME 256 152#define IBMVFC_MAX_NAME 256
147 153
148struct ibmvfc_npiv_login { 154struct ibmvfc_npiv_login {
@@ -201,7 +207,8 @@ struct ibmvfc_npiv_login_resp {
201#define IBMVFC_NATIVE_FC 0x01 207#define IBMVFC_NATIVE_FC 0x01
202#define IBMVFC_CAN_FLUSH_ON_HALT 0x08 208#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
203 u32 reserved; 209 u32 reserved;
204 u64 capabilites; 210 u64 capabilities;
211#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
205 u32 max_cmds; 212 u32 max_cmds;
206 u32 scsi_id_sz; 213 u32 scsi_id_sz;
207 u64 max_dma_len; 214 u64 max_dma_len;
@@ -541,9 +548,17 @@ struct ibmvfc_crq_queue {
541 dma_addr_t msg_token; 548 dma_addr_t msg_token;
542}; 549};
543 550
551enum ibmvfc_ae_link_state {
552 IBMVFC_AE_LS_LINK_UP = 0x01,
553 IBMVFC_AE_LS_LINK_BOUNCED = 0x02,
554 IBMVFC_AE_LS_LINK_DOWN = 0x04,
555 IBMVFC_AE_LS_LINK_DEAD = 0x08,
556};
557
544struct ibmvfc_async_crq { 558struct ibmvfc_async_crq {
545 volatile u8 valid; 559 volatile u8 valid;
546 u8 pad[3]; 560 u8 link_state;
561 u8 pad[2];
547 u32 pad2; 562 u32 pad2;
548 volatile u64 event; 563 volatile u64 event;
549 volatile u64 scsi_id; 564 volatile u64 scsi_id;
@@ -561,6 +576,7 @@ struct ibmvfc_async_crq_queue {
561union ibmvfc_iu { 576union ibmvfc_iu {
562 struct ibmvfc_mad_common mad_common; 577 struct ibmvfc_mad_common mad_common;
563 struct ibmvfc_npiv_login_mad npiv_login; 578 struct ibmvfc_npiv_login_mad npiv_login;
579 struct ibmvfc_npiv_logout_mad npiv_logout;
564 struct ibmvfc_discover_targets discover_targets; 580 struct ibmvfc_discover_targets discover_targets;
565 struct ibmvfc_port_login plogi; 581 struct ibmvfc_port_login plogi;
566 struct ibmvfc_process_login prli; 582 struct ibmvfc_process_login prli;
@@ -575,7 +591,6 @@ enum ibmvfc_target_action {
575 IBMVFC_TGT_ACTION_NONE = 0, 591 IBMVFC_TGT_ACTION_NONE = 0,
576 IBMVFC_TGT_ACTION_INIT, 592 IBMVFC_TGT_ACTION_INIT,
577 IBMVFC_TGT_ACTION_INIT_WAIT, 593 IBMVFC_TGT_ACTION_INIT_WAIT,
578 IBMVFC_TGT_ACTION_ADD_RPORT,
579 IBMVFC_TGT_ACTION_DEL_RPORT, 594 IBMVFC_TGT_ACTION_DEL_RPORT,
580}; 595};
581 596
@@ -588,7 +603,9 @@ struct ibmvfc_target {
588 int target_id; 603 int target_id;
589 enum ibmvfc_target_action action; 604 enum ibmvfc_target_action action;
590 int need_login; 605 int need_login;
606 int add_rport;
591 int init_retries; 607 int init_retries;
608 int logo_rcvd;
592 u32 cancel_key; 609 u32 cancel_key;
593 struct ibmvfc_service_parms service_parms; 610 struct ibmvfc_service_parms service_parms;
594 struct ibmvfc_service_parms service_parms_change; 611 struct ibmvfc_service_parms service_parms_change;
@@ -627,6 +644,8 @@ struct ibmvfc_event_pool {
627 644
628enum ibmvfc_host_action { 645enum ibmvfc_host_action {
629 IBMVFC_HOST_ACTION_NONE = 0, 646 IBMVFC_HOST_ACTION_NONE = 0,
647 IBMVFC_HOST_ACTION_LOGO,
648 IBMVFC_HOST_ACTION_LOGO_WAIT,
630 IBMVFC_HOST_ACTION_INIT, 649 IBMVFC_HOST_ACTION_INIT,
631 IBMVFC_HOST_ACTION_INIT_WAIT, 650 IBMVFC_HOST_ACTION_INIT_WAIT,
632 IBMVFC_HOST_ACTION_QUERY, 651 IBMVFC_HOST_ACTION_QUERY,
@@ -635,7 +654,6 @@ enum ibmvfc_host_action {
635 IBMVFC_HOST_ACTION_ALLOC_TGTS, 654 IBMVFC_HOST_ACTION_ALLOC_TGTS,
636 IBMVFC_HOST_ACTION_TGT_INIT, 655 IBMVFC_HOST_ACTION_TGT_INIT,
637 IBMVFC_HOST_ACTION_TGT_DEL_FAILED, 656 IBMVFC_HOST_ACTION_TGT_DEL_FAILED,
638 IBMVFC_HOST_ACTION_TGT_ADD,
639}; 657};
640 658
641enum ibmvfc_host_state { 659enum ibmvfc_host_state {
@@ -682,6 +700,8 @@ struct ibmvfc_host {
682 int client_migrated; 700 int client_migrated;
683 int reinit; 701 int reinit;
684 int delay_init; 702 int delay_init;
703 int scan_complete;
704 int logged_in;
685 int events_to_log; 705 int events_to_log;
686#define IBMVFC_AE_LINKUP 0x0001 706#define IBMVFC_AE_LINKUP 0x0001
687#define IBMVFC_AE_LINKDOWN 0x0002 707#define IBMVFC_AE_LINKDOWN 0x0002
@@ -692,6 +712,7 @@ struct ibmvfc_host {
692 void (*job_step) (struct ibmvfc_host *); 712 void (*job_step) (struct ibmvfc_host *);
693 struct task_struct *work_thread; 713 struct task_struct *work_thread;
694 struct tasklet_struct tasklet; 714 struct tasklet_struct tasklet;
715 struct work_struct rport_add_work_q;
695 wait_queue_head_t init_wait_q; 716 wait_queue_head_t init_wait_q;
696 wait_queue_head_t work_wait_q; 717 wait_queue_head_t work_wait_q;
697}; 718};
@@ -707,6 +728,12 @@ struct ibmvfc_host {
707#define tgt_err(t, fmt, ...) \ 728#define tgt_err(t, fmt, ...) \
708 dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__) 729 dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
709 730
731#define tgt_log(t, level, fmt, ...) \
732 do { \
733 if ((t)->vhost->log_level >= level) \
734 tgt_err(t, fmt, ##__VA_ARGS__); \
735 } while (0)
736
710#define ibmvfc_dbg(vhost, ...) \ 737#define ibmvfc_dbg(vhost, ...) \
711 DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__)) 738 DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__))
712 739
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index c9aa7611e408..869a11bdccbd 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -70,6 +70,7 @@
70#include <linux/moduleparam.h> 70#include <linux/moduleparam.h>
71#include <linux/dma-mapping.h> 71#include <linux/dma-mapping.h>
72#include <linux/delay.h> 72#include <linux/delay.h>
73#include <linux/of.h>
73#include <asm/firmware.h> 74#include <asm/firmware.h>
74#include <asm/vio.h> 75#include <asm/vio.h>
75#include <asm/firmware.h> 76#include <asm/firmware.h>
@@ -87,9 +88,15 @@
87 */ 88 */
88static int max_id = 64; 89static int max_id = 64;
89static int max_channel = 3; 90static int max_channel = 3;
90static int init_timeout = 5; 91static int init_timeout = 300;
92static int login_timeout = 60;
93static int info_timeout = 30;
94static int abort_timeout = 60;
95static int reset_timeout = 60;
91static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT; 96static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
92static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2; 97static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
98static int fast_fail = 1;
99static int client_reserve = 1;
93 100
94static struct scsi_transport_template *ibmvscsi_transport_template; 101static struct scsi_transport_template *ibmvscsi_transport_template;
95 102
@@ -110,6 +117,10 @@ module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
110MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds"); 117MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
111module_param_named(max_requests, max_requests, int, S_IRUGO); 118module_param_named(max_requests, max_requests, int, S_IRUGO);
112MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter"); 119MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
120module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR);
121MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
122module_param_named(client_reserve, client_reserve, int, S_IRUGO );
123MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
113 124
114/* ------------------------------------------------------------ 125/* ------------------------------------------------------------
115 * Routines for the event pool and event structs 126 * Routines for the event pool and event structs
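The new tunables added above (fast_fail, client_reserve) follow the usual module_param_named()/MODULE_PARM_DESC() pairing, so with non-zero permissions they appear under /sys/module/ibmvscsi/parameters/. A minimal stand-alone sketch of that pairing (the parameter name is made up for illustration):

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	static int example_flag = 1;
	module_param_named(example_flag, example_flag, int, S_IRUGO | S_IWUSR);
	MODULE_PARM_DESC(example_flag, "Enable the example behaviour. [Default=1]");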
@@ -781,105 +792,53 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
781/* ------------------------------------------------------------ 792/* ------------------------------------------------------------
782 * Routines for driver initialization 793 * Routines for driver initialization
783 */ 794 */
795
784/** 796/**
785 * adapter_info_rsp: - Handle response to MAD adapter info request 797 * map_persist_bufs: - Pre-map persistent data for adapter logins
786 * @evt_struct: srp_event_struct with the response 798 * @hostdata: ibmvscsi_host_data of host
787 * 799 *
788 * Used as a "done" callback by when sending adapter_info. Gets called 800 * Map the capabilities and adapter info DMA buffers to avoid runtime failures.
789 * by ibmvscsi_handle_crq() 801 * Return 1 on error, 0 on success.
790*/ 802 */
791static void adapter_info_rsp(struct srp_event_struct *evt_struct) 803static int map_persist_bufs(struct ibmvscsi_host_data *hostdata)
792{ 804{
793 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
794 dma_unmap_single(hostdata->dev,
795 evt_struct->iu.mad.adapter_info.buffer,
796 evt_struct->iu.mad.adapter_info.common.length,
797 DMA_BIDIRECTIONAL);
798 805
799 if (evt_struct->xfer_iu->mad.adapter_info.common.status) { 806 hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps,
800 dev_err(hostdata->dev, "error %d getting adapter info\n", 807 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
801 evt_struct->xfer_iu->mad.adapter_info.common.status); 808
802 } else { 809 if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) {
803 dev_info(hostdata->dev, "host srp version: %s, " 810 dev_err(hostdata->dev, "Unable to map capabilities buffer!\n");
804 "host partition %s (%d), OS %d, max io %u\n", 811 return 1;
805 hostdata->madapter_info.srp_version,
806 hostdata->madapter_info.partition_name,
807 hostdata->madapter_info.partition_number,
808 hostdata->madapter_info.os_type,
809 hostdata->madapter_info.port_max_txu[0]);
810
811 if (hostdata->madapter_info.port_max_txu[0])
812 hostdata->host->max_sectors =
813 hostdata->madapter_info.port_max_txu[0] >> 9;
814
815 if (hostdata->madapter_info.os_type == 3 &&
816 strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
817 dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
818 hostdata->madapter_info.srp_version);
819 dev_err(hostdata->dev, "limiting scatterlists to %d\n",
820 MAX_INDIRECT_BUFS);
821 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
822 }
823 } 812 }
813
814 hostdata->adapter_info_addr = dma_map_single(hostdata->dev,
815 &hostdata->madapter_info,
816 sizeof(hostdata->madapter_info),
817 DMA_BIDIRECTIONAL);
818 if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) {
819 dev_err(hostdata->dev, "Unable to map adapter info buffer!\n");
820 dma_unmap_single(hostdata->dev, hostdata->caps_addr,
821 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
822 return 1;
823 }
824
825 return 0;
824} 826}
825 827
826/** 828/**
827 * send_mad_adapter_info: - Sends the mad adapter info request 829 * unmap_persist_bufs: - Unmap persistent data needed for adapter logins
828 * and stores the result so it can be retrieved with 830 * @hostdata: ibmvscsi_host_data of host
829 * sysfs. We COULD consider causing a failure if the 831 *
830 * returned SRP version doesn't match ours. 832 * Unmap the capabilities and adapter info DMA buffers
831 * @hostdata: ibmvscsi_host_data of host 833 */
832 * 834static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata)
833 * Returns zero if successful.
834*/
835static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
836{ 835{
837 struct viosrp_adapter_info *req; 836 dma_unmap_single(hostdata->dev, hostdata->caps_addr,
838 struct srp_event_struct *evt_struct; 837 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
839 unsigned long flags;
840 dma_addr_t addr;
841
842 evt_struct = get_event_struct(&hostdata->pool);
843 if (!evt_struct) {
844 dev_err(hostdata->dev,
845 "couldn't allocate an event for ADAPTER_INFO_REQ!\n");
846 return;
847 }
848
849 init_event_struct(evt_struct,
850 adapter_info_rsp,
851 VIOSRP_MAD_FORMAT,
852 init_timeout);
853
854 req = &evt_struct->iu.mad.adapter_info;
855 memset(req, 0x00, sizeof(*req));
856
857 req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
858 req->common.length = sizeof(hostdata->madapter_info);
859 req->buffer = addr = dma_map_single(hostdata->dev,
860 &hostdata->madapter_info,
861 sizeof(hostdata->madapter_info),
862 DMA_BIDIRECTIONAL);
863 838
864 if (dma_mapping_error(hostdata->dev, req->buffer)) { 839 dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr,
865 if (!firmware_has_feature(FW_FEATURE_CMO)) 840 sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL);
866 dev_err(hostdata->dev, 841}
867 "Unable to map request_buffer for "
868 "adapter_info!\n");
869 free_event_struct(&hostdata->pool, evt_struct);
870 return;
871 }
872
873 spin_lock_irqsave(hostdata->host->host_lock, flags);
874 if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) {
875 dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
876 dma_unmap_single(hostdata->dev,
877 addr,
878 sizeof(hostdata->madapter_info),
879 DMA_BIDIRECTIONAL);
880 }
881 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
882};
883 842
884/** 843/**
885 * login_rsp: - Handle response to SRP login request 844 * login_rsp: - Handle response to SRP login request
@@ -909,9 +868,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
909 } 868 }
910 869
911 dev_info(hostdata->dev, "SRP_LOGIN succeeded\n"); 870 dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
912 871 hostdata->client_migrated = 0;
913 if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
914 dev_err(hostdata->dev, "Invalid request_limit.\n");
915 872
916 /* Now we know what the real request-limit is. 873 /* Now we know what the real request-limit is.
917 * This value is set rather than added to request_limit because 874 * This value is set rather than added to request_limit because
@@ -922,15 +879,12 @@ static void login_rsp(struct srp_event_struct *evt_struct)
922 879
923 /* If we had any pending I/Os, kick them */ 880 /* If we had any pending I/Os, kick them */
924 scsi_unblock_requests(hostdata->host); 881 scsi_unblock_requests(hostdata->host);
925
926 send_mad_adapter_info(hostdata);
927 return;
928} 882}
929 883
930/** 884/**
931 * send_srp_login: - Sends the srp login 885 * send_srp_login: - Sends the srp login
932 * @hostdata: ibmvscsi_host_data of host 886 * @hostdata: ibmvscsi_host_data of host
933 * 887 *
934 * Returns zero if successful. 888 * Returns zero if successful.
935*/ 889*/
936static int send_srp_login(struct ibmvscsi_host_data *hostdata) 890static int send_srp_login(struct ibmvscsi_host_data *hostdata)
@@ -939,22 +893,17 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
939 unsigned long flags; 893 unsigned long flags;
940 struct srp_login_req *login; 894 struct srp_login_req *login;
941 struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool); 895 struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
942 if (!evt_struct) {
943 dev_err(hostdata->dev, "couldn't allocate an event for login req!\n");
944 return FAILED;
945 }
946 896
947 init_event_struct(evt_struct, 897 BUG_ON(!evt_struct);
948 login_rsp, 898 init_event_struct(evt_struct, login_rsp,
949 VIOSRP_SRP_FORMAT, 899 VIOSRP_SRP_FORMAT, login_timeout);
950 init_timeout);
951 900
952 login = &evt_struct->iu.srp.login_req; 901 login = &evt_struct->iu.srp.login_req;
953 memset(login, 0x00, sizeof(struct srp_login_req)); 902 memset(login, 0, sizeof(*login));
954 login->opcode = SRP_LOGIN_REQ; 903 login->opcode = SRP_LOGIN_REQ;
955 login->req_it_iu_len = sizeof(union srp_iu); 904 login->req_it_iu_len = sizeof(union srp_iu);
956 login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT; 905 login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
957 906
958 spin_lock_irqsave(hostdata->host->host_lock, flags); 907 spin_lock_irqsave(hostdata->host->host_lock, flags);
959 /* Start out with a request limit of 0, since this is negotiated in 908 /* Start out with a request limit of 0, since this is negotiated in
960 * the login request we are just sending and login requests always 909 * the login request we are just sending and login requests always
@@ -962,13 +911,241 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
962 */ 911 */
963 atomic_set(&hostdata->request_limit, 0); 912 atomic_set(&hostdata->request_limit, 0);
964 913
965 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2); 914 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
966 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 915 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
967 dev_info(hostdata->dev, "sent SRP login\n"); 916 dev_info(hostdata->dev, "sent SRP login\n");
968 return rc; 917 return rc;
969}; 918};
970 919
971/** 920/**
921 * capabilities_rsp: - Handle response to MAD adapter capabilities request
922 * @evt_struct: srp_event_struct with the response
923 *
 924 * Used as a "done" callback when sending the capabilities request.
925 */
926static void capabilities_rsp(struct srp_event_struct *evt_struct)
927{
928 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
929
930 if (evt_struct->xfer_iu->mad.capabilities.common.status) {
931 dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
932 evt_struct->xfer_iu->mad.capabilities.common.status);
933 } else {
934 if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP)
935 dev_info(hostdata->dev, "Partition migration not supported\n");
936
937 if (client_reserve) {
938 if (hostdata->caps.reserve.common.server_support ==
939 SERVER_SUPPORTS_CAP)
940 dev_info(hostdata->dev, "Client reserve enabled\n");
941 else
942 dev_info(hostdata->dev, "Client reserve not supported\n");
943 }
944 }
945
946 send_srp_login(hostdata);
947}
948
949/**
950 * send_mad_capabilities: - Sends the mad capabilities request
 951 * and stores the result in hostdata->caps for capabilities_rsp() to examine
952 * @hostdata: ibmvscsi_host_data of host
953 */
954static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
955{
956 struct viosrp_capabilities *req;
957 struct srp_event_struct *evt_struct;
958 unsigned long flags;
959 struct device_node *of_node = hostdata->dev->archdata.of_node;
960 const char *location;
961
962 evt_struct = get_event_struct(&hostdata->pool);
963 BUG_ON(!evt_struct);
964
965 init_event_struct(evt_struct, capabilities_rsp,
966 VIOSRP_MAD_FORMAT, info_timeout);
967
968 req = &evt_struct->iu.mad.capabilities;
969 memset(req, 0, sizeof(*req));
970
971 hostdata->caps.flags = CAP_LIST_SUPPORTED;
972 if (hostdata->client_migrated)
973 hostdata->caps.flags |= CLIENT_MIGRATED;
974
975 strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
976 sizeof(hostdata->caps.name));
977 hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0';
978
979 location = of_get_property(of_node, "ibm,loc-code", NULL);
980 location = location ? location : dev_name(hostdata->dev);
981 strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
982 hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
983
984 req->common.type = VIOSRP_CAPABILITIES_TYPE;
985 req->buffer = hostdata->caps_addr;
986
987 hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES;
988 hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration);
989 hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP;
990 hostdata->caps.migration.ecl = 1;
991
992 if (client_reserve) {
993 hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES;
994 hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve);
995 hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP;
996 hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2;
997 req->common.length = sizeof(hostdata->caps);
998 } else
999 req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve);
1000
1001 spin_lock_irqsave(hostdata->host->host_lock, flags);
1002 if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
1003 dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n");
1004 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1005};
1006
1007/**
1008 * fast_fail_rsp: - Handle response to MAD enable fast fail
1009 * @evt_struct: srp_event_struct with the response
1010 *
 1011 * Used as a "done" callback when sending enable fast fail. Gets called
1012 * by ibmvscsi_handle_crq()
1013 */
1014static void fast_fail_rsp(struct srp_event_struct *evt_struct)
1015{
1016 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
1017 u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status;
1018
1019 if (status == VIOSRP_MAD_NOT_SUPPORTED)
1020 dev_err(hostdata->dev, "fast_fail not supported in server\n");
1021 else if (status == VIOSRP_MAD_FAILED)
1022 dev_err(hostdata->dev, "fast_fail request failed\n");
1023 else if (status != VIOSRP_MAD_SUCCESS)
1024 dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status);
1025
1026 send_mad_capabilities(hostdata);
1027}
1028
1029/**
 1030 * enable_fast_fail - Enable fast fail for the adapter
1031 * @hostdata: ibmvscsi_host_data of host
1032 *
1033 * Returns zero if successful.
1034 */
1035static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
1036{
1037 int rc;
1038 unsigned long flags;
1039 struct viosrp_fast_fail *fast_fail_mad;
1040 struct srp_event_struct *evt_struct;
1041
1042 if (!fast_fail) {
1043 send_mad_capabilities(hostdata);
1044 return 0;
1045 }
1046
1047 evt_struct = get_event_struct(&hostdata->pool);
1048 BUG_ON(!evt_struct);
1049
1050 init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout);
1051
1052 fast_fail_mad = &evt_struct->iu.mad.fast_fail;
1053 memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
1054 fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL;
1055 fast_fail_mad->common.length = sizeof(*fast_fail_mad);
1056
1057 spin_lock_irqsave(hostdata->host->host_lock, flags);
1058 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
1059 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1060 return rc;
1061}
1062
1063/**
1064 * adapter_info_rsp: - Handle response to MAD adapter info request
1065 * @evt_struct: srp_event_struct with the response
1066 *
 1067 * Used as a "done" callback when sending adapter_info. Gets called
1068 * by ibmvscsi_handle_crq()
1069*/
1070static void adapter_info_rsp(struct srp_event_struct *evt_struct)
1071{
1072 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
1073
1074 if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
1075 dev_err(hostdata->dev, "error %d getting adapter info\n",
1076 evt_struct->xfer_iu->mad.adapter_info.common.status);
1077 } else {
1078 dev_info(hostdata->dev, "host srp version: %s, "
1079 "host partition %s (%d), OS %d, max io %u\n",
1080 hostdata->madapter_info.srp_version,
1081 hostdata->madapter_info.partition_name,
1082 hostdata->madapter_info.partition_number,
1083 hostdata->madapter_info.os_type,
1084 hostdata->madapter_info.port_max_txu[0]);
1085
1086 if (hostdata->madapter_info.port_max_txu[0])
1087 hostdata->host->max_sectors =
1088 hostdata->madapter_info.port_max_txu[0] >> 9;
1089
1090 if (hostdata->madapter_info.os_type == 3 &&
1091 strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
1092 dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
1093 hostdata->madapter_info.srp_version);
1094 dev_err(hostdata->dev, "limiting scatterlists to %d\n",
1095 MAX_INDIRECT_BUFS);
1096 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
1097 }
1098 }
1099
1100 enable_fast_fail(hostdata);
1101}
1102
1103/**
1104 * send_mad_adapter_info: - Sends the mad adapter info request
1105 * and stores the result so it can be retrieved with
1106 * sysfs. We COULD consider causing a failure if the
1107 * returned SRP version doesn't match ours.
1108 * @hostdata: ibmvscsi_host_data of host
1109 *
1110 * Returns zero if successful.
1111*/
1112static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
1113{
1114 struct viosrp_adapter_info *req;
1115 struct srp_event_struct *evt_struct;
1116 unsigned long flags;
1117
1118 evt_struct = get_event_struct(&hostdata->pool);
1119 BUG_ON(!evt_struct);
1120
1121 init_event_struct(evt_struct,
1122 adapter_info_rsp,
1123 VIOSRP_MAD_FORMAT,
1124 info_timeout);
1125
1126 req = &evt_struct->iu.mad.adapter_info;
1127 memset(req, 0x00, sizeof(*req));
1128
1129 req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
1130 req->common.length = sizeof(hostdata->madapter_info);
1131 req->buffer = hostdata->adapter_info_addr;
1132
1133 spin_lock_irqsave(hostdata->host->host_lock, flags);
1134 if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
1135 dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
1136 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1137};
1138
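Note that send_mad_adapter_info() now points req->buffer at hostdata->adapter_info_addr, the address pre-mapped once by map_persist_bufs(), instead of mapping the buffer on every request. For reference, the streaming-DMA idiom used by map_persist_bufs()/unmap_persist_bufs() boils down to the following sketch (a made-up structure, not the driver's):

	#include <linux/dma-mapping.h>

	struct example_bufs {
		struct device *dev;
		u8 info[256];
		dma_addr_t info_addr;
	};

	static int example_map(struct example_bufs *b)
	{
		b->info_addr = dma_map_single(b->dev, &b->info, sizeof(b->info),
					      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(b->dev, b->info_addr))
			return 1;	/* caller treats non-zero as failure */
		return 0;
	}

	static void example_unmap(struct example_bufs *b)
	{
		dma_unmap_single(b->dev, b->info_addr, sizeof(b->info),
				 DMA_BIDIRECTIONAL);
	}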
1139/**
1140 * init_adapter: Start virtual adapter initialization sequence
1141 *
1142 */
1143static void init_adapter(struct ibmvscsi_host_data *hostdata)
1144{
1145 send_mad_adapter_info(hostdata);
1146}
1147
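For orientation, the initialization sequence introduced above chains each MAD's completion callback into the next step before the SRP login is finally sent:

	/*
	 * init_adapter()
	 *   -> send_mad_adapter_info()   ... adapter_info_rsp()
	 *   -> enable_fast_fail()        ... fast_fail_rsp()   (MAD skipped when fast_fail=0)
	 *   -> send_mad_capabilities()   ... capabilities_rsp()
	 *   -> send_srp_login()          ... login_rsp()
	 */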
1148/**
972 * sync_completion: Signal that a synchronous command has completed 1149 * sync_completion: Signal that a synchronous command has completed
973 * Note that after returning from this call, the evt_struct is freed. 1150 * Note that after returning from this call, the evt_struct is freed.
974 * the caller waiting on this completion shouldn't touch the evt_struct 1151 * the caller waiting on this completion shouldn't touch the evt_struct
@@ -1029,7 +1206,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
1029 init_event_struct(evt, 1206 init_event_struct(evt,
1030 sync_completion, 1207 sync_completion,
1031 VIOSRP_SRP_FORMAT, 1208 VIOSRP_SRP_FORMAT,
1032 init_timeout); 1209 abort_timeout);
1033 1210
1034 tsk_mgmt = &evt->iu.srp.tsk_mgmt; 1211 tsk_mgmt = &evt->iu.srp.tsk_mgmt;
1035 1212
@@ -1043,7 +1220,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
1043 evt->sync_srp = &srp_rsp; 1220 evt->sync_srp = &srp_rsp;
1044 1221
1045 init_completion(&evt->comp); 1222 init_completion(&evt->comp);
1046 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2); 1223 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2);
1047 1224
1048 if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) 1225 if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
1049 break; 1226 break;
@@ -1152,7 +1329,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1152 init_event_struct(evt, 1329 init_event_struct(evt,
1153 sync_completion, 1330 sync_completion,
1154 VIOSRP_SRP_FORMAT, 1331 VIOSRP_SRP_FORMAT,
1155 init_timeout); 1332 reset_timeout);
1156 1333
1157 tsk_mgmt = &evt->iu.srp.tsk_mgmt; 1334 tsk_mgmt = &evt->iu.srp.tsk_mgmt;
1158 1335
@@ -1165,7 +1342,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1165 evt->sync_srp = &srp_rsp; 1342 evt->sync_srp = &srp_rsp;
1166 1343
1167 init_completion(&evt->comp); 1344 init_completion(&evt->comp);
1168 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2); 1345 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2);
1169 1346
1170 if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) 1347 if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
1171 break; 1348 break;
@@ -1281,7 +1458,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1281 if ((rc = ibmvscsi_ops->send_crq(hostdata, 1458 if ((rc = ibmvscsi_ops->send_crq(hostdata,
1282 0xC002000000000000LL, 0)) == 0) { 1459 0xC002000000000000LL, 0)) == 0) {
1283 /* Now login */ 1460 /* Now login */
1284 send_srp_login(hostdata); 1461 init_adapter(hostdata);
1285 } else { 1462 } else {
1286 dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc); 1463 dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
1287 } 1464 }
@@ -1291,7 +1468,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1291 dev_info(hostdata->dev, "partner initialization complete\n"); 1468 dev_info(hostdata->dev, "partner initialization complete\n");
1292 1469
1293 /* Now login */ 1470 /* Now login */
1294 send_srp_login(hostdata); 1471 init_adapter(hostdata);
1295 break; 1472 break;
1296 default: 1473 default:
1297 dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format); 1474 dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
@@ -1303,6 +1480,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1303 if (crq->format == 0x06) { 1480 if (crq->format == 0x06) {
1304 /* We need to re-setup the interpartition connection */ 1481 /* We need to re-setup the interpartition connection */
1305 dev_info(hostdata->dev, "Re-enabling adapter!\n"); 1482 dev_info(hostdata->dev, "Re-enabling adapter!\n");
1483 hostdata->client_migrated = 1;
1306 purge_requests(hostdata, DID_REQUEUE); 1484 purge_requests(hostdata, DID_REQUEUE);
1307 if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue, 1485 if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue,
1308 hostdata)) || 1486 hostdata)) ||
@@ -1397,7 +1575,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1397 init_event_struct(evt_struct, 1575 init_event_struct(evt_struct,
1398 sync_completion, 1576 sync_completion,
1399 VIOSRP_MAD_FORMAT, 1577 VIOSRP_MAD_FORMAT,
1400 init_timeout); 1578 info_timeout);
1401 1579
1402 host_config = &evt_struct->iu.mad.host_config; 1580 host_config = &evt_struct->iu.mad.host_config;
1403 1581
@@ -1419,7 +1597,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1419 1597
1420 init_completion(&evt_struct->comp); 1598 init_completion(&evt_struct->comp);
1421 spin_lock_irqsave(hostdata->host->host_lock, flags); 1599 spin_lock_irqsave(hostdata->host->host_lock, flags);
1422 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2); 1600 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
1423 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 1601 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1424 if (rc == 0) 1602 if (rc == 0)
1425 wait_for_completion(&evt_struct->comp); 1603 wait_for_completion(&evt_struct->comp);
@@ -1444,7 +1622,7 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
1444 spin_lock_irqsave(shost->host_lock, lock_flags); 1622 spin_lock_irqsave(shost->host_lock, lock_flags);
1445 if (sdev->type == TYPE_DISK) { 1623 if (sdev->type == TYPE_DISK) {
1446 sdev->allow_restart = 1; 1624 sdev->allow_restart = 1;
1447 blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); 1625 blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
1448 } 1626 }
1449 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); 1627 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
1450 spin_unlock_irqrestore(shost->host_lock, lock_flags); 1628 spin_unlock_irqrestore(shost->host_lock, lock_flags);
@@ -1471,6 +1649,46 @@ static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
1471/* ------------------------------------------------------------ 1649/* ------------------------------------------------------------
1472 * sysfs attributes 1650 * sysfs attributes
1473 */ 1651 */
1652static ssize_t show_host_vhost_loc(struct device *dev,
1653 struct device_attribute *attr, char *buf)
1654{
1655 struct Scsi_Host *shost = class_to_shost(dev);
1656 struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1657 int len;
1658
1659 len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n",
1660 hostdata->caps.loc);
1661 return len;
1662}
1663
1664static struct device_attribute ibmvscsi_host_vhost_loc = {
1665 .attr = {
1666 .name = "vhost_loc",
1667 .mode = S_IRUGO,
1668 },
1669 .show = show_host_vhost_loc,
1670};
1671
1672static ssize_t show_host_vhost_name(struct device *dev,
1673 struct device_attribute *attr, char *buf)
1674{
1675 struct Scsi_Host *shost = class_to_shost(dev);
1676 struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1677 int len;
1678
1679 len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n",
1680 hostdata->caps.name);
1681 return len;
1682}
1683
1684static struct device_attribute ibmvscsi_host_vhost_name = {
1685 .attr = {
1686 .name = "vhost_name",
1687 .mode = S_IRUGO,
1688 },
1689 .show = show_host_vhost_name,
1690};
1691
1474static ssize_t show_host_srp_version(struct device *dev, 1692static ssize_t show_host_srp_version(struct device *dev,
1475 struct device_attribute *attr, char *buf) 1693 struct device_attribute *attr, char *buf)
1476{ 1694{
@@ -1594,6 +1812,8 @@ static struct device_attribute ibmvscsi_host_config = {
1594}; 1812};
1595 1813
1596static struct device_attribute *ibmvscsi_attrs[] = { 1814static struct device_attribute *ibmvscsi_attrs[] = {
1815 &ibmvscsi_host_vhost_loc,
1816 &ibmvscsi_host_vhost_name,
1597 &ibmvscsi_host_srp_version, 1817 &ibmvscsi_host_srp_version,
1598 &ibmvscsi_host_partition_name, 1818 &ibmvscsi_host_partition_name,
1599 &ibmvscsi_host_partition_number, 1819 &ibmvscsi_host_partition_number,
@@ -1657,7 +1877,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1657 unsigned long wait_switch = 0; 1877 unsigned long wait_switch = 0;
1658 int rc; 1878 int rc;
1659 1879
1660 vdev->dev.driver_data = NULL; 1880 dev_set_drvdata(&vdev->dev, NULL);
1661 1881
1662 host = scsi_host_alloc(&driver_template, sizeof(*hostdata)); 1882 host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
1663 if (!host) { 1883 if (!host) {
@@ -1674,6 +1894,11 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1674 atomic_set(&hostdata->request_limit, -1); 1894 atomic_set(&hostdata->request_limit, -1);
1675 hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT; 1895 hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
1676 1896
1897 if (map_persist_bufs(hostdata)) {
1898 dev_err(&vdev->dev, "couldn't map persistent buffers\n");
1899 goto persist_bufs_failed;
1900 }
1901
1677 rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events); 1902 rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
1678 if (rc != 0 && rc != H_RESOURCE) { 1903 if (rc != 0 && rc != H_RESOURCE) {
1679 dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc); 1904 dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
@@ -1687,6 +1912,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1687 host->max_lun = 8; 1912 host->max_lun = 8;
1688 host->max_id = max_id; 1913 host->max_id = max_id;
1689 host->max_channel = max_channel; 1914 host->max_channel = max_channel;
1915 host->max_cmd_len = 16;
1690 1916
1691 if (scsi_add_host(hostdata->host, hostdata->dev)) 1917 if (scsi_add_host(hostdata->host, hostdata->dev))
1692 goto add_host_failed; 1918 goto add_host_failed;
@@ -1723,7 +1949,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1723 scsi_scan_host(host); 1949 scsi_scan_host(host);
1724 } 1950 }
1725 1951
1726 vdev->dev.driver_data = hostdata; 1952 dev_set_drvdata(&vdev->dev, hostdata);
1727 return 0; 1953 return 0;
1728 1954
1729 add_srp_port_failed: 1955 add_srp_port_failed:
@@ -1733,6 +1959,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1733 init_pool_failed: 1959 init_pool_failed:
1734 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events); 1960 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
1735 init_crq_failed: 1961 init_crq_failed:
1962 unmap_persist_bufs(hostdata);
1963 persist_bufs_failed:
1736 scsi_host_put(host); 1964 scsi_host_put(host);
1737 scsi_host_alloc_failed: 1965 scsi_host_alloc_failed:
1738 return -1; 1966 return -1;
@@ -1740,7 +1968,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1740 1968
1741static int ibmvscsi_remove(struct vio_dev *vdev) 1969static int ibmvscsi_remove(struct vio_dev *vdev)
1742{ 1970{
1743 struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data; 1971 struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
1972 unmap_persist_bufs(hostdata);
1744 release_event_pool(&hostdata->pool, hostdata); 1973 release_event_pool(&hostdata->pool, hostdata);
1745 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, 1974 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
1746 max_events); 1975 max_events);
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 2d4339d5e16e..76425303def0 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -90,6 +90,7 @@ struct event_pool {
90/* all driver data associated with a host adapter */ 90/* all driver data associated with a host adapter */
91struct ibmvscsi_host_data { 91struct ibmvscsi_host_data {
92 atomic_t request_limit; 92 atomic_t request_limit;
93 int client_migrated;
93 struct device *dev; 94 struct device *dev;
94 struct event_pool pool; 95 struct event_pool pool;
95 struct crq_queue queue; 96 struct crq_queue queue;
@@ -97,6 +98,9 @@ struct ibmvscsi_host_data {
97 struct list_head sent; 98 struct list_head sent;
98 struct Scsi_Host *host; 99 struct Scsi_Host *host;
99 struct mad_adapter_info_data madapter_info; 100 struct mad_adapter_info_data madapter_info;
101 struct capabilities caps;
102 dma_addr_t caps_addr;
103 dma_addr_t adapter_info_addr;
100}; 104};
101 105
102/* routines for managing a command/response queue */ 106/* routines for managing a command/response queue */
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index e2dd6a45924a..d5eaf9727109 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -892,7 +892,7 @@ free_vport:
892 892
893static int ibmvstgt_remove(struct vio_dev *dev) 893static int ibmvstgt_remove(struct vio_dev *dev)
894{ 894{
895 struct srp_target *target = (struct srp_target *) dev->dev.driver_data; 895 struct srp_target *target = dev_get_drvdata(&dev->dev);
896 struct Scsi_Host *shost = target->shost; 896 struct Scsi_Host *shost = target->shost;
897 struct vio_port *vport = target->ldata; 897 struct vio_port *vport = target->ldata;
898 898
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h
index 204604501ad8..2cd735d1d196 100644
--- a/drivers/scsi/ibmvscsi/viosrp.h
+++ b/drivers/scsi/ibmvscsi/viosrp.h
@@ -37,6 +37,7 @@
37 37
38#define SRP_VERSION "16.a" 38#define SRP_VERSION "16.a"
39#define SRP_MAX_IU_LEN 256 39#define SRP_MAX_IU_LEN 256
40#define SRP_MAX_LOC_LEN 32
40 41
41union srp_iu { 42union srp_iu {
42 struct srp_login_req login_req; 43 struct srp_login_req login_req;
@@ -86,7 +87,37 @@ enum viosrp_mad_types {
86 VIOSRP_EMPTY_IU_TYPE = 0x01, 87 VIOSRP_EMPTY_IU_TYPE = 0x01,
87 VIOSRP_ERROR_LOG_TYPE = 0x02, 88 VIOSRP_ERROR_LOG_TYPE = 0x02,
88 VIOSRP_ADAPTER_INFO_TYPE = 0x03, 89 VIOSRP_ADAPTER_INFO_TYPE = 0x03,
89 VIOSRP_HOST_CONFIG_TYPE = 0x04 90 VIOSRP_HOST_CONFIG_TYPE = 0x04,
91 VIOSRP_CAPABILITIES_TYPE = 0x05,
92 VIOSRP_ENABLE_FAST_FAIL = 0x08,
93};
94
95enum viosrp_mad_status {
96 VIOSRP_MAD_SUCCESS = 0x00,
97 VIOSRP_MAD_NOT_SUPPORTED = 0xF1,
98 VIOSRP_MAD_FAILED = 0xF7,
99};
100
101enum viosrp_capability_type {
102 MIGRATION_CAPABILITIES = 0x01,
103 RESERVATION_CAPABILITIES = 0x02,
104};
105
106enum viosrp_capability_support {
107 SERVER_DOES_NOT_SUPPORTS_CAP = 0x0,
108 SERVER_SUPPORTS_CAP = 0x01,
109 SERVER_CAP_DATA = 0x02,
110};
111
112enum viosrp_reserve_type {
113 CLIENT_RESERVE_SCSI_2 = 0x01,
114};
115
116enum viosrp_capability_flag {
117 CLIENT_MIGRATED = 0x01,
118 CLIENT_RECONNECT = 0x02,
119 CAP_LIST_SUPPORTED = 0x04,
120 CAP_LIST_DATA = 0x08,
90}; 121};
91 122
92/* 123/*
@@ -127,11 +158,46 @@ struct viosrp_host_config {
127 u64 buffer; 158 u64 buffer;
128}; 159};
129 160
161struct viosrp_fast_fail {
162 struct mad_common common;
163};
164
165struct viosrp_capabilities {
166 struct mad_common common;
167 u64 buffer;
168};
169
170struct mad_capability_common {
171 u32 cap_type;
172 u16 length;
173 u16 server_support;
174};
175
176struct mad_reserve_cap {
177 struct mad_capability_common common;
178 u32 type;
179};
180
181struct mad_migration_cap {
182 struct mad_capability_common common;
183 u32 ecl;
184};
185
186struct capabilities{
187 u32 flags;
188 char name[SRP_MAX_LOC_LEN];
189 char loc[SRP_MAX_LOC_LEN];
190 struct mad_migration_cap migration;
191 struct mad_reserve_cap reserve;
192};
193
130union mad_iu { 194union mad_iu {
131 struct viosrp_empty_iu empty_iu; 195 struct viosrp_empty_iu empty_iu;
132 struct viosrp_error_log error_log; 196 struct viosrp_error_log error_log;
133 struct viosrp_adapter_info adapter_info; 197 struct viosrp_adapter_info adapter_info;
134 struct viosrp_host_config host_config; 198 struct viosrp_host_config host_config;
199 struct viosrp_fast_fail fast_fail;
200 struct viosrp_capabilities capabilities;
135}; 201};
136 202
137union viosrp_iu { 203union viosrp_iu {
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index dd689ded8609..5f045505a1f4 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -131,13 +131,13 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
131}; 131};
132 132
133static const struct ipr_chip_t ipr_chip[] = { 133static const struct ipr_chip_t ipr_chip[] = {
134 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] }, 134 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, &ipr_chip_cfg[0] },
135 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] }, 135 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, &ipr_chip_cfg[0] },
136 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] }, 136 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] },
137 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] }, 137 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] },
138 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] }, 138 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, &ipr_chip_cfg[0] },
139 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] }, 139 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, &ipr_chip_cfg[1] },
140 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] } 140 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, &ipr_chip_cfg[1] }
141}; 141};
142 142
143static int ipr_max_bus_speeds [] = { 143static int ipr_max_bus_speeds [] = {
@@ -7003,6 +7003,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
7003 ioa_cfg->sdt_state = ABORT_DUMP; 7003 ioa_cfg->sdt_state = ABORT_DUMP;
7004 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES; 7004 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7005 ioa_cfg->in_ioa_bringdown = 1; 7005 ioa_cfg->in_ioa_bringdown = 1;
7006 ioa_cfg->allow_cmds = 0;
7006 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 7007 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7007 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 7008 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7008} 7009}
@@ -7366,6 +7367,7 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
7366 INIT_LIST_HEAD(&ioa_cfg->used_res_q); 7367 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
7367 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); 7368 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
7368 init_waitqueue_head(&ioa_cfg->reset_wait_q); 7369 init_waitqueue_head(&ioa_cfg->reset_wait_q);
7370 init_waitqueue_head(&ioa_cfg->msi_wait_q);
7369 ioa_cfg->sdt_state = INACTIVE; 7371 ioa_cfg->sdt_state = INACTIVE;
7370 if (ipr_enable_cache) 7372 if (ipr_enable_cache)
7371 ioa_cfg->cache_state = CACHE_ENABLED; 7373 ioa_cfg->cache_state = CACHE_ENABLED;
@@ -7397,25 +7399,108 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
7397} 7399}
7398 7400
7399/** 7401/**
7400 * ipr_get_chip_cfg - Find adapter chip configuration 7402 * ipr_get_chip_info - Find adapter chip information
7401 * @dev_id: PCI device id struct 7403 * @dev_id: PCI device id struct
7402 * 7404 *
7403 * Return value: 7405 * Return value:
7404 * ptr to chip config on success / NULL on failure 7406 * ptr to chip information on success / NULL on failure
7405 **/ 7407 **/
7406static const struct ipr_chip_cfg_t * __devinit 7408static const struct ipr_chip_t * __devinit
7407ipr_get_chip_cfg(const struct pci_device_id *dev_id) 7409ipr_get_chip_info(const struct pci_device_id *dev_id)
7408{ 7410{
7409 int i; 7411 int i;
7410 7412
7411 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++) 7413 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
7412 if (ipr_chip[i].vendor == dev_id->vendor && 7414 if (ipr_chip[i].vendor == dev_id->vendor &&
7413 ipr_chip[i].device == dev_id->device) 7415 ipr_chip[i].device == dev_id->device)
7414 return ipr_chip[i].cfg; 7416 return &ipr_chip[i];
7415 return NULL; 7417 return NULL;
7416} 7418}
7417 7419
7418/** 7420/**
7421 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
7422 * @pdev: PCI device struct
7423 *
7424 * Description: Simply set the msi_received flag to 1 indicating that
7425 * Message Signaled Interrupts are supported.
7426 *
7427 * Return value:
7428 * 0 on success / non-zero on failure
7429 **/
7430static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
7431{
7432 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
7433 unsigned long lock_flags = 0;
7434 irqreturn_t rc = IRQ_HANDLED;
7435
7436 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7437
7438 ioa_cfg->msi_received = 1;
7439 wake_up(&ioa_cfg->msi_wait_q);
7440
7441 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7442 return rc;
7443}
7444
7445/**
7446 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
7447 * @pdev: PCI device struct
7448 *
 7449 * Description: The return value from pci_enable_msi() cannot always be
7450 * trusted. This routine sets up and initiates a test interrupt to determine
7451 * if the interrupt is received via the ipr_test_intr() service routine.
 7452 * If the test fails, the driver will fall back to LSI.
7453 *
7454 * Return value:
7455 * 0 on success / non-zero on failure
7456 **/
7457static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
7458 struct pci_dev *pdev)
7459{
7460 int rc;
7461 volatile u32 int_reg;
7462 unsigned long lock_flags = 0;
7463
7464 ENTER;
7465
7466 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7467 init_waitqueue_head(&ioa_cfg->msi_wait_q);
7468 ioa_cfg->msi_received = 0;
7469 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
7470 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg);
7471 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7472 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7473
7474 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
7475 if (rc) {
7476 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
7477 return rc;
7478 } else if (ipr_debug)
7479 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
7480
7481 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg);
7482 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7483 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
7484 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
7485
7486 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7487 if (!ioa_cfg->msi_received) {
7488 /* MSI test failed */
7489 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
7490 rc = -EOPNOTSUPP;
7491 } else if (ipr_debug)
7492 dev_info(&pdev->dev, "MSI test succeeded.\n");
7493
7494 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7495
7496 free_irq(pdev->irq, ioa_cfg);
7497
7498 LEAVE;
7499
7500 return rc;
7501}
7502
7503/**
7419 * ipr_probe_ioa - Allocates memory and does first stage of initialization 7504 * ipr_probe_ioa - Allocates memory and does first stage of initialization
7420 * @pdev: PCI device struct 7505 * @pdev: PCI device struct
7421 * @dev_id: PCI device id struct 7506 * @dev_id: PCI device id struct
@@ -7440,11 +7525,6 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7440 goto out; 7525 goto out;
7441 } 7526 }
7442 7527
7443 if (!(rc = pci_enable_msi(pdev)))
7444 dev_info(&pdev->dev, "MSI enabled\n");
7445 else if (ipr_debug)
7446 dev_info(&pdev->dev, "Cannot enable MSI\n");
7447
7448 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq); 7528 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
7449 7529
7450 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg)); 7530 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
@@ -7460,14 +7540,16 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7460 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, 7540 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
7461 sata_port_info.flags, &ipr_sata_ops); 7541 sata_port_info.flags, &ipr_sata_ops);
7462 7542
7463 ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id); 7543 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
7464 7544
7465 if (!ioa_cfg->chip_cfg) { 7545 if (!ioa_cfg->ipr_chip) {
7466 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n", 7546 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
7467 dev_id->vendor, dev_id->device); 7547 dev_id->vendor, dev_id->device);
7468 goto out_scsi_host_put; 7548 goto out_scsi_host_put;
7469 } 7549 }
7470 7550
7551 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
7552
7471 if (ipr_transop_timeout) 7553 if (ipr_transop_timeout)
7472 ioa_cfg->transop_timeout = ipr_transop_timeout; 7554 ioa_cfg->transop_timeout = ipr_transop_timeout;
7473 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT) 7555 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
@@ -7518,6 +7600,18 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7518 goto cleanup_nomem; 7600 goto cleanup_nomem;
7519 } 7601 }
7520 7602
7603 /* Enable MSI style interrupts if they are supported. */
7604 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
7605 rc = ipr_test_msi(ioa_cfg, pdev);
7606 if (rc == -EOPNOTSUPP)
7607 pci_disable_msi(pdev);
7608 else if (rc)
7609 goto out_msi_disable;
7610 else
7611 dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
7612 } else if (ipr_debug)
7613 dev_info(&pdev->dev, "Cannot enable MSI.\n");
7614
7521 /* Save away PCI config space for use following IOA reset */ 7615 /* Save away PCI config space for use following IOA reset */
7522 rc = pci_save_state(pdev); 7616 rc = pci_save_state(pdev);
7523 7617
@@ -7555,7 +7649,9 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7555 ioa_cfg->ioa_unit_checked = 1; 7649 ioa_cfg->ioa_unit_checked = 1;
7556 7650
7557 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 7651 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
7558 rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg); 7652 rc = request_irq(pdev->irq, ipr_isr,
7653 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
7654 IPR_NAME, ioa_cfg);
7559 7655
7560 if (rc) { 7656 if (rc) {
7561 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n", 7657 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
@@ -7582,12 +7678,13 @@ cleanup_nolog:
7582 ipr_free_mem(ioa_cfg); 7678 ipr_free_mem(ioa_cfg);
7583cleanup_nomem: 7679cleanup_nomem:
7584 iounmap(ipr_regs); 7680 iounmap(ipr_regs);
7681out_msi_disable:
7682 pci_disable_msi(pdev);
7585out_release_regions: 7683out_release_regions:
7586 pci_release_regions(pdev); 7684 pci_release_regions(pdev);
7587out_scsi_host_put: 7685out_scsi_host_put:
7588 scsi_host_put(host); 7686 scsi_host_put(host);
7589out_disable: 7687out_disable:
7590 pci_disable_msi(pdev);
7591 pci_disable_device(pdev); 7688 pci_disable_device(pdev);
7592 goto out; 7689 goto out;
7593} 7690}
@@ -7688,7 +7785,7 @@ static void __ipr_remove(struct pci_dev *pdev)
7688 * Return value: 7785 * Return value:
7689 * none 7786 * none
7690 **/ 7787 **/
7691static void ipr_remove(struct pci_dev *pdev) 7788static void __devexit ipr_remove(struct pci_dev *pdev)
7692{ 7789{
7693 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 7790 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7694 7791
@@ -7864,7 +7961,7 @@ static struct pci_driver ipr_driver = {
7864 .name = IPR_NAME, 7961 .name = IPR_NAME,
7865 .id_table = ipr_pci_table, 7962 .id_table = ipr_pci_table,
7866 .probe = ipr_probe, 7963 .probe = ipr_probe,
7867 .remove = ipr_remove, 7964 .remove = __devexit_p(ipr_remove),
7868 .shutdown = ipr_shutdown, 7965 .shutdown = ipr_shutdown,
7869 .err_handler = &ipr_err_handler, 7966 .err_handler = &ipr_err_handler,
7870}; 7967};
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 79a3ae4fb2c7..4b63dd6b1c81 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -37,8 +37,8 @@
37/* 37/*
38 * Literals 38 * Literals
39 */ 39 */
40#define IPR_DRIVER_VERSION "2.4.2" 40#define IPR_DRIVER_VERSION "2.4.3"
41#define IPR_DRIVER_DATE "(January 21, 2009)" 41#define IPR_DRIVER_DATE "(June 10, 2009)"
42 42
43/* 43/*
44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -1025,6 +1025,9 @@ struct ipr_chip_cfg_t {
1025struct ipr_chip_t { 1025struct ipr_chip_t {
1026 u16 vendor; 1026 u16 vendor;
1027 u16 device; 1027 u16 device;
1028 u16 intr_type;
1029#define IPR_USE_LSI 0x00
1030#define IPR_USE_MSI 0x01
1028 const struct ipr_chip_cfg_t *cfg; 1031 const struct ipr_chip_cfg_t *cfg;
1029}; 1032};
1030 1033
@@ -1094,6 +1097,7 @@ struct ipr_ioa_cfg {
1094 u8 needs_hard_reset:1; 1097 u8 needs_hard_reset:1;
1095 u8 dual_raid:1; 1098 u8 dual_raid:1;
1096 u8 needs_warm_reset:1; 1099 u8 needs_warm_reset:1;
1100 u8 msi_received:1;
1097 1101
1098 u8 revid; 1102 u8 revid;
1099 1103
@@ -1159,6 +1163,7 @@ struct ipr_ioa_cfg {
1159 1163
1160 unsigned int transop_timeout; 1164 unsigned int transop_timeout;
1161 const struct ipr_chip_cfg_t *chip_cfg; 1165 const struct ipr_chip_cfg_t *chip_cfg;
1166 const struct ipr_chip_t *ipr_chip;
1162 1167
1163 void __iomem *hdw_dma_regs; /* iomapped PCI memory space */ 1168 void __iomem *hdw_dma_regs; /* iomapped PCI memory space */
1164 unsigned long hdw_dma_regs_pci; /* raw PCI memory space */ 1169 unsigned long hdw_dma_regs_pci; /* raw PCI memory space */
@@ -1179,6 +1184,7 @@ struct ipr_ioa_cfg {
1179 struct work_struct work_q; 1184 struct work_struct work_q;
1180 1185
1181 wait_queue_head_t reset_wait_q; 1186 wait_queue_head_t reset_wait_q;
1187 wait_queue_head_t msi_wait_q;
1182 1188
1183 struct ipr_dump *dump; 1189 struct ipr_dump *dump;
1184 enum ipr_sdt_state sdt_state; 1190 enum ipr_sdt_state sdt_state;
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index b7c092d63bbe..518dbd91df85 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -253,8 +253,6 @@ static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
253 253
254 if (r < 0) { 254 if (r < 0) {
255 iscsi_tcp_segment_unmap(segment); 255 iscsi_tcp_segment_unmap(segment);
256 if (copied || r == -EAGAIN)
257 break;
258 return r; 256 return r;
259 } 257 }
260 copied += r; 258 copied += r;
@@ -275,11 +273,17 @@ static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
275 273
276 while (1) { 274 while (1) {
277 rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment); 275 rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment);
278 if (rc < 0) { 276 /*
277 * We may not have been able to send data because the conn
 278 * is getting stopped. libiscsi will know so propagate err
279 * for it to do the right thing.
280 */
281 if (rc == -EAGAIN)
282 return rc;
283 else if (rc < 0) {
279 rc = ISCSI_ERR_XMIT_FAILED; 284 rc = ISCSI_ERR_XMIT_FAILED;
280 goto error; 285 goto error;
281 } 286 } else if (rc == 0)
282 if (rc == 0)
283 break; 287 break;
284 288
285 consumed += rc; 289 consumed += rc;
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 4c880656990b..6fabf66972b9 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -45,14 +45,6 @@
45 45
46#define FC_DISC_DELAY 3 46#define FC_DISC_DELAY 3
47 47
48static int fc_disc_debug;
49
50#define FC_DEBUG_DISC(fmt...) \
51 do { \
52 if (fc_disc_debug) \
53 FC_DBG(fmt); \
54 } while (0)
55
56static void fc_disc_gpn_ft_req(struct fc_disc *); 48static void fc_disc_gpn_ft_req(struct fc_disc *);
57static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *); 49static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
58static int fc_disc_new_target(struct fc_disc *, struct fc_rport *, 50static int fc_disc_new_target(struct fc_disc *, struct fc_rport *,
@@ -137,8 +129,8 @@ static void fc_disc_rport_callback(struct fc_lport *lport,
137 struct fc_rport_libfc_priv *rdata = rport->dd_data; 129 struct fc_rport_libfc_priv *rdata = rport->dd_data;
138 struct fc_disc *disc = &lport->disc; 130 struct fc_disc *disc = &lport->disc;
139 131
140 FC_DEBUG_DISC("Received a %d event for port (%6x)\n", event, 132 FC_DISC_DBG(disc, "Received a %d event for port (%6x)\n", event,
141 rport->port_id); 133 rport->port_id);
142 134
143 switch (event) { 135 switch (event) {
144 case RPORT_EV_CREATED: 136 case RPORT_EV_CREATED:
@@ -191,8 +183,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
191 183
192 lport = disc->lport; 184 lport = disc->lport;
193 185
194 FC_DEBUG_DISC("Received an RSCN event on port (%6x)\n", 186 FC_DISC_DBG(disc, "Received an RSCN event\n");
195 fc_host_port_id(lport->host));
196 187
197 /* make sure the frame contains an RSCN message */ 188 /* make sure the frame contains an RSCN message */
198 rp = fc_frame_payload_get(fp, sizeof(*rp)); 189 rp = fc_frame_payload_get(fp, sizeof(*rp));
@@ -225,8 +216,8 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
225 */ 216 */
226 switch (fmt) { 217 switch (fmt) {
227 case ELS_ADDR_FMT_PORT: 218 case ELS_ADDR_FMT_PORT:
228 FC_DEBUG_DISC("Port address format for port (%6x)\n", 219 FC_DISC_DBG(disc, "Port address format for port "
229 ntoh24(pp->rscn_fid)); 220 "(%6x)\n", ntoh24(pp->rscn_fid));
230 dp = kzalloc(sizeof(*dp), GFP_KERNEL); 221 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
231 if (!dp) { 222 if (!dp) {
232 redisc = 1; 223 redisc = 1;
@@ -243,19 +234,19 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
243 case ELS_ADDR_FMT_DOM: 234 case ELS_ADDR_FMT_DOM:
244 case ELS_ADDR_FMT_FAB: 235 case ELS_ADDR_FMT_FAB:
245 default: 236 default:
246 FC_DEBUG_DISC("Address format is (%d)\n", fmt); 237 FC_DISC_DBG(disc, "Address format is (%d)\n", fmt);
247 redisc = 1; 238 redisc = 1;
248 break; 239 break;
249 } 240 }
250 } 241 }
251 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); 242 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
252 if (redisc) { 243 if (redisc) {
253 FC_DEBUG_DISC("RSCN received: rediscovering\n"); 244 FC_DISC_DBG(disc, "RSCN received: rediscovering\n");
254 fc_disc_restart(disc); 245 fc_disc_restart(disc);
255 } else { 246 } else {
256 FC_DEBUG_DISC("RSCN received: not rediscovering. " 247 FC_DISC_DBG(disc, "RSCN received: not rediscovering. "
257 "redisc %d state %d in_prog %d\n", 248 "redisc %d state %d in_prog %d\n",
258 redisc, lport->state, disc->pending); 249 redisc, lport->state, disc->pending);
259 list_for_each_entry_safe(dp, next, &disc_ports, peers) { 250 list_for_each_entry_safe(dp, next, &disc_ports, peers) {
260 list_del(&dp->peers); 251 list_del(&dp->peers);
261 rport = lport->tt.rport_lookup(lport, dp->ids.port_id); 252 rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
@@ -270,7 +261,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
270 fc_frame_free(fp); 261 fc_frame_free(fp);
271 return; 262 return;
272reject: 263reject:
273 FC_DEBUG_DISC("Received a bad RSCN frame\n"); 264 FC_DISC_DBG(disc, "Received a bad RSCN frame\n");
274 rjt_data.fp = NULL; 265 rjt_data.fp = NULL;
275 rjt_data.reason = ELS_RJT_LOGIC; 266 rjt_data.reason = ELS_RJT_LOGIC;
276 rjt_data.explan = ELS_EXPL_NONE; 267 rjt_data.explan = ELS_EXPL_NONE;
@@ -302,7 +293,8 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
302 mutex_unlock(&disc->disc_mutex); 293 mutex_unlock(&disc->disc_mutex);
303 break; 294 break;
304 default: 295 default:
305 FC_DBG("Received an unsupported request. opcode (%x)\n", op); 296 FC_DISC_DBG(disc, "Received an unsupported request, "
297 "the opcode is (%x)\n", op);
306 break; 298 break;
307 } 299 }
308} 300}
@@ -320,12 +312,10 @@ static void fc_disc_restart(struct fc_disc *disc)
320 struct fc_rport_libfc_priv *rdata, *next; 312 struct fc_rport_libfc_priv *rdata, *next;
321 struct fc_lport *lport = disc->lport; 313 struct fc_lport *lport = disc->lport;
322 314
323 FC_DEBUG_DISC("Restarting discovery for port (%6x)\n", 315 FC_DISC_DBG(disc, "Restarting discovery\n");
324 fc_host_port_id(lport->host));
325 316
326 list_for_each_entry_safe(rdata, next, &disc->rports, peers) { 317 list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
327 rport = PRIV_TO_RPORT(rdata); 318 rport = PRIV_TO_RPORT(rdata);
328 FC_DEBUG_DISC("list_del(%6x)\n", rport->port_id);
329 list_del(&rdata->peers); 319 list_del(&rdata->peers);
330 lport->tt.rport_logoff(rport); 320 lport->tt.rport_logoff(rport);
331 } 321 }
@@ -485,8 +475,7 @@ static void fc_disc_done(struct fc_disc *disc)
485 struct fc_lport *lport = disc->lport; 475 struct fc_lport *lport = disc->lport;
486 enum fc_disc_event event; 476 enum fc_disc_event event;
487 477
488 FC_DEBUG_DISC("Discovery complete for port (%6x)\n", 478 FC_DISC_DBG(disc, "Discovery complete\n");
489 fc_host_port_id(lport->host));
490 479
491 event = disc->event; 480 event = disc->event;
492 disc->event = DISC_EV_NONE; 481 disc->event = DISC_EV_NONE;
@@ -510,10 +499,10 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
510{ 499{
511 struct fc_lport *lport = disc->lport; 500 struct fc_lport *lport = disc->lport;
512 unsigned long delay = 0; 501 unsigned long delay = 0;
513 if (fc_disc_debug) 502
514 FC_DBG("Error %ld, retries %d/%d\n", 503 FC_DISC_DBG(disc, "Error %ld, retries %d/%d\n",
515 PTR_ERR(fp), disc->retry_count, 504 PTR_ERR(fp), disc->retry_count,
516 FC_DISC_RETRY_LIMIT); 505 FC_DISC_RETRY_LIMIT);
517 506
518 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) { 507 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
519 /* 508 /*
@@ -649,9 +638,9 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
649 &disc->rogue_rports); 638 &disc->rogue_rports);
650 lport->tt.rport_login(rport); 639 lport->tt.rport_login(rport);
651 } else 640 } else
652 FC_DBG("Failed to allocate memory for " 641 printk(KERN_WARNING "libfc: Failed to allocate "
653 "the newly discovered port (%6x)\n", 642 "memory for the newly discovered port "
654 dp.ids.port_id); 643 "(%6x)\n", dp.ids.port_id);
655 } 644 }
656 645
657 if (np->fp_flags & FC_NS_FID_LAST) { 646 if (np->fp_flags & FC_NS_FID_LAST) {
@@ -671,9 +660,8 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
671 */ 660 */
672 if (error == 0 && len > 0 && len < sizeof(*np)) { 661 if (error == 0 && len > 0 && len < sizeof(*np)) {
673 if (np != &disc->partial_buf) { 662 if (np != &disc->partial_buf) {
674 FC_DEBUG_DISC("Partial buffer remains " 663 FC_DISC_DBG(disc, "Partial buffer remains "
675 "for discovery by (%6x)\n", 664 "for discovery\n");
676 fc_host_port_id(lport->host));
677 memcpy(&disc->partial_buf, np, len); 665 memcpy(&disc->partial_buf, np, len);
678 } 666 }
679 disc->buf_len = (unsigned char) len; 667 disc->buf_len = (unsigned char) len;
@@ -721,8 +709,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
721 int error; 709 int error;
722 710
723 mutex_lock(&disc->disc_mutex); 711 mutex_lock(&disc->disc_mutex);
724 FC_DEBUG_DISC("Received a GPN_FT response on port (%6x)\n", 712 FC_DISC_DBG(disc, "Received a GPN_FT response\n");
725 fc_host_port_id(disc->lport->host));
726 713
727 if (IS_ERR(fp)) { 714 if (IS_ERR(fp)) {
728 fc_disc_error(disc, fp); 715 fc_disc_error(disc, fp);
@@ -738,30 +725,30 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
738 disc->seq_count == 0) { 725 disc->seq_count == 0) {
739 cp = fc_frame_payload_get(fp, sizeof(*cp)); 726 cp = fc_frame_payload_get(fp, sizeof(*cp));
740 if (!cp) { 727 if (!cp) {
741 FC_DBG("GPN_FT response too short, len %d\n", 728 FC_DISC_DBG(disc, "GPN_FT response too short, len %d\n",
742 fr_len(fp)); 729 fr_len(fp));
743 } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) { 730 } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
744 731
745 /* Accepted, parse the response. */ 732 /* Accepted, parse the response. */
746 buf = cp + 1; 733 buf = cp + 1;
747 len -= sizeof(*cp); 734 len -= sizeof(*cp);
748 } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) { 735 } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
749 FC_DBG("GPN_FT rejected reason %x exp %x " 736 FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x "
750 "(check zoning)\n", cp->ct_reason, 737 "(check zoning)\n", cp->ct_reason,
751 cp->ct_explan); 738 cp->ct_explan);
752 disc->event = DISC_EV_FAILED; 739 disc->event = DISC_EV_FAILED;
753 fc_disc_done(disc); 740 fc_disc_done(disc);
754 } else { 741 } else {
755 FC_DBG("GPN_FT unexpected response code %x\n", 742 FC_DISC_DBG(disc, "GPN_FT unexpected response code "
756 ntohs(cp->ct_cmd)); 743 "%x\n", ntohs(cp->ct_cmd));
757 } 744 }
758 } else if (fr_sof(fp) == FC_SOF_N3 && 745 } else if (fr_sof(fp) == FC_SOF_N3 &&
759 seq_cnt == disc->seq_count) { 746 seq_cnt == disc->seq_count) {
760 buf = fh + 1; 747 buf = fh + 1;
761 } else { 748 } else {
762 FC_DBG("GPN_FT unexpected frame - out of sequence? " 749 FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? "
763 "seq_cnt %x expected %x sof %x eof %x\n", 750 "seq_cnt %x expected %x sof %x eof %x\n",
764 seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp)); 751 seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
765 } 752 }
766 if (buf) { 753 if (buf) {
767 error = fc_disc_gpn_ft_parse(disc, buf, len); 754 error = fc_disc_gpn_ft_parse(disc, buf, len);
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 992af05aacf1..2bc22be5f849 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -32,18 +32,7 @@
32#include <scsi/libfc.h> 32#include <scsi/libfc.h>
33#include <scsi/fc_encode.h> 33#include <scsi/fc_encode.h>
34 34
35/* 35static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
36 * fc_exch_debug can be set in debugger or at compile time to get more logs.
37 */
38static int fc_exch_debug;
39
40#define FC_DEBUG_EXCH(fmt...) \
41 do { \
42 if (fc_exch_debug) \
43 FC_DBG(fmt); \
44 } while (0)
45
46static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
47 36
48/* 37/*
49 * Structure and function definitions for managing Fibre Channel Exchanges 38 * Structure and function definitions for managing Fibre Channel Exchanges
@@ -333,8 +322,8 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
333 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) 322 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
334 return; 323 return;
335 324
336 FC_DEBUG_EXCH("Exchange (%4x) timed out, notifying the upper layer\n", 325 FC_EXCH_DBG(ep, "Exchange timed out, notifying the upper layer\n");
337 ep->xid); 326
338 if (schedule_delayed_work(&ep->timeout_work, 327 if (schedule_delayed_work(&ep->timeout_work,
339 msecs_to_jiffies(timer_msec))) 328 msecs_to_jiffies(timer_msec)))
340 fc_exch_hold(ep); /* hold for timer */ 329 fc_exch_hold(ep); /* hold for timer */
@@ -545,7 +534,7 @@ struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
545 /* alloc a new xid */ 534 /* alloc a new xid */
546 xid = fc_em_alloc_xid(mp, fp); 535 xid = fc_em_alloc_xid(mp, fp);
547 if (!xid) { 536 if (!xid) {
 548 printk(KERN_ERR "fc_em_alloc_xid() failed\n"); 537 printk(KERN_WARNING "libfc: Failed to allocate an exchange\n");
549 goto err; 538 goto err;
550 } 539 }
551 } 540 }
@@ -820,8 +809,8 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
820 struct fc_exch *ep = fc_seq_exch(sp); 809 struct fc_exch *ep = fc_seq_exch(sp);
821 810
822 sp = fc_seq_alloc(ep, ep->seq_id++); 811 sp = fc_seq_alloc(ep, ep->seq_id++);
823 FC_DEBUG_EXCH("exch %4x f_ctl %6x seq %2x\n", 812 FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
824 ep->xid, ep->f_ctl, sp->id); 813 ep->f_ctl, sp->id);
825 return sp; 814 return sp;
826} 815}
827/* 816/*
@@ -901,7 +890,7 @@ void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
901 fc_exch_els_rec(sp, els_data->fp); 890 fc_exch_els_rec(sp, els_data->fp);
902 break; 891 break;
903 default: 892 default:
904 FC_DBG("Invalid ELS CMD:%x\n", els_cmd); 893 FC_EXCH_DBG(fc_seq_exch(sp), "Invalid ELS CMD:%x\n", els_cmd);
905 } 894 }
906} 895}
907EXPORT_SYMBOL(fc_seq_els_rsp_send); 896EXPORT_SYMBOL(fc_seq_els_rsp_send);
@@ -1134,7 +1123,7 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
1134 lp->tt.lport_recv(lp, sp, fp); 1123 lp->tt.lport_recv(lp, sp, fp);
1135 fc_exch_release(ep); /* release from lookup */ 1124 fc_exch_release(ep); /* release from lookup */
1136 } else { 1125 } else {
1137 FC_DEBUG_EXCH("exch/seq lookup failed: reject %x\n", reject); 1126 FC_EM_DBG(mp, "exch/seq lookup failed: reject %x\n", reject);
1138 fc_frame_free(fp); 1127 fc_frame_free(fp);
1139 } 1128 }
1140} 1129}
@@ -1159,6 +1148,10 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1159 atomic_inc(&mp->stats.xid_not_found); 1148 atomic_inc(&mp->stats.xid_not_found);
1160 goto out; 1149 goto out;
1161 } 1150 }
1151 if (ep->esb_stat & ESB_ST_COMPLETE) {
1152 atomic_inc(&mp->stats.xid_not_found);
1153 goto out;
1154 }
1162 if (ep->rxid == FC_XID_UNKNOWN) 1155 if (ep->rxid == FC_XID_UNKNOWN)
1163 ep->rxid = ntohs(fh->fh_rx_id); 1156 ep->rxid = ntohs(fh->fh_rx_id);
1164 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) { 1157 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
@@ -1238,10 +1231,10 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1238 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */ 1231 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
1239 if (!sp) { 1232 if (!sp) {
1240 atomic_inc(&mp->stats.xid_not_found); 1233 atomic_inc(&mp->stats.xid_not_found);
1241 FC_DEBUG_EXCH("seq lookup failed\n"); 1234 FC_EM_DBG(mp, "seq lookup failed\n");
1242 } else { 1235 } else {
1243 atomic_inc(&mp->stats.non_bls_resp); 1236 atomic_inc(&mp->stats.non_bls_resp);
1244 FC_DEBUG_EXCH("non-BLS response to sequence"); 1237 FC_EM_DBG(mp, "non-BLS response to sequence");
1245 } 1238 }
1246 fc_frame_free(fp); 1239 fc_frame_free(fp);
1247} 1240}
@@ -1262,8 +1255,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
1262 int rc = 1, has_rec = 0; 1255 int rc = 1, has_rec = 0;
1263 1256
1264 fh = fc_frame_header_get(fp); 1257 fh = fc_frame_header_get(fp);
1265 FC_DEBUG_EXCH("exch: BLS rctl %x - %s\n", 1258 FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
1266 fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl)); 1259 fc_exch_rctl_name(fh->fh_r_ctl));
1267 1260
1268 if (cancel_delayed_work_sync(&ep->timeout_work)) 1261 if (cancel_delayed_work_sync(&ep->timeout_work))
1269 fc_exch_release(ep); /* release from pending timer hold */ 1262 fc_exch_release(ep); /* release from pending timer hold */
@@ -1355,9 +1348,9 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
1355 case FC_RCTL_ACK_0: 1348 case FC_RCTL_ACK_0:
1356 break; 1349 break;
1357 default: 1350 default:
1358 FC_DEBUG_EXCH("BLS rctl %x - %s received", 1351 FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
1359 fh->fh_r_ctl, 1352 fh->fh_r_ctl,
1360 fc_exch_rctl_name(fh->fh_r_ctl)); 1353 fc_exch_rctl_name(fh->fh_r_ctl));
1361 break; 1354 break;
1362 } 1355 }
1363 fc_frame_free(fp); 1356 fc_frame_free(fp);
@@ -1595,7 +1588,8 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
1595 1588
1596 if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT) 1589 if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
1597 goto cleanup; 1590 goto cleanup;
1598 FC_DBG("Cannot process RRQ, because of frame error %d\n", err); 1591 FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, "
1592 "frame error %d\n", err);
1599 return; 1593 return;
1600 } 1594 }
1601 1595
@@ -1604,12 +1598,13 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
1604 1598
1605 switch (op) { 1599 switch (op) {
1606 case ELS_LS_RJT: 1600 case ELS_LS_RJT:
1607 FC_DBG("LS_RJT for RRQ"); 1601 FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ");
1608 /* fall through */ 1602 /* fall through */
1609 case ELS_LS_ACC: 1603 case ELS_LS_ACC:
1610 goto cleanup; 1604 goto cleanup;
1611 default: 1605 default:
1612 FC_DBG("unexpected response op %x for RRQ", op); 1606 FC_EXCH_DBG(aborted_ep, "unexpected response op %x "
1607 "for RRQ", op);
1613 return; 1608 return;
1614 } 1609 }
1615 1610
@@ -1736,8 +1731,8 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
1736 size_t len; 1731 size_t len;
1737 1732
1738 if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) { 1733 if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) {
1739 FC_DBG("Invalid min_xid 0x:%x and max_xid 0x:%x\n", 1734 FC_LPORT_DBG(lp, "Invalid min_xid 0x:%x and max_xid 0x:%x\n",
1740 min_xid, max_xid); 1735 min_xid, max_xid);
1741 return NULL; 1736 return NULL;
1742 } 1737 }
1743 1738
@@ -1874,7 +1869,8 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
1874 1869
1875 /* lport lock ? */ 1870 /* lport lock ? */
1876 if (!lp || !mp || (lp->state == LPORT_ST_NONE)) { 1871 if (!lp || !mp || (lp->state == LPORT_ST_NONE)) {
1877 FC_DBG("fc_lport or EM is not allocated and configured"); 1872 FC_LPORT_DBG(lp, "Receiving frames for an lport that "
1873 "has not been initialized correctly\n");
1878 fc_frame_free(fp); 1874 fc_frame_free(fp);
1879 return; 1875 return;
1880 } 1876 }
@@ -1900,7 +1896,7 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
1900 fc_exch_recv_req(lp, mp, fp); 1896 fc_exch_recv_req(lp, mp, fp);
1901 break; 1897 break;
1902 default: 1898 default:
1903 FC_DBG("dropping invalid frame (eof %x)", fr_eof(fp)); 1899 FC_EM_DBG(mp, "dropping invalid frame (eof %x)", fr_eof(fp));
1904 fc_frame_free(fp); 1900 fc_frame_free(fp);
1905 break; 1901 break;
1906 } 1902 }
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 521f996f9b13..e303e0d12c4b 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -43,13 +43,9 @@ MODULE_AUTHOR("Open-FCoE.org");
43MODULE_DESCRIPTION("libfc"); 43MODULE_DESCRIPTION("libfc");
44MODULE_LICENSE("GPL v2"); 44MODULE_LICENSE("GPL v2");
45 45
46static int fc_fcp_debug; 46unsigned int fc_debug_logging;
47 47module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR);
48#define FC_DEBUG_FCP(fmt...) \ 48MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
49 do { \
50 if (fc_fcp_debug) \
51 FC_DBG(fmt); \
52 } while (0)
53 49
54static struct kmem_cache *scsi_pkt_cachep; 50static struct kmem_cache *scsi_pkt_cachep;
55 51
@@ -347,8 +343,8 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
347 if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) && 343 if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
348 fc_frame_crc_check(fp)) 344 fc_frame_crc_check(fp))
349 goto crc_err; 345 goto crc_err;
350 FC_DEBUG_FCP("data received past end. len %zx offset %zx " 346 FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx "
351 "data_len %x\n", len, offset, fsp->data_len); 347 "data_len %x\n", len, offset, fsp->data_len);
352 fc_fcp_retry_cmd(fsp); 348 fc_fcp_retry_cmd(fsp);
353 return; 349 return;
354 } 350 }
@@ -411,7 +407,8 @@ crc_err:
411 stats->ErrorFrames++; 407 stats->ErrorFrames++;
412 /* FIXME - per cpu count, not total count! */ 408 /* FIXME - per cpu count, not total count! */
413 if (stats->InvalidCRCCount++ < 5) 409 if (stats->InvalidCRCCount++ < 5)
414 printk(KERN_WARNING "CRC error on data frame for port (%6x)\n", 410 printk(KERN_WARNING "libfc: CRC error on data "
411 "frame for port (%6x)\n",
415 fc_host_port_id(lp->host)); 412 fc_host_port_id(lp->host));
416 /* 413 /*
417 * Assume the frame is total garbage. 414 * Assume the frame is total garbage.
@@ -475,14 +472,14 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
475 WARN_ON(seq_blen <= 0); 472 WARN_ON(seq_blen <= 0);
476 if (unlikely(offset + seq_blen > fsp->data_len)) { 473 if (unlikely(offset + seq_blen > fsp->data_len)) {
477 /* this should never happen */ 474 /* this should never happen */
478 FC_DEBUG_FCP("xfer-ready past end. seq_blen %zx offset %zx\n", 475 FC_FCP_DBG(fsp, "xfer-ready past end. seq_blen %zx "
479 seq_blen, offset); 476 "offset %zx\n", seq_blen, offset);
480 fc_fcp_send_abort(fsp); 477 fc_fcp_send_abort(fsp);
481 return 0; 478 return 0;
482 } else if (offset != fsp->xfer_len) { 479 } else if (offset != fsp->xfer_len) {
483 /* Out of Order Data Request - no problem, but unexpected. */ 480 /* Out of Order Data Request - no problem, but unexpected. */
484 FC_DEBUG_FCP("xfer-ready non-contiguous. " 481 FC_FCP_DBG(fsp, "xfer-ready non-contiguous. "
485 "seq_blen %zx offset %zx\n", seq_blen, offset); 482 "seq_blen %zx offset %zx\n", seq_blen, offset);
486 } 483 }
487 484
488 /* 485 /*
@@ -493,7 +490,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
493 t_blen = fsp->max_payload; 490 t_blen = fsp->max_payload;
494 if (lp->seq_offload) { 491 if (lp->seq_offload) {
495 t_blen = min(seq_blen, (size_t)lp->lso_max); 492 t_blen = min(seq_blen, (size_t)lp->lso_max);
496 FC_DEBUG_FCP("fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n", 493 FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n",
497 fsp, seq_blen, lp->lso_max, t_blen); 494 fsp, seq_blen, lp->lso_max, t_blen);
498 } 495 }
499 496
@@ -694,7 +691,7 @@ static void fc_fcp_reduce_can_queue(struct fc_lport *lp)
694 if (!can_queue) 691 if (!can_queue)
695 can_queue = 1; 692 can_queue = 1;
696 lp->host->can_queue = can_queue; 693 lp->host->can_queue = can_queue;
697 shost_printk(KERN_ERR, lp->host, "Could not allocate frame.\n" 694 shost_printk(KERN_ERR, lp->host, "libfc: Could not allocate frame.\n"
698 "Reducing can_queue to %d.\n", can_queue); 695 "Reducing can_queue to %d.\n", can_queue);
699done: 696done:
700 spin_unlock_irqrestore(lp->host->host_lock, flags); 697 spin_unlock_irqrestore(lp->host->host_lock, flags);
@@ -768,7 +765,7 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
768 765
769 fc_fcp_resp(fsp, fp); 766 fc_fcp_resp(fsp, fp);
770 } else { 767 } else {
771 FC_DBG("unexpected frame. r_ctl %x\n", r_ctl); 768 FC_FCP_DBG(fsp, "unexpected frame. r_ctl %x\n", r_ctl);
772 } 769 }
773unlock: 770unlock:
774 fc_fcp_unlock_pkt(fsp); 771 fc_fcp_unlock_pkt(fsp);
@@ -877,17 +874,17 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
877 return; 874 return;
878 } 875 }
879 fsp->status_code = FC_DATA_OVRRUN; 876 fsp->status_code = FC_DATA_OVRRUN;
880 FC_DBG("tgt %6x xfer len %zx greater than expected len %x. " 877 FC_FCP_DBG(fsp, "tgt %6x xfer len %zx greater than expected, "
881 "data len %x\n", 878 "len %x, data len %x\n",
882 fsp->rport->port_id, 879 fsp->rport->port_id,
883 fsp->xfer_len, expected_len, fsp->data_len); 880 fsp->xfer_len, expected_len, fsp->data_len);
884 } 881 }
885 fc_fcp_complete_locked(fsp); 882 fc_fcp_complete_locked(fsp);
886 return; 883 return;
887 884
888len_err: 885len_err:
889 FC_DBG("short FCP response. flags 0x%x len %u respl %u snsl %u\n", 886 FC_FCP_DBG(fsp, "short FCP response. flags 0x%x len %u respl %u "
890 flags, fr_len(fp), respl, snsl); 887 "snsl %u\n", flags, fr_len(fp), respl, snsl);
891err: 888err:
892 fsp->status_code = FC_ERROR; 889 fsp->status_code = FC_ERROR;
893 fc_fcp_complete_locked(fsp); 890 fc_fcp_complete_locked(fsp);
@@ -1107,13 +1104,11 @@ static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1107 if (fc_fcp_lock_pkt(fsp)) 1104 if (fc_fcp_lock_pkt(fsp))
1108 return; 1105 return;
1109 1106
1110 switch (error) { 1107 if (error == -FC_EX_CLOSED) {
1111 case -FC_EX_CLOSED:
1112 fc_fcp_retry_cmd(fsp); 1108 fc_fcp_retry_cmd(fsp);
1113 goto unlock; 1109 goto unlock;
1114 default:
1115 FC_DBG("unknown error %ld\n", PTR_ERR(fp));
1116 } 1110 }
1111
1117 /* 1112 /*
1118 * clear abort pending, because the lower layer 1113 * clear abort pending, because the lower layer
1119 * decided to force completion. 1114 * decided to force completion.
@@ -1145,10 +1140,10 @@ static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
1145 fsp->wait_for_comp = 0; 1140 fsp->wait_for_comp = 0;
1146 1141
1147 if (!rc) { 1142 if (!rc) {
1148 FC_DBG("target abort cmd failed\n"); 1143 FC_FCP_DBG(fsp, "target abort cmd failed\n");
1149 rc = FAILED; 1144 rc = FAILED;
1150 } else if (fsp->state & FC_SRB_ABORTED) { 1145 } else if (fsp->state & FC_SRB_ABORTED) {
1151 FC_DBG("target abort cmd passed\n"); 1146 FC_FCP_DBG(fsp, "target abort cmd passed\n");
1152 rc = SUCCESS; 1147 rc = SUCCESS;
1153 fc_fcp_complete_locked(fsp); 1148 fc_fcp_complete_locked(fsp);
1154 } 1149 }
@@ -1213,7 +1208,7 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
1213 spin_unlock_bh(&fsp->scsi_pkt_lock); 1208 spin_unlock_bh(&fsp->scsi_pkt_lock);
1214 1209
1215 if (!rc) { 1210 if (!rc) {
1216 FC_DBG("lun reset failed\n"); 1211 FC_SCSI_DBG(lp, "lun reset failed\n");
1217 return FAILED; 1212 return FAILED;
1218 } 1213 }
1219 1214
@@ -1221,7 +1216,7 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
1221 if (fsp->cdb_status != FCP_TMF_CMPL) 1216 if (fsp->cdb_status != FCP_TMF_CMPL)
1222 return FAILED; 1217 return FAILED;
1223 1218
1224 FC_DBG("lun reset to lun %u completed\n", lun); 1219 FC_SCSI_DBG(lp, "lun reset to lun %u completed\n", lun);
1225 fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED); 1220 fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED);
1226 return SUCCESS; 1221 return SUCCESS;
1227} 1222}
@@ -1388,13 +1383,13 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1388 rjt = fc_frame_payload_get(fp, sizeof(*rjt)); 1383 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
1389 switch (rjt->er_reason) { 1384 switch (rjt->er_reason) {
1390 default: 1385 default:
1391 FC_DEBUG_FCP("device %x unexpected REC reject " 1386 FC_FCP_DBG(fsp, "device %x unexpected REC reject "
1392 "reason %d expl %d\n", 1387 "reason %d expl %d\n",
1393 fsp->rport->port_id, rjt->er_reason, 1388 fsp->rport->port_id, rjt->er_reason,
1394 rjt->er_explan); 1389 rjt->er_explan);
1395 /* fall through */ 1390 /* fall through */
1396 case ELS_RJT_UNSUP: 1391 case ELS_RJT_UNSUP:
1397 FC_DEBUG_FCP("device does not support REC\n"); 1392 FC_FCP_DBG(fsp, "device does not support REC\n");
1398 rp = fsp->rport->dd_data; 1393 rp = fsp->rport->dd_data;
1399 /* 1394 /*
 1399 * if we do not support RECs or got some bogus 1394
@@ -1514,8 +1509,8 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1514 break; 1509 break;
1515 1510
1516 default: 1511 default:
1517 FC_DBG("REC %p fid %x error unexpected error %d\n", 1512 FC_FCP_DBG(fsp, "REC %p fid %x error unexpected error %d\n",
1518 fsp, fsp->rport->port_id, error); 1513 fsp, fsp->rport->port_id, error);
1519 fsp->status_code = FC_CMD_PLOGO; 1514 fsp->status_code = FC_CMD_PLOGO;
1520 /* fall through */ 1515 /* fall through */
1521 1516
@@ -1524,9 +1519,9 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1524 * Assume REC or LS_ACC was lost. 1519 * Assume REC or LS_ACC was lost.
1525 * The exchange manager will have aborted REC, so retry. 1520 * The exchange manager will have aborted REC, so retry.
1526 */ 1521 */
1527 FC_DBG("REC fid %x error error %d retry %d/%d\n", 1522 FC_FCP_DBG(fsp, "REC fid %x error error %d retry %d/%d\n",
1528 fsp->rport->port_id, error, fsp->recov_retry, 1523 fsp->rport->port_id, error, fsp->recov_retry,
1529 FC_MAX_RECOV_RETRY); 1524 FC_MAX_RECOV_RETRY);
1530 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1525 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
1531 fc_fcp_rec(fsp); 1526 fc_fcp_rec(fsp);
1532 else 1527 else
@@ -1896,7 +1891,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1896 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status; 1891 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
1897 break; 1892 break;
1898 case FC_CMD_ABORTED: 1893 case FC_CMD_ABORTED:
1899 sc_cmd->result = (DID_ABORT << 16) | fsp->io_status; 1894 sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
1900 break; 1895 break;
1901 case FC_CMD_TIME_OUT: 1896 case FC_CMD_TIME_OUT:
1902 sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status; 1897 sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
@@ -2011,9 +2006,11 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
2011 if (lp->state != LPORT_ST_READY) 2006 if (lp->state != LPORT_ST_READY)
2012 return rc; 2007 return rc;
2013 2008
+	FC_SCSI_DBG(lp, "Resetting rport (%6x)\n", rport->port_id);
+
 	fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO);
 	if (fsp == NULL) {
-		FC_DBG("could not allocate scsi_pkt\n");
+		printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n");
2017 sc_cmd->result = DID_NO_CONNECT << 16; 2014 sc_cmd->result = DID_NO_CONNECT << 16;
2018 goto out; 2015 goto out;
2019 } 2016 }
@@ -2048,17 +2045,21 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
2048 struct fc_lport *lp = shost_priv(shost); 2045 struct fc_lport *lp = shost_priv(shost);
2049 unsigned long wait_tmo; 2046 unsigned long wait_tmo;
2050 2047
+	FC_SCSI_DBG(lp, "Resetting host\n");
+
2051 lp->tt.lport_reset(lp); 2050 lp->tt.lport_reset(lp);
2052 wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; 2051 wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
2053 while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo)) 2052 while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo))
2054 msleep(1000); 2053 msleep(1000);
2055 2054
2056 if (fc_fcp_lport_queue_ready(lp)) { 2055 if (fc_fcp_lport_queue_ready(lp)) {
-		shost_printk(KERN_INFO, shost, "Host reset succeeded.\n");
+		shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded "
+			     "on port (%6x)\n", fc_host_port_id(lp->host));
 		return SUCCESS;
 	} else {
-		shost_printk(KERN_INFO, shost, "Host reset failed. "
-			     "lport not ready.\n");
+		shost_printk(KERN_INFO, shost, "libfc: Host reset failed, "
+			     "port (%6x) is not ready.\n",
+			     fc_host_port_id(lp->host));
2062 return FAILED; 2063 return FAILED;
2063 } 2064 }
2064} 2065}
@@ -2117,7 +2118,8 @@ void fc_fcp_destroy(struct fc_lport *lp)
2117 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 2118 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
2118 2119
2119 if (!list_empty(&si->scsi_pkt_queue)) 2120 if (!list_empty(&si->scsi_pkt_queue))
-		printk(KERN_ERR "Leaked scsi packets.\n");
+		printk(KERN_ERR "libfc: Leaked SCSI packets when destroying "
+		       "port (%6x)\n", fc_host_port_id(lp->host));
2121 2123
2122 mempool_destroy(si->scsi_pkt_pool); 2124 mempool_destroy(si->scsi_pkt_pool);
2123 kfree(si); 2125 kfree(si);
@@ -2166,7 +2168,8 @@ static int __init libfc_init(void)
2166 sizeof(struct fc_fcp_pkt), 2168 sizeof(struct fc_fcp_pkt),
2167 0, SLAB_HWCACHE_ALIGN, NULL); 2169 0, SLAB_HWCACHE_ALIGN, NULL);
2168 if (scsi_pkt_cachep == NULL) { 2170 if (scsi_pkt_cachep == NULL) {
-		FC_DBG("Unable to allocate SRB cache...module load failed!");
+		printk(KERN_ERR "libfc: Unable to allocate SRB cache, "
+		       "module load failed!");
2170 return -ENOMEM; 2173 return -ENOMEM;
2171 } 2174 }
2172 2175
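The fc_fcp.c hunks above (and the fc_lport.c and fc_rport.c hunks that follow) retire the file-local FC_DEBUG_* wrappers in favour of per-object FC_SCSI_DBG/FC_FCP_DBG/FC_LPORT_DBG/FC_RPORT_DBG macros that tag each message with the object being traced. The macro definitions themselves are not part of this patch; the user-space sketch below only illustrates the assumed pattern (a runtime switch gating a prefixed print). FC_LPORT_DBG_EXAMPLE, struct fc_lport_example and fc_debug_logging are made-up stand-ins, not libfc symbols.

/*
 * Illustration only -- not the libfc header.  Shows the general shape
 * of a per-object debug macro: a runtime flag plus a prefix naming the
 * object being traced.
 */
#include <stdio.h>

struct fc_lport_example {		/* stand-in for struct fc_lport */
	unsigned int port_id;
};

static int fc_debug_logging;		/* assumed runtime switch */

#define FC_LPORT_DBG_EXAMPLE(lport, fmt, ...)				\
	do {								\
		if (fc_debug_logging)					\
			printf("lport %6x: " fmt,			\
			       (lport)->port_id, ##__VA_ARGS__);	\
	} while (0)

int main(void)
{
	struct fc_lport_example lp = { .port_id = 0x010203 };

	fc_debug_logging = 1;
	FC_LPORT_DBG_EXAMPLE(&lp, "Entered READY from state %s\n", "FLOGI");
	return 0;
}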
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index e0c247724d2b..745fa5555d6a 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -101,14 +101,6 @@
101 101
102#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/ 102#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
103 103
-static int fc_lport_debug;
-
-#define FC_DEBUG_LPORT(fmt...) \
-	do { \
-		if (fc_lport_debug) \
-			FC_DBG(fmt); \
-	} while (0)
-
112static void fc_lport_error(struct fc_lport *, struct fc_frame *); 104static void fc_lport_error(struct fc_lport *, struct fc_frame *);
113 105
114static void fc_lport_enter_reset(struct fc_lport *); 106static void fc_lport_enter_reset(struct fc_lport *);
@@ -151,8 +143,8 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
151 struct fc_rport *rport, 143 struct fc_rport *rport,
152 enum fc_rport_event event) 144 enum fc_rport_event event)
153{ 145{
154 FC_DEBUG_LPORT("Received a %d event for port (%6x)\n", event, 146 FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event,
155 rport->port_id); 147 rport->port_id);
156 148
157 switch (event) { 149 switch (event) {
158 case RPORT_EV_CREATED: 150 case RPORT_EV_CREATED:
@@ -162,19 +154,19 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
162 lport->dns_rp = rport; 154 lport->dns_rp = rport;
163 fc_lport_enter_rpn_id(lport); 155 fc_lport_enter_rpn_id(lport);
164 } else { 156 } else {
-			FC_DEBUG_LPORT("Received an CREATED event on "
-				       "port (%6x) for the directory "
-				       "server, but the lport is not "
-				       "in the DNS state, it's in the "
-				       "%d state", rport->port_id,
-				       lport->state);
+			FC_LPORT_DBG(lport, "Received an CREATED event "
+				     "on port (%6x) for the directory "
+				     "server, but the lport is not "
+				     "in the DNS state, it's in the "
+				     "%d state", rport->port_id,
+				     lport->state);
171 lport->tt.rport_logoff(rport); 163 lport->tt.rport_logoff(rport);
172 } 164 }
173 mutex_unlock(&lport->lp_mutex); 165 mutex_unlock(&lport->lp_mutex);
174 } else 166 } else
175 FC_DEBUG_LPORT("Received an event for port (%6x) " 167 FC_LPORT_DBG(lport, "Received an event for port (%6x) "
176 "which is not the directory server\n", 168 "which is not the directory server\n",
177 rport->port_id); 169 rport->port_id);
178 break; 170 break;
179 case RPORT_EV_LOGO: 171 case RPORT_EV_LOGO:
180 case RPORT_EV_FAILED: 172 case RPORT_EV_FAILED:
@@ -185,9 +177,9 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
185 mutex_unlock(&lport->lp_mutex); 177 mutex_unlock(&lport->lp_mutex);
186 178
187 } else 179 } else
188 FC_DEBUG_LPORT("Received an event for port (%6x) " 180 FC_LPORT_DBG(lport, "Received an event for port (%6x) "
189 "which is not the directory server\n", 181 "which is not the directory server\n",
190 rport->port_id); 182 rport->port_id);
191 break; 183 break;
192 case RPORT_EV_NONE: 184 case RPORT_EV_NONE:
193 break; 185 break;
@@ -363,8 +355,8 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
363static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, 355static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
364 struct fc_lport *lport) 356 struct fc_lport *lport)
365{ 357{
366 FC_DEBUG_LPORT("Received RLIR request while in state %s\n", 358 FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
367 fc_lport_state(lport)); 359 fc_lport_state(lport));
368 360
369 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); 361 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
370 fc_frame_free(fp); 362 fc_frame_free(fp);
@@ -389,8 +381,8 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
389 void *dp; 381 void *dp;
390 u32 f_ctl; 382 u32 f_ctl;
391 383
392 FC_DEBUG_LPORT("Received RLIR request while in state %s\n", 384 FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
393 fc_lport_state(lport)); 385 fc_lport_state(lport));
394 386
395 len = fr_len(in_fp) - sizeof(struct fc_frame_header); 387 len = fr_len(in_fp) - sizeof(struct fc_frame_header);
396 pp = fc_frame_payload_get(in_fp, len); 388 pp = fc_frame_payload_get(in_fp, len);
@@ -437,8 +429,8 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
437 size_t len; 429 size_t len;
438 u32 f_ctl; 430 u32 f_ctl;
439 431
440 FC_DEBUG_LPORT("Received RNID request while in state %s\n", 432 FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
441 fc_lport_state(lport)); 433 fc_lport_state(lport));
442 434
443 req = fc_frame_payload_get(in_fp, sizeof(*req)); 435 req = fc_frame_payload_get(in_fp, sizeof(*req));
444 if (!req) { 436 if (!req) {
@@ -498,8 +490,8 @@ static void fc_lport_recv_adisc_req(struct fc_seq *sp, struct fc_frame *in_fp,
498 size_t len; 490 size_t len;
499 u32 f_ctl; 491 u32 f_ctl;
500 492
501 FC_DEBUG_LPORT("Received ADISC request while in state %s\n", 493 FC_LPORT_DBG(lport, "Received ADISC request while in state %s\n",
502 fc_lport_state(lport)); 494 fc_lport_state(lport));
503 495
504 req = fc_frame_payload_get(in_fp, sizeof(*req)); 496 req = fc_frame_payload_get(in_fp, sizeof(*req));
505 if (!req) { 497 if (!req) {
@@ -574,8 +566,8 @@ EXPORT_SYMBOL(fc_fabric_login);
574 */ 566 */
575void fc_linkup(struct fc_lport *lport) 567void fc_linkup(struct fc_lport *lport)
576{ 568{
-	FC_DEBUG_LPORT("Link is up for port (%6x)\n",
-		       fc_host_port_id(lport->host));
+	printk(KERN_INFO "libfc: Link up on port (%6x)\n",
+	       fc_host_port_id(lport->host));
579 571
580 mutex_lock(&lport->lp_mutex); 572 mutex_lock(&lport->lp_mutex);
581 if (!lport->link_up) { 573 if (!lport->link_up) {
@@ -595,8 +587,8 @@ EXPORT_SYMBOL(fc_linkup);
595void fc_linkdown(struct fc_lport *lport) 587void fc_linkdown(struct fc_lport *lport)
596{ 588{
597 mutex_lock(&lport->lp_mutex); 589 mutex_lock(&lport->lp_mutex);
-	FC_DEBUG_LPORT("Link is down for port (%6x)\n",
-		       fc_host_port_id(lport->host));
+	printk(KERN_INFO "libfc: Link down on port (%6x)\n",
+	       fc_host_port_id(lport->host));
600 592
601 if (lport->link_up) { 593 if (lport->link_up) {
602 lport->link_up = 0; 594 lport->link_up = 0;
@@ -701,12 +693,11 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
701{ 693{
702 switch (event) { 694 switch (event) {
703 case DISC_EV_SUCCESS: 695 case DISC_EV_SUCCESS:
-		FC_DEBUG_LPORT("Got a SUCCESS event for port (%6x)\n",
-			       fc_host_port_id(lport->host));
+		FC_LPORT_DBG(lport, "Discovery succeeded\n");
 		break;
 	case DISC_EV_FAILED:
-		FC_DEBUG_LPORT("Got a FAILED event for port (%6x)\n",
-			       fc_host_port_id(lport->host));
+		printk(KERN_ERR "libfc: Discovery failed for port (%6x)\n",
+		       fc_host_port_id(lport->host));
710 mutex_lock(&lport->lp_mutex); 701 mutex_lock(&lport->lp_mutex);
711 fc_lport_enter_reset(lport); 702 fc_lport_enter_reset(lport);
712 mutex_unlock(&lport->lp_mutex); 703 mutex_unlock(&lport->lp_mutex);
@@ -726,8 +717,8 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
726 */ 717 */
727static void fc_lport_enter_ready(struct fc_lport *lport) 718static void fc_lport_enter_ready(struct fc_lport *lport)
728{ 719{
729 FC_DEBUG_LPORT("Port (%6x) entered Ready from state %s\n", 720 FC_LPORT_DBG(lport, "Entered READY from state %s\n",
730 fc_host_port_id(lport->host), fc_lport_state(lport)); 721 fc_lport_state(lport));
731 722
732 fc_lport_state_enter(lport, LPORT_ST_READY); 723 fc_lport_state_enter(lport, LPORT_ST_READY);
733 724
@@ -762,8 +753,8 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
762 u32 local_fid; 753 u32 local_fid;
763 u32 f_ctl; 754 u32 f_ctl;
764 755
765 FC_DEBUG_LPORT("Received FLOGI request while in state %s\n", 756 FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
766 fc_lport_state(lport)); 757 fc_lport_state(lport));
767 758
768 fh = fc_frame_header_get(rx_fp); 759 fh = fc_frame_header_get(rx_fp);
769 remote_fid = ntoh24(fh->fh_s_id); 760 remote_fid = ntoh24(fh->fh_s_id);
@@ -772,12 +763,11 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
772 goto out; 763 goto out;
773 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn); 764 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
774 if (remote_wwpn == lport->wwpn) { 765 if (remote_wwpn == lport->wwpn) {
-		FC_DBG("FLOGI from port with same WWPN %llx "
-		       "possible configuration error\n",
-		       (unsigned long long)remote_wwpn);
+		printk(KERN_WARNING "libfc: Received FLOGI from port "
+		       "with same WWPN %llx\n", remote_wwpn);
 		goto out;
 	}
-	FC_DBG("FLOGI from port WWPN %llx\n", (unsigned long long)remote_wwpn);
+	FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn);
781 771
782 /* 772 /*
783 * XXX what is the right thing to do for FIDs? 773 * XXX what is the right thing to do for FIDs?
@@ -909,7 +899,8 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
909 } 899 }
910 } 900 }
911 } else { 901 } else {
-		FC_DBG("dropping invalid frame (eof %x)\n", fr_eof(fp));
+		FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
+			     fr_eof(fp));
913 fc_frame_free(fp); 904 fc_frame_free(fp);
914 } 905 }
915 mutex_unlock(&lport->lp_mutex); 906 mutex_unlock(&lport->lp_mutex);
@@ -947,8 +938,8 @@ EXPORT_SYMBOL(fc_lport_reset);
947 */ 938 */
948static void fc_lport_enter_reset(struct fc_lport *lport) 939static void fc_lport_enter_reset(struct fc_lport *lport)
949{ 940{
950 FC_DEBUG_LPORT("Port (%6x) entered RESET state from %s state\n", 941 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
951 fc_host_port_id(lport->host), fc_lport_state(lport)); 942 fc_lport_state(lport));
952 943
953 fc_lport_state_enter(lport, LPORT_ST_RESET); 944 fc_lport_state_enter(lport, LPORT_ST_RESET);
954 945
@@ -982,9 +973,9 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
982static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) 973static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
983{ 974{
984 unsigned long delay = 0; 975 unsigned long delay = 0;
985 FC_DEBUG_LPORT("Error %ld in state %s, retries %d\n", 976 FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
986 PTR_ERR(fp), fc_lport_state(lport), 977 PTR_ERR(fp), fc_lport_state(lport),
987 lport->retry_count); 978 lport->retry_count);
988 979
989 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) { 980 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
990 /* 981 /*
@@ -1040,11 +1031,11 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1040 1031
1041 mutex_lock(&lport->lp_mutex); 1032 mutex_lock(&lport->lp_mutex);
1042 1033
1043 FC_DEBUG_LPORT("Received a RFT_ID response\n"); 1034 FC_LPORT_DBG(lport, "Received a RFT_ID response\n");
1044 1035
1045 if (lport->state != LPORT_ST_RFT_ID) { 1036 if (lport->state != LPORT_ST_RFT_ID) {
1046 FC_DBG("Received a RFT_ID response, but in state %s\n", 1037 FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state "
1047 fc_lport_state(lport)); 1038 "%s\n", fc_lport_state(lport));
1048 if (IS_ERR(fp)) 1039 if (IS_ERR(fp))
1049 goto err; 1040 goto err;
1050 goto out; 1041 goto out;
@@ -1094,11 +1085,11 @@ static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1094 1085
1095 mutex_lock(&lport->lp_mutex); 1086 mutex_lock(&lport->lp_mutex);
1096 1087
1097 FC_DEBUG_LPORT("Received a RPN_ID response\n"); 1088 FC_LPORT_DBG(lport, "Received a RPN_ID response\n");
1098 1089
1099 if (lport->state != LPORT_ST_RPN_ID) { 1090 if (lport->state != LPORT_ST_RPN_ID) {
1100 FC_DBG("Received a RPN_ID response, but in state %s\n", 1091 FC_LPORT_DBG(lport, "Received a RPN_ID response, but in state "
1101 fc_lport_state(lport)); 1092 "%s\n", fc_lport_state(lport));
1102 if (IS_ERR(fp)) 1093 if (IS_ERR(fp))
1103 goto err; 1094 goto err;
1104 goto out; 1095 goto out;
@@ -1146,11 +1137,11 @@ static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
1146 1137
1147 mutex_lock(&lport->lp_mutex); 1138 mutex_lock(&lport->lp_mutex);
1148 1139
1149 FC_DEBUG_LPORT("Received a SCR response\n"); 1140 FC_LPORT_DBG(lport, "Received a SCR response\n");
1150 1141
1151 if (lport->state != LPORT_ST_SCR) { 1142 if (lport->state != LPORT_ST_SCR) {
1152 FC_DBG("Received a SCR response, but in state %s\n", 1143 FC_LPORT_DBG(lport, "Received a SCR response, but in state "
1153 fc_lport_state(lport)); 1144 "%s\n", fc_lport_state(lport));
1154 if (IS_ERR(fp)) 1145 if (IS_ERR(fp))
1155 goto err; 1146 goto err;
1156 goto out; 1147 goto out;
@@ -1184,8 +1175,8 @@ static void fc_lport_enter_scr(struct fc_lport *lport)
1184{ 1175{
1185 struct fc_frame *fp; 1176 struct fc_frame *fp;
1186 1177
1187 FC_DEBUG_LPORT("Port (%6x) entered SCR state from %s state\n", 1178 FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
1188 fc_host_port_id(lport->host), fc_lport_state(lport)); 1179 fc_lport_state(lport));
1189 1180
1190 fc_lport_state_enter(lport, LPORT_ST_SCR); 1181 fc_lport_state_enter(lport, LPORT_ST_SCR);
1191 1182
@@ -1213,8 +1204,8 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport)
1213 struct fc_ns_fts *lps; 1204 struct fc_ns_fts *lps;
1214 int i; 1205 int i;
1215 1206
1216 FC_DEBUG_LPORT("Port (%6x) entered RFT_ID state from %s state\n", 1207 FC_LPORT_DBG(lport, "Entered RFT_ID state from %s state\n",
1217 fc_host_port_id(lport->host), fc_lport_state(lport)); 1208 fc_lport_state(lport));
1218 1209
1219 fc_lport_state_enter(lport, LPORT_ST_RFT_ID); 1210 fc_lport_state_enter(lport, LPORT_ST_RFT_ID);
1220 1211
@@ -1253,8 +1244,8 @@ static void fc_lport_enter_rpn_id(struct fc_lport *lport)
1253{ 1244{
1254 struct fc_frame *fp; 1245 struct fc_frame *fp;
1255 1246
1256 FC_DEBUG_LPORT("Port (%6x) entered RPN_ID state from %s state\n", 1247 FC_LPORT_DBG(lport, "Entered RPN_ID state from %s state\n",
1257 fc_host_port_id(lport->host), fc_lport_state(lport)); 1248 fc_lport_state(lport));
1258 1249
1259 fc_lport_state_enter(lport, LPORT_ST_RPN_ID); 1250 fc_lport_state_enter(lport, LPORT_ST_RPN_ID);
1260 1251
@@ -1294,8 +1285,8 @@ static void fc_lport_enter_dns(struct fc_lport *lport)
1294 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN; 1285 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
1295 dp.lp = lport; 1286 dp.lp = lport;
1296 1287
1297 FC_DEBUG_LPORT("Port (%6x) entered DNS state from %s state\n", 1288 FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
1298 fc_host_port_id(lport->host), fc_lport_state(lport)); 1289 fc_lport_state(lport));
1299 1290
1300 fc_lport_state_enter(lport, LPORT_ST_DNS); 1291 fc_lport_state_enter(lport, LPORT_ST_DNS);
1301 1292
@@ -1374,11 +1365,11 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
1374 1365
1375 mutex_lock(&lport->lp_mutex); 1366 mutex_lock(&lport->lp_mutex);
1376 1367
1377 FC_DEBUG_LPORT("Received a LOGO response\n"); 1368 FC_LPORT_DBG(lport, "Received a LOGO response\n");
1378 1369
1379 if (lport->state != LPORT_ST_LOGO) { 1370 if (lport->state != LPORT_ST_LOGO) {
1380 FC_DBG("Received a LOGO response, but in state %s\n", 1371 FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
1381 fc_lport_state(lport)); 1372 "%s\n", fc_lport_state(lport));
1382 if (IS_ERR(fp)) 1373 if (IS_ERR(fp))
1383 goto err; 1374 goto err;
1384 goto out; 1375 goto out;
@@ -1413,8 +1404,8 @@ static void fc_lport_enter_logo(struct fc_lport *lport)
1413 struct fc_frame *fp; 1404 struct fc_frame *fp;
1414 struct fc_els_logo *logo; 1405 struct fc_els_logo *logo;
1415 1406
1416 FC_DEBUG_LPORT("Port (%6x) entered LOGO state from %s state\n", 1407 FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
1417 fc_host_port_id(lport->host), fc_lport_state(lport)); 1408 fc_lport_state(lport));
1418 1409
1419 fc_lport_state_enter(lport, LPORT_ST_LOGO); 1410 fc_lport_state_enter(lport, LPORT_ST_LOGO);
1420 1411
@@ -1456,11 +1447,11 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1456 1447
1457 mutex_lock(&lport->lp_mutex); 1448 mutex_lock(&lport->lp_mutex);
1458 1449
1459 FC_DEBUG_LPORT("Received a FLOGI response\n"); 1450 FC_LPORT_DBG(lport, "Received a FLOGI response\n");
1460 1451
1461 if (lport->state != LPORT_ST_FLOGI) { 1452 if (lport->state != LPORT_ST_FLOGI) {
1462 FC_DBG("Received a FLOGI response, but in state %s\n", 1453 FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
1463 fc_lport_state(lport)); 1454 "%s\n", fc_lport_state(lport));
1464 if (IS_ERR(fp)) 1455 if (IS_ERR(fp))
1465 goto err; 1456 goto err;
1466 goto out; 1457 goto out;
@@ -1475,7 +1466,8 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1475 did = ntoh24(fh->fh_d_id); 1466 did = ntoh24(fh->fh_d_id);
1476 if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) { 1467 if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
1477 1468
-		FC_DEBUG_LPORT("Assigned fid %x\n", did);
+		printk(KERN_INFO "libfc: Assigned FID (%6x) in FLOGI response\n",
+		       did);
1479 fc_host_port_id(lport->host) = did; 1471 fc_host_port_id(lport->host) = did;
1480 1472
1481 flp = fc_frame_payload_get(fp, sizeof(*flp)); 1473 flp = fc_frame_payload_get(fp, sizeof(*flp));
@@ -1494,7 +1486,8 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1494 if (e_d_tov > lport->e_d_tov) 1486 if (e_d_tov > lport->e_d_tov)
1495 lport->e_d_tov = e_d_tov; 1487 lport->e_d_tov = e_d_tov;
1496 lport->r_a_tov = 2 * e_d_tov; 1488 lport->r_a_tov = 2 * e_d_tov;
-			FC_DBG("Point-to-Point mode\n");
+			printk(KERN_INFO "libfc: Port (%6x) entered "
+			       "point to point mode\n", did);
1498 fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id), 1491 fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
1499 get_unaligned_be64( 1492 get_unaligned_be64(
1500 &flp->fl_wwpn), 1493 &flp->fl_wwpn),
@@ -1517,7 +1510,7 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1517 } 1510 }
1518 } 1511 }
1519 } else { 1512 } else {
1520 FC_DBG("bad FLOGI response\n"); 1513 FC_LPORT_DBG(lport, "Bad FLOGI response\n");
1521 } 1514 }
1522 1515
1523out: 1516out:
@@ -1537,7 +1530,8 @@ void fc_lport_enter_flogi(struct fc_lport *lport)
1537{ 1530{
1538 struct fc_frame *fp; 1531 struct fc_frame *fp;
1539 1532
-	FC_DEBUG_LPORT("Processing FLOGI state\n");
+	FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
+		     fc_lport_state(lport));
1541 1535
1542 fc_lport_state_enter(lport, LPORT_ST_FLOGI); 1536 fc_lport_state_enter(lport, LPORT_ST_FLOGI);
1543 1537
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 747d73c5c8af..7162385f52eb 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -55,14 +55,6 @@
55#include <scsi/libfc.h> 55#include <scsi/libfc.h>
56#include <scsi/fc_encode.h> 56#include <scsi/fc_encode.h>
57 57
-static int fc_rport_debug;
-
-#define FC_DEBUG_RPORT(fmt...) \
-	do { \
-		if (fc_rport_debug) \
-			FC_DBG(fmt); \
-	} while (0)
-
66struct workqueue_struct *rport_event_queue; 58struct workqueue_struct *rport_event_queue;
67 59
68static void fc_rport_enter_plogi(struct fc_rport *); 60static void fc_rport_enter_plogi(struct fc_rport *);
@@ -97,7 +89,7 @@ static const char *fc_rport_state_names[] = {
97static void fc_rport_rogue_destroy(struct device *dev) 89static void fc_rport_rogue_destroy(struct device *dev)
98{ 90{
99 struct fc_rport *rport = dev_to_rport(dev); 91 struct fc_rport *rport = dev_to_rport(dev);
100 FC_DEBUG_RPORT("Destroying rogue rport (%6x)\n", rport->port_id); 92 FC_RPORT_DBG(rport, "Destroying rogue rport\n");
101 kfree(rport); 93 kfree(rport);
102} 94}
103 95
@@ -263,8 +255,8 @@ static void fc_rport_work(struct work_struct *work)
263 255
264 fc_rport_state_enter(new_rport, RPORT_ST_READY); 256 fc_rport_state_enter(new_rport, RPORT_ST_READY);
265 } else { 257 } else {
-			FC_DBG("Failed to create the rport for port "
-			       "(%6x).\n", ids.port_id);
+			printk(KERN_WARNING "libfc: Failed to allocate "
+			       " memory for rport (%6x)\n", ids.port_id);
268 event = RPORT_EV_FAILED; 260 event = RPORT_EV_FAILED;
269 } 261 }
270 if (rport->port_id != FC_FID_DIR_SERV) 262 if (rport->port_id != FC_FID_DIR_SERV)
@@ -309,7 +301,7 @@ int fc_rport_login(struct fc_rport *rport)
309 301
310 mutex_lock(&rdata->rp_mutex); 302 mutex_lock(&rdata->rp_mutex);
311 303
312 FC_DEBUG_RPORT("Login to port (%6x)\n", rport->port_id); 304 FC_RPORT_DBG(rport, "Login to port\n");
313 305
314 fc_rport_enter_plogi(rport); 306 fc_rport_enter_plogi(rport);
315 307
@@ -329,16 +321,13 @@ int fc_rport_login(struct fc_rport *rport)
329int fc_rport_logoff(struct fc_rport *rport) 321int fc_rport_logoff(struct fc_rport *rport)
330{ 322{
331 struct fc_rport_libfc_priv *rdata = rport->dd_data; 323 struct fc_rport_libfc_priv *rdata = rport->dd_data;
-	struct fc_lport *lport = rdata->local_port;
333 324
334 mutex_lock(&rdata->rp_mutex); 325 mutex_lock(&rdata->rp_mutex);
335 326
336 FC_DEBUG_RPORT("Remove port (%6x)\n", rport->port_id); 327 FC_RPORT_DBG(rport, "Remove port\n");
337 328
338 if (rdata->rp_state == RPORT_ST_NONE) { 329 if (rdata->rp_state == RPORT_ST_NONE) {
-		FC_DEBUG_RPORT("(%6x): Port (%6x) in NONE state,"
-			       " not removing", fc_host_port_id(lport->host),
-			       rport->port_id);
+		FC_RPORT_DBG(rport, "Port in NONE state, not removing\n");
342 mutex_unlock(&rdata->rp_mutex); 331 mutex_unlock(&rdata->rp_mutex);
343 goto out; 332 goto out;
344 } 333 }
@@ -379,7 +368,7 @@ static void fc_rport_enter_ready(struct fc_rport *rport)
379 368
380 fc_rport_state_enter(rport, RPORT_ST_READY); 369 fc_rport_state_enter(rport, RPORT_ST_READY);
381 370
382 FC_DEBUG_RPORT("Port (%6x) is Ready\n", rport->port_id); 371 FC_RPORT_DBG(rport, "Port is Ready\n");
383 372
384 rdata->event = RPORT_EV_CREATED; 373 rdata->event = RPORT_EV_CREATED;
385 queue_work(rport_event_queue, &rdata->event_work); 374 queue_work(rport_event_queue, &rdata->event_work);
@@ -436,8 +425,8 @@ static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
436{ 425{
437 struct fc_rport_libfc_priv *rdata = rport->dd_data; 426 struct fc_rport_libfc_priv *rdata = rport->dd_data;
438 427
439 FC_DEBUG_RPORT("Error %ld in state %s, retries %d\n", 428 FC_RPORT_DBG(rport, "Error %ld in state %s, retries %d\n",
440 PTR_ERR(fp), fc_rport_state(rport), rdata->retries); 429 PTR_ERR(fp), fc_rport_state(rport), rdata->retries);
441 430
442 switch (rdata->rp_state) { 431 switch (rdata->rp_state) {
443 case RPORT_ST_PLOGI: 432 case RPORT_ST_PLOGI:
@@ -478,9 +467,9 @@ static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp)
478 if (PTR_ERR(fp) == -FC_EX_CLOSED) 467 if (PTR_ERR(fp) == -FC_EX_CLOSED)
479 return fc_rport_error(rport, fp); 468 return fc_rport_error(rport, fp);
480 469
-	if (rdata->retries < rdata->local_port->max_retry_count) {
-		FC_DEBUG_RPORT("Error %ld in state %s, retrying\n",
-			       PTR_ERR(fp), fc_rport_state(rport));
+	if (rdata->retries < rdata->local_port->max_rport_retry_count) {
+		FC_RPORT_DBG(rport, "Error %ld in state %s, retrying\n",
+			     PTR_ERR(fp), fc_rport_state(rport));
484 rdata->retries++; 473 rdata->retries++;
485 /* no additional delay on exchange timeouts */ 474 /* no additional delay on exchange timeouts */
486 if (PTR_ERR(fp) == -FC_EX_TIMEOUT) 475 if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
@@ -517,12 +506,11 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
517 506
518 mutex_lock(&rdata->rp_mutex); 507 mutex_lock(&rdata->rp_mutex);
519 508
-	FC_DEBUG_RPORT("Received a PLOGI response from port (%6x)\n",
-		       rport->port_id);
+	FC_RPORT_DBG(rport, "Received a PLOGI response\n");
522 510
523 if (rdata->rp_state != RPORT_ST_PLOGI) { 511 if (rdata->rp_state != RPORT_ST_PLOGI) {
524 FC_DBG("Received a PLOGI response, but in state %s\n", 512 FC_RPORT_DBG(rport, "Received a PLOGI response, but in state "
525 fc_rport_state(rport)); 513 "%s\n", fc_rport_state(rport));
526 if (IS_ERR(fp)) 514 if (IS_ERR(fp))
527 goto err; 515 goto err;
528 goto out; 516 goto out;
@@ -583,8 +571,8 @@ static void fc_rport_enter_plogi(struct fc_rport *rport)
583 struct fc_lport *lport = rdata->local_port; 571 struct fc_lport *lport = rdata->local_port;
584 struct fc_frame *fp; 572 struct fc_frame *fp;
585 573
586 FC_DEBUG_RPORT("Port (%6x) entered PLOGI state from %s state\n", 574 FC_RPORT_DBG(rport, "Port entered PLOGI state from %s state\n",
587 rport->port_id, fc_rport_state(rport)); 575 fc_rport_state(rport));
588 576
589 fc_rport_state_enter(rport, RPORT_ST_PLOGI); 577 fc_rport_state_enter(rport, RPORT_ST_PLOGI);
590 578
@@ -628,12 +616,11 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
628 616
629 mutex_lock(&rdata->rp_mutex); 617 mutex_lock(&rdata->rp_mutex);
630 618
-	FC_DEBUG_RPORT("Received a PRLI response from port (%6x)\n",
-		       rport->port_id);
+	FC_RPORT_DBG(rport, "Received a PRLI response\n");
633 620
634 if (rdata->rp_state != RPORT_ST_PRLI) { 621 if (rdata->rp_state != RPORT_ST_PRLI) {
635 FC_DBG("Received a PRLI response, but in state %s\n", 622 FC_RPORT_DBG(rport, "Received a PRLI response, but in state "
636 fc_rport_state(rport)); 623 "%s\n", fc_rport_state(rport));
637 if (IS_ERR(fp)) 624 if (IS_ERR(fp))
638 goto err; 625 goto err;
639 goto out; 626 goto out;
@@ -663,7 +650,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
663 fc_rport_enter_rtv(rport); 650 fc_rport_enter_rtv(rport);
664 651
665 } else { 652 } else {
-		FC_DBG("Bad ELS response\n");
+		FC_RPORT_DBG(rport, "Bad ELS response for PRLI command\n");
667 rdata->event = RPORT_EV_FAILED; 654 rdata->event = RPORT_EV_FAILED;
668 fc_rport_state_enter(rport, RPORT_ST_NONE); 655 fc_rport_state_enter(rport, RPORT_ST_NONE);
669 queue_work(rport_event_queue, &rdata->event_work); 656 queue_work(rport_event_queue, &rdata->event_work);
@@ -695,12 +682,11 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
695 682
696 mutex_lock(&rdata->rp_mutex); 683 mutex_lock(&rdata->rp_mutex);
697 684
-	FC_DEBUG_RPORT("Received a LOGO response from port (%6x)\n",
-		       rport->port_id);
+	FC_RPORT_DBG(rport, "Received a LOGO response\n");
700 686
701 if (rdata->rp_state != RPORT_ST_LOGO) { 687 if (rdata->rp_state != RPORT_ST_LOGO) {
702 FC_DEBUG_RPORT("Received a LOGO response, but in state %s\n", 688 FC_RPORT_DBG(rport, "Received a LOGO response, but in state "
703 fc_rport_state(rport)); 689 "%s\n", fc_rport_state(rport));
704 if (IS_ERR(fp)) 690 if (IS_ERR(fp))
705 goto err; 691 goto err;
706 goto out; 692 goto out;
@@ -715,7 +701,7 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
715 if (op == ELS_LS_ACC) { 701 if (op == ELS_LS_ACC) {
716 fc_rport_enter_rtv(rport); 702 fc_rport_enter_rtv(rport);
717 } else { 703 } else {
-		FC_DBG("Bad ELS response\n");
+		FC_RPORT_DBG(rport, "Bad ELS response for LOGO command\n");
719 rdata->event = RPORT_EV_LOGO; 705 rdata->event = RPORT_EV_LOGO;
720 fc_rport_state_enter(rport, RPORT_ST_NONE); 706 fc_rport_state_enter(rport, RPORT_ST_NONE);
721 queue_work(rport_event_queue, &rdata->event_work); 707 queue_work(rport_event_queue, &rdata->event_work);
@@ -745,8 +731,8 @@ static void fc_rport_enter_prli(struct fc_rport *rport)
745 } *pp; 731 } *pp;
746 struct fc_frame *fp; 732 struct fc_frame *fp;
747 733
748 FC_DEBUG_RPORT("Port (%6x) entered PRLI state from %s state\n", 734 FC_RPORT_DBG(rport, "Port entered PRLI state from %s state\n",
749 rport->port_id, fc_rport_state(rport)); 735 fc_rport_state(rport));
750 736
751 fc_rport_state_enter(rport, RPORT_ST_PRLI); 737 fc_rport_state_enter(rport, RPORT_ST_PRLI);
752 738
@@ -784,12 +770,11 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
784 770
785 mutex_lock(&rdata->rp_mutex); 771 mutex_lock(&rdata->rp_mutex);
786 772
-	FC_DEBUG_RPORT("Received a RTV response from port (%6x)\n",
-		       rport->port_id);
+	FC_RPORT_DBG(rport, "Received a RTV response\n");
789 774
790 if (rdata->rp_state != RPORT_ST_RTV) { 775 if (rdata->rp_state != RPORT_ST_RTV) {
791 FC_DBG("Received a RTV response, but in state %s\n", 776 FC_RPORT_DBG(rport, "Received a RTV response, but in state "
792 fc_rport_state(rport)); 777 "%s\n", fc_rport_state(rport));
793 if (IS_ERR(fp)) 778 if (IS_ERR(fp))
794 goto err; 779 goto err;
795 goto out; 780 goto out;
@@ -844,8 +829,8 @@ static void fc_rport_enter_rtv(struct fc_rport *rport)
844 struct fc_rport_libfc_priv *rdata = rport->dd_data; 829 struct fc_rport_libfc_priv *rdata = rport->dd_data;
845 struct fc_lport *lport = rdata->local_port; 830 struct fc_lport *lport = rdata->local_port;
846 831
847 FC_DEBUG_RPORT("Port (%6x) entered RTV state from %s state\n", 832 FC_RPORT_DBG(rport, "Port entered RTV state from %s state\n",
848 rport->port_id, fc_rport_state(rport)); 833 fc_rport_state(rport));
849 834
850 fc_rport_state_enter(rport, RPORT_ST_RTV); 835 fc_rport_state_enter(rport, RPORT_ST_RTV);
851 836
@@ -875,8 +860,8 @@ static void fc_rport_enter_logo(struct fc_rport *rport)
875 struct fc_lport *lport = rdata->local_port; 860 struct fc_lport *lport = rdata->local_port;
876 struct fc_frame *fp; 861 struct fc_frame *fp;
877 862
878 FC_DEBUG_RPORT("Port (%6x) entered LOGO state from %s state\n", 863 FC_RPORT_DBG(rport, "Port entered LOGO state from %s state\n",
879 rport->port_id, fc_rport_state(rport)); 864 fc_rport_state(rport));
880 865
881 fc_rport_state_enter(rport, RPORT_ST_LOGO); 866 fc_rport_state_enter(rport, RPORT_ST_LOGO);
882 867
@@ -983,14 +968,13 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport,
983 968
984 fh = fc_frame_header_get(fp); 969 fh = fc_frame_header_get(fp);
985 970
-	FC_DEBUG_RPORT("Received PLOGI request from port (%6x) "
-		       "while in state %s\n", ntoh24(fh->fh_s_id),
-		       fc_rport_state(rport));
+	FC_RPORT_DBG(rport, "Received PLOGI request while in state %s\n",
+		     fc_rport_state(rport));
989 973
990 sid = ntoh24(fh->fh_s_id); 974 sid = ntoh24(fh->fh_s_id);
991 pl = fc_frame_payload_get(fp, sizeof(*pl)); 975 pl = fc_frame_payload_get(fp, sizeof(*pl));
992 if (!pl) { 976 if (!pl) {
993 FC_DBG("incoming PLOGI from %x too short\n", sid); 977 FC_RPORT_DBG(rport, "Received PLOGI too short\n");
994 WARN_ON(1); 978 WARN_ON(1);
995 /* XXX TBD: send reject? */ 979 /* XXX TBD: send reject? */
996 fc_frame_free(fp); 980 fc_frame_free(fp);
@@ -1012,26 +996,26 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport,
1012 */ 996 */
1013 switch (rdata->rp_state) { 997 switch (rdata->rp_state) {
1014 case RPORT_ST_INIT: 998 case RPORT_ST_INIT:
1015 FC_DEBUG_RPORT("incoming PLOGI from %6x wwpn %llx state INIT " 999 FC_RPORT_DBG(rport, "Received PLOGI, wwpn %llx state INIT "
1016 "- reject\n", sid, (unsigned long long)wwpn); 1000 "- reject\n", (unsigned long long)wwpn);
1017 reject = ELS_RJT_UNSUP; 1001 reject = ELS_RJT_UNSUP;
1018 break; 1002 break;
1019 case RPORT_ST_PLOGI: 1003 case RPORT_ST_PLOGI:
1020 FC_DEBUG_RPORT("incoming PLOGI from %x in PLOGI state %d\n", 1004 FC_RPORT_DBG(rport, "Received PLOGI in PLOGI state %d\n",
1021 sid, rdata->rp_state); 1005 rdata->rp_state);
1022 if (wwpn < lport->wwpn) 1006 if (wwpn < lport->wwpn)
1023 reject = ELS_RJT_INPROG; 1007 reject = ELS_RJT_INPROG;
1024 break; 1008 break;
1025 case RPORT_ST_PRLI: 1009 case RPORT_ST_PRLI:
1026 case RPORT_ST_READY: 1010 case RPORT_ST_READY:
1027 FC_DEBUG_RPORT("incoming PLOGI from %x in logged-in state %d " 1011 FC_RPORT_DBG(rport, "Received PLOGI in logged-in state %d "
1028 "- ignored for now\n", sid, rdata->rp_state); 1012 "- ignored for now\n", rdata->rp_state);
1029 /* XXX TBD - should reset */ 1013 /* XXX TBD - should reset */
1030 break; 1014 break;
1031 case RPORT_ST_NONE: 1015 case RPORT_ST_NONE:
1032 default: 1016 default:
1033 FC_DEBUG_RPORT("incoming PLOGI from %x in unexpected " 1017 FC_RPORT_DBG(rport, "Received PLOGI in unexpected "
1034 "state %d\n", sid, rdata->rp_state); 1018 "state %d\n", rdata->rp_state);
1035 fc_frame_free(fp); 1019 fc_frame_free(fp);
1036 return; 1020 return;
1037 break; 1021 break;
@@ -1115,9 +1099,8 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport,
1115 1099
1116 fh = fc_frame_header_get(rx_fp); 1100 fh = fc_frame_header_get(rx_fp);
1117 1101
-	FC_DEBUG_RPORT("Received PRLI request from port (%6x) "
-		       "while in state %s\n", ntoh24(fh->fh_s_id),
-		       fc_rport_state(rport));
+	FC_RPORT_DBG(rport, "Received PRLI request while in state %s\n",
+		     fc_rport_state(rport));
1121 1104
1122 switch (rdata->rp_state) { 1105 switch (rdata->rp_state) {
1123 case RPORT_ST_PRLI: 1106 case RPORT_ST_PRLI:
@@ -1252,9 +1235,8 @@ static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp,
1252 1235
1253 fh = fc_frame_header_get(fp); 1236 fh = fc_frame_header_get(fp);
1254 1237
-	FC_DEBUG_RPORT("Received PRLO request from port (%6x) "
-		       "while in state %s\n", ntoh24(fh->fh_s_id),
-		       fc_rport_state(rport));
+	FC_RPORT_DBG(rport, "Received PRLO request while in state %s\n",
+		     fc_rport_state(rport));
1258 1240
1259 if (rdata->rp_state == RPORT_ST_NONE) { 1241 if (rdata->rp_state == RPORT_ST_NONE) {
1260 fc_frame_free(fp); 1242 fc_frame_free(fp);
@@ -1286,9 +1268,8 @@ static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp,
1286 1268
1287 fh = fc_frame_header_get(fp); 1269 fh = fc_frame_header_get(fp);
1288 1270
-	FC_DEBUG_RPORT("Received LOGO request from port (%6x) "
-		       "while in state %s\n", ntoh24(fh->fh_s_id),
-		       fc_rport_state(rport));
+	FC_RPORT_DBG(rport, "Received LOGO request while in state %s\n",
+		     fc_rport_state(rport));
1292 1273
1293 if (rdata->rp_state == RPORT_ST_NONE) { 1274 if (rdata->rp_state == RPORT_ST_NONE) {
1294 fc_frame_free(fp); 1275 fc_frame_free(fp);
@@ -1308,7 +1289,6 @@ static void fc_rport_flush_queue(void)
1308 flush_workqueue(rport_event_queue); 1289 flush_workqueue(rport_event_queue);
1309} 1290}
1310 1291
-
1312int fc_rport_init(struct fc_lport *lport) 1292int fc_rport_init(struct fc_lport *lport)
1313{ 1293{
1314 if (!lport->tt.rport_create) 1294 if (!lport->tt.rport_create)
@@ -1330,7 +1310,7 @@ int fc_rport_init(struct fc_lport *lport)
1330} 1310}
1331EXPORT_SYMBOL(fc_rport_init); 1311EXPORT_SYMBOL(fc_rport_init);
1332 1312
-int fc_setup_rport()
+int fc_setup_rport(void)
1334{ 1314{
1335 rport_event_queue = create_singlethread_workqueue("fc_rport_eq"); 1315 rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
1336 if (!rport_event_queue) 1316 if (!rport_event_queue)
@@ -1339,7 +1319,7 @@ int fc_setup_rport()
1339} 1319}
1340EXPORT_SYMBOL(fc_setup_rport); 1320EXPORT_SYMBOL(fc_setup_rport);
1341 1321
-void fc_destroy_rport()
+void fc_destroy_rport(void)
1343{ 1323{
1344 destroy_workqueue(rport_event_queue); 1324 destroy_workqueue(rport_event_queue);
1345} 1325}
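The final two fc_rport.c hunks give fc_setup_rport() and fc_destroy_rport() explicit (void) parameter lists. In C, an empty parameter list leaves the parameters unspecified rather than declaring a function that takes no arguments, so only the (void) form lets the compiler reject stray arguments at call sites. A standalone sketch of the difference (setup_old/setup_new are invented names, not libfc functions):

/* Illustration only -- not libfc code. */
int setup_old();	/* unspecified parameters: call sites are not checked */
int setup_new(void);	/* takes no arguments: bogus calls are rejected */

int main(void)
{
	setup_old(42);		/* accepted (pre-C23) despite the stray argument */
	/* setup_new(42); */	/* would be a compile-time error */
	return 0;
}

int setup_old() { return 0; }
int setup_new(void) { return 0; }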
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index e72b4ad47d35..716cc344c5df 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -38,15 +38,30 @@
38#include <scsi/scsi_transport_iscsi.h> 38#include <scsi/scsi_transport_iscsi.h>
39#include <scsi/libiscsi.h> 39#include <scsi/libiscsi.h>
40 40
-static int iscsi_dbg_lib;
-module_param_named(debug_libiscsi, iscsi_dbg_lib, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug_libiscsi, "Turn on debugging for libiscsi module. "
-		 "Set to 1 to turn on, and zero to turn off. Default "
-		 "is off.");
+static int iscsi_dbg_lib_conn;
+module_param_named(debug_libiscsi_conn, iscsi_dbg_lib_conn, int,
+		   S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_libiscsi_conn,
+		 "Turn on debugging for connections in libiscsi module. "
+		 "Set to 1 to turn on, and zero to turn off. Default is off.");
+
+static int iscsi_dbg_lib_session;
+module_param_named(debug_libiscsi_session, iscsi_dbg_lib_session, int,
+		   S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_libiscsi_session,
+		 "Turn on debugging for sessions in libiscsi module. "
+		 "Set to 1 to turn on, and zero to turn off. Default is off.");
+
+static int iscsi_dbg_lib_eh;
+module_param_named(debug_libiscsi_eh, iscsi_dbg_lib_eh, int,
+		   S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_libiscsi_eh,
+		 "Turn on debugging for error handling in libiscsi module. "
+		 "Set to 1 to turn on, and zero to turn off. Default is off.");
46 61
47#define ISCSI_DBG_CONN(_conn, dbg_fmt, arg...) \ 62#define ISCSI_DBG_CONN(_conn, dbg_fmt, arg...) \
48 do { \ 63 do { \
-		if (iscsi_dbg_lib)					\
+		if (iscsi_dbg_lib_conn)					\
50 iscsi_conn_printk(KERN_INFO, _conn, \ 65 iscsi_conn_printk(KERN_INFO, _conn, \
51 "%s " dbg_fmt, \ 66 "%s " dbg_fmt, \
52 __func__, ##arg); \ 67 __func__, ##arg); \
@@ -54,7 +69,15 @@ MODULE_PARM_DESC(debug_libiscsi, "Turn on debugging for libiscsi module. "
54 69
55#define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...) \ 70#define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...) \
56 do { \ 71 do { \
-		if (iscsi_dbg_lib)					\
+		if (iscsi_dbg_lib_session)				\
+			iscsi_session_printk(KERN_INFO, _session,	\
+					     "%s " dbg_fmt,		\
+					     __func__, ##arg);		\
+	} while (0);
+
+#define ISCSI_DBG_EH(_session, dbg_fmt, arg...)			\
+	do {								\
+		if (iscsi_dbg_lib_eh)					\
58 iscsi_session_printk(KERN_INFO, _session, \ 81 iscsi_session_printk(KERN_INFO, _session, \
59 "%s " dbg_fmt, \ 82 "%s " dbg_fmt, \
60 __func__, ##arg); \ 83 __func__, ##arg); \
@@ -81,7 +104,8 @@ inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
81 struct Scsi_Host *shost = conn->session->host; 104 struct Scsi_Host *shost = conn->session->host;
82 struct iscsi_host *ihost = shost_priv(shost); 105 struct iscsi_host *ihost = shost_priv(shost);
83 106
-	queue_work(ihost->workq, &conn->xmitwork);
+	if (ihost->workq)
+		queue_work(ihost->workq, &conn->xmitwork);
85} 109}
86EXPORT_SYMBOL_GPL(iscsi_conn_queue_work); 110EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
87 111
@@ -109,11 +133,9 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
109 * if the window closed with IO queued, then kick the 133 * if the window closed with IO queued, then kick the
110 * xmit thread 134 * xmit thread
111 */ 135 */
-		if (!list_empty(&session->leadconn->xmitqueue) ||
-		    !list_empty(&session->leadconn->mgmtqueue)) {
-			if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
-				iscsi_conn_queue_work(session->leadconn);
-		}
+		if (!list_empty(&session->leadconn->cmdqueue) ||
+		    !list_empty(&session->leadconn->mgmtqueue))
+			iscsi_conn_queue_work(session->leadconn);
117 } 139 }
118} 140}
119EXPORT_SYMBOL_GPL(iscsi_update_cmdsn); 141EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
@@ -257,9 +279,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
257 itt_t itt; 279 itt_t itt;
258 int rc; 280 int rc;
259 281
-	rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
-	if (rc)
-		return rc;
+	if (conn->session->tt->alloc_pdu) {
+		rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
+		if (rc)
+			return rc;
+	}
263 hdr = (struct iscsi_cmd *) task->hdr; 287 hdr = (struct iscsi_cmd *) task->hdr;
264 itt = hdr->itt; 288 itt = hdr->itt;
265 memset(hdr, 0, sizeof(*hdr)); 289 memset(hdr, 0, sizeof(*hdr));
@@ -364,7 +388,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
364 return -EIO; 388 return -EIO;
365 389
366 task->state = ISCSI_TASK_RUNNING; 390 task->state = ISCSI_TASK_RUNNING;
367 list_move_tail(&task->running, &conn->run_list);
368 391
369 conn->scsicmd_pdus_cnt++; 392 conn->scsicmd_pdus_cnt++;
370 ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x " 393 ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x "
@@ -380,26 +403,25 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
380} 403}
381 404
382/** 405/**
- * iscsi_complete_command - finish a task
+ * iscsi_free_task - free a task
384 * @task: iscsi cmd task 407 * @task: iscsi cmd task
385 * 408 *
386 * Must be called with session lock. 409 * Must be called with session lock.
387 * This function returns the scsi command to scsi-ml or cleans 410 * This function returns the scsi command to scsi-ml or cleans
388 * up mgmt tasks then returns the task to the pool. 411 * up mgmt tasks then returns the task to the pool.
389 */ 412 */
-static void iscsi_complete_command(struct iscsi_task *task)
+static void iscsi_free_task(struct iscsi_task *task)
391{ 414{
392 struct iscsi_conn *conn = task->conn; 415 struct iscsi_conn *conn = task->conn;
393 struct iscsi_session *session = conn->session; 416 struct iscsi_session *session = conn->session;
394 struct scsi_cmnd *sc = task->sc; 417 struct scsi_cmnd *sc = task->sc;
395 418
+	ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n",
+			  task->itt, task->state, task->sc);
+
 	session->tt->cleanup_task(task);
-	list_del_init(&task->running);
-	task->state = ISCSI_TASK_COMPLETED;
+	task->state = ISCSI_TASK_FREE;
 	task->sc = NULL;
-
-	if (conn->task == task)
-		conn->task = NULL;
403 /* 425 /*
404 * login task is preallocated so do not free 426 * login task is preallocated so do not free
405 */ 427 */
@@ -408,9 +430,6 @@ static void iscsi_complete_command(struct iscsi_task *task)
408 430
409 __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*)); 431 __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
410 432
-	if (conn->ping_task == task)
-		conn->ping_task = NULL;
-
414 if (sc) { 433 if (sc) {
415 task->sc = NULL; 434 task->sc = NULL;
416 /* SCSI eh reuses commands to verify us */ 435 /* SCSI eh reuses commands to verify us */
@@ -433,7 +452,7 @@ EXPORT_SYMBOL_GPL(__iscsi_get_task);
433static void __iscsi_put_task(struct iscsi_task *task) 452static void __iscsi_put_task(struct iscsi_task *task)
434{ 453{
435 if (atomic_dec_and_test(&task->refcount)) 454 if (atomic_dec_and_test(&task->refcount))
-		iscsi_complete_command(task);
+		iscsi_free_task(task);
437} 456}
438 457
439void iscsi_put_task(struct iscsi_task *task) 458void iscsi_put_task(struct iscsi_task *task)
@@ -446,26 +465,74 @@ void iscsi_put_task(struct iscsi_task *task)
446} 465}
447EXPORT_SYMBOL_GPL(iscsi_put_task); 466EXPORT_SYMBOL_GPL(iscsi_put_task);
448 467
+/**
+ * iscsi_complete_task - finish a task
+ * @task: iscsi cmd task
+ * @state: state to complete task with
+ *
+ * Must be called with session lock.
+ */
+static void iscsi_complete_task(struct iscsi_task *task, int state)
+{
+	struct iscsi_conn *conn = task->conn;
+
+	ISCSI_DBG_SESSION(conn->session,
+			  "complete task itt 0x%x state %d sc %p\n",
+			  task->itt, task->state, task->sc);
+	if (task->state == ISCSI_TASK_COMPLETED ||
+	    task->state == ISCSI_TASK_ABRT_TMF ||
+	    task->state == ISCSI_TASK_ABRT_SESS_RECOV)
+		return;
+	WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
+	task->state = state;
+
+	if (!list_empty(&task->running))
+		list_del_init(&task->running);
+
+	if (conn->task == task)
+		conn->task = NULL;
+
+	if (conn->ping_task == task)
+		conn->ping_task = NULL;
+
+	/* release get from queueing */
+	__iscsi_put_task(task);
+}
+
449/* 502/*
- * session lock must be held
+ * session lock must be held and if not called for a task that is
+ * still pending or from the xmit thread, then xmit thread must
+ * be suspended.
451 */ 506 */
-static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
-			 int err)
+static void fail_scsi_task(struct iscsi_task *task, int err)
 {
+	struct iscsi_conn *conn = task->conn;
 	struct scsi_cmnd *sc;
+	int state;
 
+	/*
+	 * if a command completes and we get a successful tmf response
+	 * we will hit this because the scsi eh abort code does not take
+	 * a ref to the task.
+	 */
457 sc = task->sc; 518 sc = task->sc;
458 if (!sc) 519 if (!sc)
459 return; 520 return;
460 521
-	if (task->state == ISCSI_TASK_PENDING)
+	if (task->state == ISCSI_TASK_PENDING) {
 		/*
 		 * cmd never made it to the xmit thread, so we should not count
 		 * the cmd in the sequencing
 		 */
 		conn->session->queued_cmdsn--;
+		/* it was never sent so just complete like normal */
+		state = ISCSI_TASK_COMPLETED;
+	} else if (err == DID_TRANSPORT_DISRUPTED)
+		state = ISCSI_TASK_ABRT_SESS_RECOV;
+	else
+		state = ISCSI_TASK_ABRT_TMF;
467 534
-	sc->result = err;
+	sc->result = err << 16;
469 if (!scsi_bidi_cmnd(sc)) 536 if (!scsi_bidi_cmnd(sc))
470 scsi_set_resid(sc, scsi_bufflen(sc)); 537 scsi_set_resid(sc, scsi_bufflen(sc));
471 else { 538 else {
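fail_scsi_task() now shifts the status itself (sc->result = err << 16) because its callers, reworked later in this patch, pass bare DID_* codes such as DID_ABORT rather than pre-shifted values; in the SCSI midlayer the host byte sits in bits 16-23 of scsi_cmnd->result. A standalone user-space sketch of that encoding (the EX_ constants are stand-ins, not values copied from <scsi/scsi.h>):

/* Illustration only -- not kernel code. */
#include <stdio.h>

#define EX_DID_OK	0x00	/* stand-in for DID_OK */
#define EX_DID_ABORT	0x05	/* stand-in for DID_ABORT */

static unsigned int ex_host_byte(unsigned int result)
{
	return (result >> 16) & 0xff;	/* same extraction the midlayer uses */
}

int main(void)
{
	unsigned int result = EX_DID_ABORT << 16;	/* what "err << 16" builds */

	printf("host byte = 0x%02x\n", ex_host_byte(result));
	return 0;
}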
@@ -473,10 +540,7 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
473 scsi_in(sc)->resid = scsi_in(sc)->length; 540 scsi_in(sc)->resid = scsi_in(sc)->length;
474 } 541 }
475 542
476 if (conn->task == task) 543 iscsi_complete_task(task, state);
477 conn->task = NULL;
478 /* release ref from queuecommand */
479 __iscsi_put_task(task);
480} 544}
481 545
482static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, 546static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
@@ -516,7 +580,6 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
516 session->state = ISCSI_STATE_LOGGING_OUT; 580 session->state = ISCSI_STATE_LOGGING_OUT;
517 581
518 task->state = ISCSI_TASK_RUNNING; 582 task->state = ISCSI_TASK_RUNNING;
519 list_move_tail(&task->running, &conn->mgmt_run_list);
520 ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x " 583 ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x "
521 "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK, 584 "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK,
522 hdr->itt, task->data_count); 585 hdr->itt, task->data_count);
@@ -528,6 +591,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
528 char *data, uint32_t data_size) 591 char *data, uint32_t data_size)
529{ 592{
530 struct iscsi_session *session = conn->session; 593 struct iscsi_session *session = conn->session;
594 struct iscsi_host *ihost = shost_priv(session->host);
531 struct iscsi_task *task; 595 struct iscsi_task *task;
532 itt_t itt; 596 itt_t itt;
533 597
@@ -544,6 +608,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
544 */ 608 */
545 task = conn->login_task; 609 task = conn->login_task;
546 else { 610 else {
611 if (session->state != ISCSI_STATE_LOGGED_IN)
612 return NULL;
613
547 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); 614 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
548 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); 615 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
549 616
@@ -559,6 +626,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
559 atomic_set(&task->refcount, 1); 626 atomic_set(&task->refcount, 1);
560 task->conn = conn; 627 task->conn = conn;
561 task->sc = NULL; 628 task->sc = NULL;
629 INIT_LIST_HEAD(&task->running);
630 task->state = ISCSI_TASK_PENDING;
562 631
563 if (data_size) { 632 if (data_size) {
564 memcpy(task->data, data, data_size); 633 memcpy(task->data, data, data_size);
@@ -566,11 +635,14 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
566 } else 635 } else
567 task->data_count = 0; 636 task->data_count = 0;
568 637
-	if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
-		iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
-				  "pdu for mgmt task.\n");
-		goto requeue_task;
+	if (conn->session->tt->alloc_pdu) {
+		if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
+			iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
+					  "pdu for mgmt task.\n");
+			goto free_task;
+		}
573 } 644 }
+
574 itt = task->hdr->itt; 646 itt = task->hdr->itt;
575 task->hdr_len = sizeof(struct iscsi_hdr); 647 task->hdr_len = sizeof(struct iscsi_hdr);
576 memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr)); 648 memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
@@ -583,30 +655,22 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
583 task->conn->session->age); 655 task->conn->session->age);
584 } 656 }
585 657
-	INIT_LIST_HEAD(&task->running);
-	list_add_tail(&task->running, &conn->mgmtqueue);
-
-	if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+	if (!ihost->workq) {
 		if (iscsi_prep_mgmt_task(conn, task))
 			goto free_task;
 
 		if (session->tt->xmit_task(task))
 			goto free_task;
-
-	} else
+	} else {
+		list_add_tail(&task->running, &conn->mgmtqueue);
 		iscsi_conn_queue_work(conn);
+	}
598 668
599 return task; 669 return task;
600 670
601free_task: 671free_task:
602 __iscsi_put_task(task); 672 __iscsi_put_task(task);
603 return NULL; 673 return NULL;
-
-requeue_task:
-	if (task != conn->login_task)
-		__kfifo_put(session->cmdpool.queue, (void*)&task,
-			    sizeof(void*));
-	return NULL;
610} 674}
611 675
612int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr, 676int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -701,11 +765,10 @@ invalid_datalen:
701 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; 765 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
702 } 766 }
703out: 767out:
704 ISCSI_DBG_SESSION(session, "done [sc %p res %d itt 0x%x]\n", 768 ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n",
705 sc, sc->result, task->itt); 769 sc, sc->result, task->itt);
706 conn->scsirsp_pdus_cnt++; 770 conn->scsirsp_pdus_cnt++;
-
-	__iscsi_put_task(task);
+	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
709} 772}
710 773
711/** 774/**
@@ -724,6 +787,7 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
724 if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS)) 787 if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
725 return; 788 return;
726 789
790 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin *)hdr);
727 sc->result = (DID_OK << 16) | rhdr->cmd_status; 791 sc->result = (DID_OK << 16) | rhdr->cmd_status;
728 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; 792 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
729 if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW | 793 if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
@@ -738,8 +802,11 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
738 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; 802 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
739 } 803 }
740 804
805 ISCSI_DBG_SESSION(conn->session, "data in with status done "
806 "[sc %p res %d itt 0x%x]\n",
807 sc, sc->result, task->itt);
741 conn->scsirsp_pdus_cnt++; 808 conn->scsirsp_pdus_cnt++;
742 __iscsi_put_task(task); 809 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
743} 810}
744 811
745static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) 812static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
@@ -823,7 +890,7 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
823 * 890 *
824 * The session lock must be held. 891 * The session lock must be held.
825 */ 892 */
826static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt) 893struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
827{ 894{
828 struct iscsi_session *session = conn->session; 895 struct iscsi_session *session = conn->session;
829 int i; 896 int i;
@@ -840,6 +907,7 @@ static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
840 907
841 return session->cmds[i]; 908 return session->cmds[i];
842} 909}
910EXPORT_SYMBOL_GPL(iscsi_itt_to_task);
843 911
844/** 912/**
845 * __iscsi_complete_pdu - complete pdu 913 * __iscsi_complete_pdu - complete pdu
@@ -909,6 +977,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
909 task = iscsi_itt_to_ctask(conn, hdr->itt); 977 task = iscsi_itt_to_ctask(conn, hdr->itt);
910 if (!task) 978 if (!task)
911 return ISCSI_ERR_BAD_ITT; 979 return ISCSI_ERR_BAD_ITT;
980 task->last_xfer = jiffies;
912 break; 981 break;
913 case ISCSI_OP_R2T: 982 case ISCSI_OP_R2T:
914 /* 983 /*
@@ -959,7 +1028,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
959 } 1028 }
960 1029
961 iscsi_tmf_rsp(conn, hdr); 1030 iscsi_tmf_rsp(conn, hdr);
962 __iscsi_put_task(task); 1031 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
963 break; 1032 break;
964 case ISCSI_OP_NOOP_IN: 1033 case ISCSI_OP_NOOP_IN:
965 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); 1034 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
@@ -977,7 +1046,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
977 goto recv_pdu; 1046 goto recv_pdu;
978 1047
979 mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout); 1048 mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
980 __iscsi_put_task(task); 1049 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
981 break; 1050 break;
982 default: 1051 default:
983 rc = ISCSI_ERR_BAD_OPCODE; 1052 rc = ISCSI_ERR_BAD_OPCODE;
@@ -989,7 +1058,7 @@ out:
989recv_pdu: 1058recv_pdu:
990 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) 1059 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
991 rc = ISCSI_ERR_CONN_FAILED; 1060 rc = ISCSI_ERR_CONN_FAILED;
992 __iscsi_put_task(task); 1061 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
993 return rc; 1062 return rc;
994} 1063}
995EXPORT_SYMBOL_GPL(__iscsi_complete_pdu); 1064EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
@@ -1147,10 +1216,12 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
1147 spin_unlock_bh(&conn->session->lock); 1216 spin_unlock_bh(&conn->session->lock);
1148 rc = conn->session->tt->xmit_task(task); 1217 rc = conn->session->tt->xmit_task(task);
1149 spin_lock_bh(&conn->session->lock); 1218 spin_lock_bh(&conn->session->lock);
-	__iscsi_put_task(task);
-	if (!rc)
+	if (!rc) {
 		/* done with this task */
+		task->last_xfer = jiffies;
 		conn->task = NULL;
+	}
+	__iscsi_put_task(task);
1154 return rc; 1225 return rc;
1155} 1226}
1156 1227
@@ -1166,7 +1237,12 @@ void iscsi_requeue_task(struct iscsi_task *task)
1166{ 1237{
1167 struct iscsi_conn *conn = task->conn; 1238 struct iscsi_conn *conn = task->conn;
1168 1239
1169 list_move_tail(&task->running, &conn->requeue); 1240 /*
1241 * this may be on the requeue list already if the xmit_task callout
1242 * is handling the r2ts while we are adding new ones
1243 */
1244 if (list_empty(&task->running))
1245 list_add_tail(&task->running, &conn->requeue);
1170 iscsi_conn_queue_work(conn); 1246 iscsi_conn_queue_work(conn);
1171} 1247}
1172EXPORT_SYMBOL_GPL(iscsi_requeue_task); 1248EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1206,6 +1282,7 @@ check_mgmt:
1206 while (!list_empty(&conn->mgmtqueue)) { 1282 while (!list_empty(&conn->mgmtqueue)) {
1207 conn->task = list_entry(conn->mgmtqueue.next, 1283 conn->task = list_entry(conn->mgmtqueue.next,
1208 struct iscsi_task, running); 1284 struct iscsi_task, running);
1285 list_del_init(&conn->task->running);
1209 if (iscsi_prep_mgmt_task(conn, conn->task)) { 1286 if (iscsi_prep_mgmt_task(conn, conn->task)) {
1210 __iscsi_put_task(conn->task); 1287 __iscsi_put_task(conn->task);
1211 conn->task = NULL; 1288 conn->task = NULL;
@@ -1217,23 +1294,26 @@ check_mgmt:
1217 } 1294 }
1218 1295
1219 /* process pending command queue */ 1296 /* process pending command queue */
1220 while (!list_empty(&conn->xmitqueue)) { 1297 while (!list_empty(&conn->cmdqueue)) {
1221 if (conn->tmf_state == TMF_QUEUED) 1298 if (conn->tmf_state == TMF_QUEUED)
1222 break; 1299 break;
1223 1300
1224 conn->task = list_entry(conn->xmitqueue.next, 1301 conn->task = list_entry(conn->cmdqueue.next,
1225 struct iscsi_task, running); 1302 struct iscsi_task, running);
1303 list_del_init(&conn->task->running);
1226 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { 1304 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
1227 fail_command(conn, conn->task, DID_IMM_RETRY << 16); 1305 fail_scsi_task(conn->task, DID_IMM_RETRY);
1228 continue; 1306 continue;
1229 } 1307 }
1230 rc = iscsi_prep_scsi_cmd_pdu(conn->task); 1308 rc = iscsi_prep_scsi_cmd_pdu(conn->task);
1231 if (rc) { 1309 if (rc) {
1232 if (rc == -ENOMEM) { 1310 if (rc == -ENOMEM) {
1311 list_add_tail(&conn->task->running,
1312 &conn->cmdqueue);
1233 conn->task = NULL; 1313 conn->task = NULL;
1234 goto again; 1314 goto again;
1235 } else 1315 } else
1236 fail_command(conn, conn->task, DID_ABORT << 16); 1316 fail_scsi_task(conn->task, DID_ABORT);
1237 continue; 1317 continue;
1238 } 1318 }
1239 rc = iscsi_xmit_task(conn); 1319 rc = iscsi_xmit_task(conn);
@@ -1260,8 +1340,8 @@ check_mgmt:
1260 1340
1261 conn->task = list_entry(conn->requeue.next, 1341 conn->task = list_entry(conn->requeue.next,
1262 struct iscsi_task, running); 1342 struct iscsi_task, running);
1343 list_del_init(&conn->task->running);
1263 conn->task->state = ISCSI_TASK_RUNNING; 1344 conn->task->state = ISCSI_TASK_RUNNING;
1264 list_move_tail(conn->requeue.next, &conn->run_list);
1265 rc = iscsi_xmit_task(conn); 1345 rc = iscsi_xmit_task(conn);
1266 if (rc) 1346 if (rc)
1267 goto again; 1347 goto again;
@@ -1307,6 +1387,9 @@ static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
1307 task->state = ISCSI_TASK_PENDING; 1387 task->state = ISCSI_TASK_PENDING;
1308 task->conn = conn; 1388 task->conn = conn;
1309 task->sc = sc; 1389 task->sc = sc;
1390 task->have_checked_conn = false;
1391 task->last_timeout = jiffies;
1392 task->last_xfer = jiffies;
1310 INIT_LIST_HEAD(&task->running); 1393 INIT_LIST_HEAD(&task->running);
1311 return task; 1394 return task;
1312} 1395}
@@ -1328,6 +1411,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1328{ 1411{
1329 struct iscsi_cls_session *cls_session; 1412 struct iscsi_cls_session *cls_session;
1330 struct Scsi_Host *host; 1413 struct Scsi_Host *host;
1414 struct iscsi_host *ihost;
1331 int reason = 0; 1415 int reason = 0;
1332 struct iscsi_session *session; 1416 struct iscsi_session *session;
1333 struct iscsi_conn *conn; 1417 struct iscsi_conn *conn;
@@ -1338,6 +1422,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1338 sc->SCp.ptr = NULL; 1422 sc->SCp.ptr = NULL;
1339 1423
1340 host = sc->device->host; 1424 host = sc->device->host;
1425 ihost = shost_priv(host);
1341 spin_unlock(host->host_lock); 1426 spin_unlock(host->host_lock);
1342 1427
1343 cls_session = starget_to_session(scsi_target(sc->device)); 1428 cls_session = starget_to_session(scsi_target(sc->device));
@@ -1350,13 +1435,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1350 goto fault; 1435 goto fault;
1351 } 1436 }
1352 1437
1353 /* 1438 if (session->state != ISCSI_STATE_LOGGED_IN) {
1354 * ISCSI_STATE_FAILED is a temp. state. The recovery
1355 * code will decide what is best to do with command queued
1356 * during this time
1357 */
1358 if (session->state != ISCSI_STATE_LOGGED_IN &&
1359 session->state != ISCSI_STATE_FAILED) {
1360 /* 1439 /*
1361 * to handle the race between when we set the recovery state 1440 * to handle the race between when we set the recovery state
1362 * and block the session we requeue here (commands could 1441 * and block the session we requeue here (commands could
@@ -1364,12 +1443,15 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1364 * up because the block code is not locked) 1443 * up because the block code is not locked)
1365 */ 1444 */
1366 switch (session->state) { 1445 switch (session->state) {
1446 case ISCSI_STATE_FAILED:
1367 case ISCSI_STATE_IN_RECOVERY: 1447 case ISCSI_STATE_IN_RECOVERY:
1368 reason = FAILURE_SESSION_IN_RECOVERY; 1448 reason = FAILURE_SESSION_IN_RECOVERY;
1369 goto reject; 1449 sc->result = DID_IMM_RETRY << 16;
1450 break;
1370 case ISCSI_STATE_LOGGING_OUT: 1451 case ISCSI_STATE_LOGGING_OUT:
1371 reason = FAILURE_SESSION_LOGGING_OUT; 1452 reason = FAILURE_SESSION_LOGGING_OUT;
1372 goto reject; 1453 sc->result = DID_IMM_RETRY << 16;
1454 break;
1373 case ISCSI_STATE_RECOVERY_FAILED: 1455 case ISCSI_STATE_RECOVERY_FAILED:
1374 reason = FAILURE_SESSION_RECOVERY_TIMEOUT; 1456 reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
1375 sc->result = DID_TRANSPORT_FAILFAST << 16; 1457 sc->result = DID_TRANSPORT_FAILFAST << 16;
@@ -1402,9 +1484,8 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1402 reason = FAILURE_OOM; 1484 reason = FAILURE_OOM;
1403 goto reject; 1485 goto reject;
1404 } 1486 }
1405 list_add_tail(&task->running, &conn->xmitqueue);
1406 1487
1407 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) { 1488 if (!ihost->workq) {
1408 reason = iscsi_prep_scsi_cmd_pdu(task); 1489 reason = iscsi_prep_scsi_cmd_pdu(task);
1409 if (reason) { 1490 if (reason) {
1410 if (reason == -ENOMEM) { 1491 if (reason == -ENOMEM) {
@@ -1419,8 +1500,10 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1419 reason = FAILURE_SESSION_NOT_READY; 1500 reason = FAILURE_SESSION_NOT_READY;
1420 goto prepd_reject; 1501 goto prepd_reject;
1421 } 1502 }
1422 } else 1503 } else {
1504 list_add_tail(&task->running, &conn->cmdqueue);
1423 iscsi_conn_queue_work(conn); 1505 iscsi_conn_queue_work(conn);
1506 }
1424 1507
1425 session->queued_cmdsn++; 1508 session->queued_cmdsn++;
1426 spin_unlock(&session->lock); 1509 spin_unlock(&session->lock);
@@ -1429,7 +1512,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1429 1512
1430prepd_reject: 1513prepd_reject:
1431 sc->scsi_done = NULL; 1514 sc->scsi_done = NULL;
1432 iscsi_complete_command(task); 1515 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1433reject: 1516reject:
1434 spin_unlock(&session->lock); 1517 spin_unlock(&session->lock);
1435 ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n", 1518 ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
@@ -1439,7 +1522,7 @@ reject:
1439 1522
1440prepd_fault: 1523prepd_fault:
1441 sc->scsi_done = NULL; 1524 sc->scsi_done = NULL;
1442 iscsi_complete_command(task); 1525 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1443fault: 1526fault:
1444 spin_unlock(&session->lock); 1527 spin_unlock(&session->lock);
1445 ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n", 1528 ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
@@ -1501,10 +1584,10 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc)
1501 spin_lock_bh(&session->lock); 1584 spin_lock_bh(&session->lock);
1502 if (session->state == ISCSI_STATE_TERMINATE) { 1585 if (session->state == ISCSI_STATE_TERMINATE) {
1503failed: 1586failed:
1504 iscsi_session_printk(KERN_INFO, session, 1587 ISCSI_DBG_EH(session,
1505 "failing target reset: Could not log " 1588 "failing target reset: Could not log back into "
1506 "back into target [age %d]\n", 1589 "target [age %d]\n",
1507 session->age); 1590 session->age);
1508 spin_unlock_bh(&session->lock); 1591 spin_unlock_bh(&session->lock);
1509 mutex_unlock(&session->eh_mutex); 1592 mutex_unlock(&session->eh_mutex);
1510 return FAILED; 1593 return FAILED;
@@ -1518,7 +1601,7 @@ failed:
1518 */ 1601 */
1519 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1602 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1520 1603
1521 ISCSI_DBG_SESSION(session, "wait for relogin\n"); 1604 ISCSI_DBG_EH(session, "wait for relogin\n");
1522 wait_event_interruptible(conn->ehwait, 1605 wait_event_interruptible(conn->ehwait,
1523 session->state == ISCSI_STATE_TERMINATE || 1606 session->state == ISCSI_STATE_TERMINATE ||
1524 session->state == ISCSI_STATE_LOGGED_IN || 1607 session->state == ISCSI_STATE_LOGGED_IN ||
@@ -1528,10 +1611,10 @@ failed:
1528 1611
1529 mutex_lock(&session->eh_mutex); 1612 mutex_lock(&session->eh_mutex);
1530 spin_lock_bh(&session->lock); 1613 spin_lock_bh(&session->lock);
1531 if (session->state == ISCSI_STATE_LOGGED_IN) 1614 if (session->state == ISCSI_STATE_LOGGED_IN) {
1532 iscsi_session_printk(KERN_INFO, session, 1615 ISCSI_DBG_EH(session,
1533 "target reset succeeded\n"); 1616 "target reset succeeded\n");
1534 else 1617 } else
1535 goto failed; 1618 goto failed;
1536 spin_unlock_bh(&session->lock); 1619 spin_unlock_bh(&session->lock);
1537 mutex_unlock(&session->eh_mutex); 1620 mutex_unlock(&session->eh_mutex);
@@ -1547,7 +1630,7 @@ static void iscsi_tmf_timedout(unsigned long data)
1547 spin_lock(&session->lock); 1630 spin_lock(&session->lock);
1548 if (conn->tmf_state == TMF_QUEUED) { 1631 if (conn->tmf_state == TMF_QUEUED) {
1549 conn->tmf_state = TMF_TIMEDOUT; 1632 conn->tmf_state = TMF_TIMEDOUT;
1550 ISCSI_DBG_SESSION(session, "tmf timedout\n"); 1633 ISCSI_DBG_EH(session, "tmf timedout\n");
1551 /* unblock eh_abort() */ 1634 /* unblock eh_abort() */
1552 wake_up(&conn->ehwait); 1635 wake_up(&conn->ehwait);
1553 } 1636 }
@@ -1567,7 +1650,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1567 spin_unlock_bh(&session->lock); 1650 spin_unlock_bh(&session->lock);
1568 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1651 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1569 spin_lock_bh(&session->lock); 1652 spin_lock_bh(&session->lock);
1570 ISCSI_DBG_SESSION(session, "tmf exec failure\n"); 1653 ISCSI_DBG_EH(session, "tmf exec failure\n");
1571 return -EPERM; 1654 return -EPERM;
1572 } 1655 }
1573 conn->tmfcmd_pdus_cnt++; 1656 conn->tmfcmd_pdus_cnt++;
@@ -1575,7 +1658,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1575 conn->tmf_timer.function = iscsi_tmf_timedout; 1658 conn->tmf_timer.function = iscsi_tmf_timedout;
1576 conn->tmf_timer.data = (unsigned long)conn; 1659 conn->tmf_timer.data = (unsigned long)conn;
1577 add_timer(&conn->tmf_timer); 1660 add_timer(&conn->tmf_timer);
1578 ISCSI_DBG_SESSION(session, "tmf set timeout\n"); 1661 ISCSI_DBG_EH(session, "tmf set timeout\n");
1579 1662
1580 spin_unlock_bh(&session->lock); 1663 spin_unlock_bh(&session->lock);
1581 mutex_unlock(&session->eh_mutex); 1664 mutex_unlock(&session->eh_mutex);
@@ -1608,44 +1691,24 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1608 * Fail commands. session lock held and recv side suspended and xmit 1691 * Fail commands. session lock held and recv side suspended and xmit
1609 * thread flushed 1692 * thread flushed
1610 */ 1693 */
1611static void fail_all_commands(struct iscsi_conn *conn, unsigned lun, 1694static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun,
1612 int error) 1695 int error)
1613{ 1696{
1614 struct iscsi_task *task, *tmp; 1697 struct iscsi_task *task;
1615 1698 int i;
1616 if (conn->task) {
1617 if (lun == -1 ||
1618 (conn->task->sc && conn->task->sc->device->lun == lun))
1619 conn->task = NULL;
1620 }
1621 1699
1622 /* flush pending */ 1700 for (i = 0; i < conn->session->cmds_max; i++) {
1623 list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) { 1701 task = conn->session->cmds[i];
1624 if (lun == task->sc->device->lun || lun == -1) { 1702 if (!task->sc || task->state == ISCSI_TASK_FREE)
1625 ISCSI_DBG_SESSION(conn->session, 1703 continue;
1626 "failing pending sc %p itt 0x%x\n",
1627 task->sc, task->itt);
1628 fail_command(conn, task, error << 16);
1629 }
1630 }
1631 1704
1632 list_for_each_entry_safe(task, tmp, &conn->requeue, running) { 1705 if (lun != -1 && lun != task->sc->device->lun)
1633 if (lun == task->sc->device->lun || lun == -1) { 1706 continue;
1634 ISCSI_DBG_SESSION(conn->session,
1635 "failing requeued sc %p itt 0x%x\n",
1636 task->sc, task->itt);
1637 fail_command(conn, task, error << 16);
1638 }
1639 }
1640 1707
1641 /* fail all other running */ 1708 ISCSI_DBG_SESSION(conn->session,
1642 list_for_each_entry_safe(task, tmp, &conn->run_list, running) { 1709 "failing sc %p itt 0x%x state %d\n",
1643 if (lun == task->sc->device->lun || lun == -1) { 1710 task->sc, task->itt, task->state);
1644 ISCSI_DBG_SESSION(conn->session, 1711 fail_scsi_task(task, error);
1645 "failing in progress sc %p itt 0x%x\n",
1646 task->sc, task->itt);
1647 fail_command(conn, task, error << 16);
1648 }
1649 } 1712 }
1650} 1713}
1651 1714
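
The rewritten failure path above sweeps the session's pre-allocated command array instead of walking the per-connection pending, requeue and running lists; with the list_del_init() calls added earlier in this patch a task may already be off those lists when the connection fails, so the array sweep appears to be what guarantees every outstanding SCSI task is caught. For reference, the two error-handling call sites later in this patch use it like this (condensed from those hunks):

	/* LU reset: fail only the tasks queued for the LUN being reset */
	fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);

	/* session recovery: fail every outstanding SCSI task */
	fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
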
@@ -1655,7 +1718,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
1655 struct iscsi_host *ihost = shost_priv(shost); 1718 struct iscsi_host *ihost = shost_priv(shost);
1656 1719
1657 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1720 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1658 if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD)) 1721 if (ihost->workq)
1659 flush_workqueue(ihost->workq); 1722 flush_workqueue(ihost->workq);
1660} 1723}
1661EXPORT_SYMBOL_GPL(iscsi_suspend_tx); 1724EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
@@ -1663,21 +1726,37 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
1663static void iscsi_start_tx(struct iscsi_conn *conn) 1726static void iscsi_start_tx(struct iscsi_conn *conn)
1664{ 1727{
1665 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1728 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1666 if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD)) 1729 iscsi_conn_queue_work(conn);
1667 iscsi_conn_queue_work(conn);
1668} 1730}
1669 1731
1670static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) 1732/*
1733 * We want to make sure that a ping is in flight and that it has
1734 * timed out, and that we are not just busy processing a pdu that is
1735 * making progress but was started before the ping and is taking a
1736 * while to complete, leaving the ping stuck behind it in a queue.
1737 */
1738static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
1671{ 1739{
1740 if (conn->ping_task &&
1741 time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
1742 (conn->ping_timeout * HZ), jiffies))
1743 return 1;
1744 else
1745 return 0;
1746}
1747
1748static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
1749{
1750 enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
1751 struct iscsi_task *task = NULL;
1672 struct iscsi_cls_session *cls_session; 1752 struct iscsi_cls_session *cls_session;
1673 struct iscsi_session *session; 1753 struct iscsi_session *session;
1674 struct iscsi_conn *conn; 1754 struct iscsi_conn *conn;
1675 enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
1676 1755
1677 cls_session = starget_to_session(scsi_target(scmd->device)); 1756 cls_session = starget_to_session(scsi_target(sc->device));
1678 session = cls_session->dd_data; 1757 session = cls_session->dd_data;
1679 1758
1680 ISCSI_DBG_SESSION(session, "scsi cmd %p timedout\n", scmd); 1759 ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
1681 1760
1682 spin_lock(&session->lock); 1761 spin_lock(&session->lock);
1683 if (session->state != ISCSI_STATE_LOGGED_IN) { 1762 if (session->state != ISCSI_STATE_LOGGED_IN) {
@@ -1696,29 +1775,62 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
1696 goto done; 1775 goto done;
1697 } 1776 }
1698 1777
1778 task = (struct iscsi_task *)sc->SCp.ptr;
1779 if (!task)
1780 goto done;
1781 /*
1782 * If we have sent (at least queued to the network layer) a pdu or
1783 * received one for this task since the last timeout, ask for
1784 * more time. If we have not made progress by the next timeout,
1785 * we can check whether it is the task or the connection by sending
1786 * the nop as a ping.
1787 */
1788 if (time_after_eq(task->last_xfer, task->last_timeout)) {
1789 ISCSI_DBG_EH(session, "Command making progress. Asking "
1790 "scsi-ml for more time to complete. "
1791 "Last data recv at %lu. Last timeout was at "
1792 "%lu\n.", task->last_xfer, task->last_timeout);
1793 task->have_checked_conn = false;
1794 rc = BLK_EH_RESET_TIMER;
1795 goto done;
1796 }
1797
1699 if (!conn->recv_timeout && !conn->ping_timeout) 1798 if (!conn->recv_timeout && !conn->ping_timeout)
1700 goto done; 1799 goto done;
1701 /* 1800 /*
1702 * if the ping timedout then we are in the middle of cleaning up 1801 * if the ping timedout then we are in the middle of cleaning up
1703 * and can let the iscsi eh handle it 1802 * and can let the iscsi eh handle it
1704 */ 1803 */
1705 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) + 1804 if (iscsi_has_ping_timed_out(conn)) {
1706 (conn->ping_timeout * HZ), jiffies))
1707 rc = BLK_EH_RESET_TIMER; 1805 rc = BLK_EH_RESET_TIMER;
1806 goto done;
1807 }
1808
1809 /* Assumes nop timeout is shorter than scsi cmd timeout */
1810 if (task->have_checked_conn)
1811 goto done;
1812
1708 /* 1813 /*
1709 * if we are about to check the transport then give the command 1814 * Already checking the transport, or a nop from an earlier cmd
1710 * more time 1815 * timeout is still running
1711 */ 1816 */
1712 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ), 1817 if (conn->ping_task) {
1713 jiffies)) 1818 task->have_checked_conn = true;
1714 rc = BLK_EH_RESET_TIMER;
1715 /* if in the middle of checking the transport then give us more time */
1716 if (conn->ping_task)
1717 rc = BLK_EH_RESET_TIMER; 1819 rc = BLK_EH_RESET_TIMER;
1820 goto done;
1821 }
1822
1823 /* Make sure there is a transport check done */
1824 iscsi_send_nopout(conn, NULL);
1825 task->have_checked_conn = true;
1826 rc = BLK_EH_RESET_TIMER;
1827
1718done: 1828done:
1829 if (task)
1830 task->last_timeout = jiffies;
1719 spin_unlock(&session->lock); 1831 spin_unlock(&session->lock);
1720 ISCSI_DBG_SESSION(session, "return %s\n", rc == BLK_EH_RESET_TIMER ? 1832 ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
1721 "timer reset" : "nh"); 1833 "timer reset" : "nh");
1722 return rc; 1834 return rc;
1723} 1835}
1724 1836
@@ -1738,13 +1850,13 @@ static void iscsi_check_transport_timeouts(unsigned long data)
1738 1850
1739 recv_timeout *= HZ; 1851 recv_timeout *= HZ;
1740 last_recv = conn->last_recv; 1852 last_recv = conn->last_recv;
1741 if (conn->ping_task && 1853
1742 time_before_eq(conn->last_ping + (conn->ping_timeout * HZ), 1854 if (iscsi_has_ping_timed_out(conn)) {
1743 jiffies)) {
1744 iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs " 1855 iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
1745 "expired, last rx %lu, last ping %lu, " 1856 "expired, recv timeout %d, last rx %lu, "
1746 "now %lu\n", conn->ping_timeout, last_recv, 1857 "last ping %lu, now %lu\n",
1747 conn->last_ping, jiffies); 1858 conn->ping_timeout, conn->recv_timeout,
1859 last_recv, conn->last_ping, jiffies);
1748 spin_unlock(&session->lock); 1860 spin_unlock(&session->lock);
1749 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1861 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1750 return; 1862 return;
@@ -1788,6 +1900,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1788 cls_session = starget_to_session(scsi_target(sc->device)); 1900 cls_session = starget_to_session(scsi_target(sc->device));
1789 session = cls_session->dd_data; 1901 session = cls_session->dd_data;
1790 1902
1903 ISCSI_DBG_EH(session, "aborting sc %p\n", sc);
1904
1791 mutex_lock(&session->eh_mutex); 1905 mutex_lock(&session->eh_mutex);
1792 spin_lock_bh(&session->lock); 1906 spin_lock_bh(&session->lock);
1793 /* 1907 /*
@@ -1795,8 +1909,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1795 * got the command. 1909 * got the command.
1796 */ 1910 */
1797 if (!sc->SCp.ptr) { 1911 if (!sc->SCp.ptr) {
1798 ISCSI_DBG_SESSION(session, "sc never reached iscsi layer or " 1912 ISCSI_DBG_EH(session, "sc never reached iscsi layer or "
1799 "it completed.\n"); 1913 "it completed.\n");
1800 spin_unlock_bh(&session->lock); 1914 spin_unlock_bh(&session->lock);
1801 mutex_unlock(&session->eh_mutex); 1915 mutex_unlock(&session->eh_mutex);
1802 return SUCCESS; 1916 return SUCCESS;
@@ -1810,6 +1924,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1810 sc->SCp.phase != session->age) { 1924 sc->SCp.phase != session->age) {
1811 spin_unlock_bh(&session->lock); 1925 spin_unlock_bh(&session->lock);
1812 mutex_unlock(&session->eh_mutex); 1926 mutex_unlock(&session->eh_mutex);
1927 ISCSI_DBG_EH(session, "failing abort due to dropped "
1928 "session.\n");
1813 return FAILED; 1929 return FAILED;
1814 } 1930 }
1815 1931
@@ -1818,18 +1934,17 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1818 age = session->age; 1934 age = session->age;
1819 1935
1820 task = (struct iscsi_task *)sc->SCp.ptr; 1936 task = (struct iscsi_task *)sc->SCp.ptr;
1821 ISCSI_DBG_SESSION(session, "aborting [sc %p itt 0x%x]\n", 1937 ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n",
1822 sc, task->itt); 1938 sc, task->itt);
1823 1939
1824 /* task completed before time out */ 1940 /* task completed before time out */
1825 if (!task->sc) { 1941 if (!task->sc) {
1826 ISCSI_DBG_SESSION(session, "sc completed while abort in " 1942 ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
1827 "progress\n");
1828 goto success; 1943 goto success;
1829 } 1944 }
1830 1945
1831 if (task->state == ISCSI_TASK_PENDING) { 1946 if (task->state == ISCSI_TASK_PENDING) {
1832 fail_command(conn, task, DID_ABORT << 16); 1947 fail_scsi_task(task, DID_ABORT);
1833 goto success; 1948 goto success;
1834 } 1949 }
1835 1950
@@ -1860,7 +1975,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1860 * then sent more data for the cmd. 1975 * then sent more data for the cmd.
1861 */ 1976 */
1862 spin_lock(&session->lock); 1977 spin_lock(&session->lock);
1863 fail_command(conn, task, DID_ABORT << 16); 1978 fail_scsi_task(task, DID_ABORT);
1864 conn->tmf_state = TMF_INITIAL; 1979 conn->tmf_state = TMF_INITIAL;
1865 spin_unlock(&session->lock); 1980 spin_unlock(&session->lock);
1866 iscsi_start_tx(conn); 1981 iscsi_start_tx(conn);
@@ -1873,8 +1988,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1873 if (!sc->SCp.ptr) { 1988 if (!sc->SCp.ptr) {
1874 conn->tmf_state = TMF_INITIAL; 1989 conn->tmf_state = TMF_INITIAL;
1875 /* task completed before tmf abort response */ 1990 /* task completed before tmf abort response */
1876 ISCSI_DBG_SESSION(session, "sc completed while abort " 1991 ISCSI_DBG_EH(session, "sc completed while abort in "
1877 "in progress\n"); 1992 "progress\n");
1878 goto success; 1993 goto success;
1879 } 1994 }
1880 /* fall through */ 1995 /* fall through */
@@ -1886,16 +2001,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1886success: 2001success:
1887 spin_unlock_bh(&session->lock); 2002 spin_unlock_bh(&session->lock);
1888success_unlocked: 2003success_unlocked:
1889 ISCSI_DBG_SESSION(session, "abort success [sc %p itt 0x%x]\n", 2004 ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n",
1890 sc, task->itt); 2005 sc, task->itt);
1891 mutex_unlock(&session->eh_mutex); 2006 mutex_unlock(&session->eh_mutex);
1892 return SUCCESS; 2007 return SUCCESS;
1893 2008
1894failed: 2009failed:
1895 spin_unlock_bh(&session->lock); 2010 spin_unlock_bh(&session->lock);
1896failed_unlocked: 2011failed_unlocked:
1897 ISCSI_DBG_SESSION(session, "abort failed [sc %p itt 0x%x]\n", sc, 2012 ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc,
1898 task ? task->itt : 0); 2013 task ? task->itt : 0);
1899 mutex_unlock(&session->eh_mutex); 2014 mutex_unlock(&session->eh_mutex);
1900 return FAILED; 2015 return FAILED;
1901} 2016}
@@ -1922,8 +2037,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
1922 cls_session = starget_to_session(scsi_target(sc->device)); 2037 cls_session = starget_to_session(scsi_target(sc->device));
1923 session = cls_session->dd_data; 2038 session = cls_session->dd_data;
1924 2039
1925 ISCSI_DBG_SESSION(session, "LU Reset [sc %p lun %u]\n", 2040 ISCSI_DBG_EH(session, "LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
1926 sc, sc->device->lun);
1927 2041
1928 mutex_lock(&session->eh_mutex); 2042 mutex_lock(&session->eh_mutex);
1929 spin_lock_bh(&session->lock); 2043 spin_lock_bh(&session->lock);
@@ -1967,7 +2081,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
1967 iscsi_suspend_tx(conn); 2081 iscsi_suspend_tx(conn);
1968 2082
1969 spin_lock_bh(&session->lock); 2083 spin_lock_bh(&session->lock);
1970 fail_all_commands(conn, sc->device->lun, DID_ERROR); 2084 fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
1971 conn->tmf_state = TMF_INITIAL; 2085 conn->tmf_state = TMF_INITIAL;
1972 spin_unlock_bh(&session->lock); 2086 spin_unlock_bh(&session->lock);
1973 2087
@@ -1977,8 +2091,8 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
1977unlock: 2091unlock:
1978 spin_unlock_bh(&session->lock); 2092 spin_unlock_bh(&session->lock);
1979done: 2093done:
1980 ISCSI_DBG_SESSION(session, "dev reset result = %s\n", 2094 ISCSI_DBG_EH(session, "dev reset result = %s\n",
1981 rc == SUCCESS ? "SUCCESS" : "FAILED"); 2095 rc == SUCCESS ? "SUCCESS" : "FAILED");
1982 mutex_unlock(&session->eh_mutex); 2096 mutex_unlock(&session->eh_mutex);
1983 return rc; 2097 return rc;
1984} 2098}
@@ -2274,6 +2388,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
2274 if (cmd_task_size) 2388 if (cmd_task_size)
2275 task->dd_data = &task[1]; 2389 task->dd_data = &task[1];
2276 task->itt = cmd_i; 2390 task->itt = cmd_i;
2391 task->state = ISCSI_TASK_FREE;
2277 INIT_LIST_HEAD(&task->running); 2392 INIT_LIST_HEAD(&task->running);
2278 } 2393 }
2279 2394
@@ -2360,10 +2475,8 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
2360 conn->transport_timer.data = (unsigned long)conn; 2475 conn->transport_timer.data = (unsigned long)conn;
2361 conn->transport_timer.function = iscsi_check_transport_timeouts; 2476 conn->transport_timer.function = iscsi_check_transport_timeouts;
2362 2477
2363 INIT_LIST_HEAD(&conn->run_list);
2364 INIT_LIST_HEAD(&conn->mgmt_run_list);
2365 INIT_LIST_HEAD(&conn->mgmtqueue); 2478 INIT_LIST_HEAD(&conn->mgmtqueue);
2366 INIT_LIST_HEAD(&conn->xmitqueue); 2479 INIT_LIST_HEAD(&conn->cmdqueue);
2367 INIT_LIST_HEAD(&conn->requeue); 2480 INIT_LIST_HEAD(&conn->requeue);
2368 INIT_WORK(&conn->xmitwork, iscsi_xmitworker); 2481 INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
2369 2482
@@ -2531,27 +2644,28 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
2531EXPORT_SYMBOL_GPL(iscsi_conn_start); 2644EXPORT_SYMBOL_GPL(iscsi_conn_start);
2532 2645
2533static void 2646static void
2534flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn) 2647fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
2535{ 2648{
2536 struct iscsi_task *task, *tmp; 2649 struct iscsi_task *task;
2650 int i, state;
2537 2651
2538 /* handle pending */ 2652 for (i = 0; i < conn->session->cmds_max; i++) {
2539 list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) { 2653 task = conn->session->cmds[i];
2540 ISCSI_DBG_SESSION(session, "flushing pending mgmt task " 2654 if (task->sc)
2541 "itt 0x%x\n", task->itt); 2655 continue;
2542 /* release ref from prep task */
2543 __iscsi_put_task(task);
2544 }
2545 2656
2546 /* handle running */ 2657 if (task->state == ISCSI_TASK_FREE)
2547 list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) { 2658 continue;
2548 ISCSI_DBG_SESSION(session, "flushing running mgmt task " 2659
2549 "itt 0x%x\n", task->itt); 2660 ISCSI_DBG_SESSION(conn->session,
2550 /* release ref from prep task */ 2661 "failing mgmt itt 0x%x state %d\n",
2551 __iscsi_put_task(task); 2662 task->itt, task->state);
2552 } 2663 state = ISCSI_TASK_ABRT_SESS_RECOV;
2664 if (task->state == ISCSI_TASK_PENDING)
2665 state = ISCSI_TASK_COMPLETED;
2666 iscsi_complete_task(task, state);
2553 2667
2554 conn->task = NULL; 2668 }
2555} 2669}
2556 2670
2557static void iscsi_start_session_recovery(struct iscsi_session *session, 2671static void iscsi_start_session_recovery(struct iscsi_session *session,
@@ -2559,8 +2673,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2559{ 2673{
2560 int old_stop_stage; 2674 int old_stop_stage;
2561 2675
2562 del_timer_sync(&conn->transport_timer);
2563
2564 mutex_lock(&session->eh_mutex); 2676 mutex_lock(&session->eh_mutex);
2565 spin_lock_bh(&session->lock); 2677 spin_lock_bh(&session->lock);
2566 if (conn->stop_stage == STOP_CONN_TERM) { 2678 if (conn->stop_stage == STOP_CONN_TERM) {
@@ -2578,13 +2690,17 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2578 session->state = ISCSI_STATE_TERMINATE; 2690 session->state = ISCSI_STATE_TERMINATE;
2579 else if (conn->stop_stage != STOP_CONN_RECOVER) 2691 else if (conn->stop_stage != STOP_CONN_RECOVER)
2580 session->state = ISCSI_STATE_IN_RECOVERY; 2692 session->state = ISCSI_STATE_IN_RECOVERY;
2693 spin_unlock_bh(&session->lock);
2694
2695 del_timer_sync(&conn->transport_timer);
2696 iscsi_suspend_tx(conn);
2581 2697
2698 spin_lock_bh(&session->lock);
2582 old_stop_stage = conn->stop_stage; 2699 old_stop_stage = conn->stop_stage;
2583 conn->stop_stage = flag; 2700 conn->stop_stage = flag;
2584 conn->c_stage = ISCSI_CONN_STOPPED; 2701 conn->c_stage = ISCSI_CONN_STOPPED;
2585 spin_unlock_bh(&session->lock); 2702 spin_unlock_bh(&session->lock);
2586 2703
2587 iscsi_suspend_tx(conn);
2588 /* 2704 /*
2589 * for connection level recovery we should not calculate 2705 * for connection level recovery we should not calculate
2590 * header digest. conn->hdr_size used for optimization 2706 * header digest. conn->hdr_size used for optimization
@@ -2605,11 +2721,8 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2605 * flush queues. 2721 * flush queues.
2606 */ 2722 */
2607 spin_lock_bh(&session->lock); 2723 spin_lock_bh(&session->lock);
2608 if (flag == STOP_CONN_RECOVER) 2724 fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
2609 fail_all_commands(conn, -1, DID_TRANSPORT_DISRUPTED); 2725 fail_mgmt_tasks(session, conn);
2610 else
2611 fail_all_commands(conn, -1, DID_ERROR);
2612 flush_control_queues(session, conn);
2613 spin_unlock_bh(&session->lock); 2726 spin_unlock_bh(&session->lock);
2614 mutex_unlock(&session->eh_mutex); 2727 mutex_unlock(&session->eh_mutex);
2615} 2728}
@@ -2651,6 +2764,23 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
2651} 2764}
2652EXPORT_SYMBOL_GPL(iscsi_conn_bind); 2765EXPORT_SYMBOL_GPL(iscsi_conn_bind);
2653 2766
2767static int iscsi_switch_str_param(char **param, char *new_val_buf)
2768{
2769 char *new_val;
2770
2771 if (*param) {
2772 if (!strcmp(*param, new_val_buf))
2773 return 0;
2774 }
2775
2776 new_val = kstrdup(new_val_buf, GFP_NOIO);
2777 if (!new_val)
2778 return -ENOMEM;
2779
2780 kfree(*param);
2781 *param = new_val;
2782 return 0;
2783}
2654 2784
2655int iscsi_set_param(struct iscsi_cls_conn *cls_conn, 2785int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
2656 enum iscsi_param param, char *buf, int buflen) 2786 enum iscsi_param param, char *buf, int buflen)
@@ -2723,38 +2853,15 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
2723 sscanf(buf, "%u", &conn->exp_statsn); 2853 sscanf(buf, "%u", &conn->exp_statsn);
2724 break; 2854 break;
2725 case ISCSI_PARAM_USERNAME: 2855 case ISCSI_PARAM_USERNAME:
2726 kfree(session->username); 2856 return iscsi_switch_str_param(&session->username, buf);
2727 session->username = kstrdup(buf, GFP_KERNEL);
2728 if (!session->username)
2729 return -ENOMEM;
2730 break;
2731 case ISCSI_PARAM_USERNAME_IN: 2857 case ISCSI_PARAM_USERNAME_IN:
2732 kfree(session->username_in); 2858 return iscsi_switch_str_param(&session->username_in, buf);
2733 session->username_in = kstrdup(buf, GFP_KERNEL);
2734 if (!session->username_in)
2735 return -ENOMEM;
2736 break;
2737 case ISCSI_PARAM_PASSWORD: 2859 case ISCSI_PARAM_PASSWORD:
2738 kfree(session->password); 2860 return iscsi_switch_str_param(&session->password, buf);
2739 session->password = kstrdup(buf, GFP_KERNEL);
2740 if (!session->password)
2741 return -ENOMEM;
2742 break;
2743 case ISCSI_PARAM_PASSWORD_IN: 2861 case ISCSI_PARAM_PASSWORD_IN:
2744 kfree(session->password_in); 2862 return iscsi_switch_str_param(&session->password_in, buf);
2745 session->password_in = kstrdup(buf, GFP_KERNEL);
2746 if (!session->password_in)
2747 return -ENOMEM;
2748 break;
2749 case ISCSI_PARAM_TARGET_NAME: 2863 case ISCSI_PARAM_TARGET_NAME:
2750 /* this should not change between logins */ 2864 return iscsi_switch_str_param(&session->targetname, buf);
2751 if (session->targetname)
2752 break;
2753
2754 session->targetname = kstrdup(buf, GFP_KERNEL);
2755 if (!session->targetname)
2756 return -ENOMEM;
2757 break;
2758 case ISCSI_PARAM_TPGT: 2865 case ISCSI_PARAM_TPGT:
2759 sscanf(buf, "%d", &session->tpgt); 2866 sscanf(buf, "%d", &session->tpgt);
2760 break; 2867 break;
@@ -2762,25 +2869,11 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
2762 sscanf(buf, "%d", &conn->persistent_port); 2869 sscanf(buf, "%d", &conn->persistent_port);
2763 break; 2870 break;
2764 case ISCSI_PARAM_PERSISTENT_ADDRESS: 2871 case ISCSI_PARAM_PERSISTENT_ADDRESS:
2765 /* 2872 return iscsi_switch_str_param(&conn->persistent_address, buf);
2766 * this is the address returned in discovery so it should
2767 * not change between logins.
2768 */
2769 if (conn->persistent_address)
2770 break;
2771
2772 conn->persistent_address = kstrdup(buf, GFP_KERNEL);
2773 if (!conn->persistent_address)
2774 return -ENOMEM;
2775 break;
2776 case ISCSI_PARAM_IFACE_NAME: 2873 case ISCSI_PARAM_IFACE_NAME:
2777 if (!session->ifacename) 2874 return iscsi_switch_str_param(&session->ifacename, buf);
2778 session->ifacename = kstrdup(buf, GFP_KERNEL);
2779 break;
2780 case ISCSI_PARAM_INITIATOR_NAME: 2875 case ISCSI_PARAM_INITIATOR_NAME:
2781 if (!session->initiatorname) 2876 return iscsi_switch_str_param(&session->initiatorname, buf);
2782 session->initiatorname = kstrdup(buf, GFP_KERNEL);
2783 break;
2784 default: 2877 default:
2785 return -ENOSYS; 2878 return -ENOSYS;
2786 } 2879 }
@@ -2851,10 +2944,7 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
2851 len = sprintf(buf, "%s\n", session->ifacename); 2944 len = sprintf(buf, "%s\n", session->ifacename);
2852 break; 2945 break;
2853 case ISCSI_PARAM_INITIATOR_NAME: 2946 case ISCSI_PARAM_INITIATOR_NAME:
2854 if (!session->initiatorname) 2947 len = sprintf(buf, "%s\n", session->initiatorname);
2855 len = sprintf(buf, "%s\n", "unknown");
2856 else
2857 len = sprintf(buf, "%s\n", session->initiatorname);
2858 break; 2948 break;
2859 default: 2949 default:
2860 return -ENOSYS; 2950 return -ENOSYS;
@@ -2920,29 +3010,16 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2920 3010
2921 switch (param) { 3011 switch (param) {
2922 case ISCSI_HOST_PARAM_NETDEV_NAME: 3012 case ISCSI_HOST_PARAM_NETDEV_NAME:
2923 if (!ihost->netdev) 3013 len = sprintf(buf, "%s\n", ihost->netdev);
2924 len = sprintf(buf, "%s\n", "default");
2925 else
2926 len = sprintf(buf, "%s\n", ihost->netdev);
2927 break; 3014 break;
2928 case ISCSI_HOST_PARAM_HWADDRESS: 3015 case ISCSI_HOST_PARAM_HWADDRESS:
2929 if (!ihost->hwaddress) 3016 len = sprintf(buf, "%s\n", ihost->hwaddress);
2930 len = sprintf(buf, "%s\n", "default");
2931 else
2932 len = sprintf(buf, "%s\n", ihost->hwaddress);
2933 break; 3017 break;
2934 case ISCSI_HOST_PARAM_INITIATOR_NAME: 3018 case ISCSI_HOST_PARAM_INITIATOR_NAME:
2935 if (!ihost->initiatorname) 3019 len = sprintf(buf, "%s\n", ihost->initiatorname);
2936 len = sprintf(buf, "%s\n", "unknown");
2937 else
2938 len = sprintf(buf, "%s\n", ihost->initiatorname);
2939 break; 3020 break;
2940 case ISCSI_HOST_PARAM_IPADDRESS: 3021 case ISCSI_HOST_PARAM_IPADDRESS:
2941 if (!strlen(ihost->local_address)) 3022 len = sprintf(buf, "%s\n", ihost->local_address);
2942 len = sprintf(buf, "%s\n", "unknown");
2943 else
2944 len = sprintf(buf, "%s\n",
2945 ihost->local_address);
2946 break; 3023 break;
2947 default: 3024 default:
2948 return -ENOSYS; 3025 return -ENOSYS;
@@ -2959,17 +3036,11 @@ int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2959 3036
2960 switch (param) { 3037 switch (param) {
2961 case ISCSI_HOST_PARAM_NETDEV_NAME: 3038 case ISCSI_HOST_PARAM_NETDEV_NAME:
2962 if (!ihost->netdev) 3039 return iscsi_switch_str_param(&ihost->netdev, buf);
2963 ihost->netdev = kstrdup(buf, GFP_KERNEL);
2964 break;
2965 case ISCSI_HOST_PARAM_HWADDRESS: 3040 case ISCSI_HOST_PARAM_HWADDRESS:
2966 if (!ihost->hwaddress) 3041 return iscsi_switch_str_param(&ihost->hwaddress, buf);
2967 ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
2968 break;
2969 case ISCSI_HOST_PARAM_INITIATOR_NAME: 3042 case ISCSI_HOST_PARAM_INITIATOR_NAME:
2970 if (!ihost->initiatorname) 3043 return iscsi_switch_str_param(&ihost->initiatorname, buf);
2971 ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
2972 break;
2973 default: 3044 default:
2974 return -ENOSYS; 3045 return -ENOSYS;
2975 } 3046 }
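
All of the string parameters above now funnel through the iscsi_switch_str_param() helper added earlier in this file: the stored copy is replaced only when the incoming value actually differs, and a value that was already set can now be updated (the old targetname and persistent_address cases silently refused that). A hypothetical call sequence illustrating the semantics; the IQN strings are invented for the example and error returns are ignored for brevity:

	char *name = NULL;

	iscsi_switch_str_param(&name, "iqn.2009-06.org.example:t0");  /* first set: kstrdup'd copy */
	iscsi_switch_str_param(&name, "iqn.2009-06.org.example:t0");  /* same value: no-op */
	iscsi_switch_str_param(&name, "iqn.2009-06.org.example:t1");  /* old copy freed, new one installed */
	kfree(name);
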
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index b579ca9f4836..2e0746d70303 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -440,8 +440,8 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)
440 struct iscsi_tcp_task *tcp_task = task->dd_data; 440 struct iscsi_tcp_task *tcp_task = task->dd_data;
441 struct iscsi_r2t_info *r2t; 441 struct iscsi_r2t_info *r2t;
442 442
443 /* nothing to do for mgmt or pending tasks */ 443 /* nothing to do for mgmt */
444 if (!task->sc || task->state == ISCSI_TASK_PENDING) 444 if (!task->sc)
445 return; 445 return;
446 446
447 /* flush task's r2t queues */ 447 /* flush task's r2t queues */
@@ -473,7 +473,13 @@ static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
473 int datasn = be32_to_cpu(rhdr->datasn); 473 int datasn = be32_to_cpu(rhdr->datasn);
474 unsigned total_in_length = scsi_in(task->sc)->length; 474 unsigned total_in_length = scsi_in(task->sc)->length;
475 475
476 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr); 476 /*
477 * lib iscsi will update this in the completion handling if there
478 * is status.
479 */
480 if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
481 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
482
477 if (tcp_conn->in.datalen == 0) 483 if (tcp_conn->in.datalen == 0)
478 return 0; 484 return 0;
479 485
@@ -680,6 +686,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
680 "offset=%d, datalen=%d)\n", 686 "offset=%d, datalen=%d)\n",
681 tcp_task->data_offset, 687 tcp_task->data_offset,
682 tcp_conn->in.datalen); 688 tcp_conn->in.datalen);
689 task->last_xfer = jiffies;
683 rc = iscsi_segment_seek_sg(&tcp_conn->in.segment, 690 rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
684 sdb->table.sgl, 691 sdb->table.sgl,
685 sdb->table.nents, 692 sdb->table.nents,
@@ -707,9 +714,10 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
707 rc = ISCSI_ERR_BAD_ITT; 714 rc = ISCSI_ERR_BAD_ITT;
708 else if (ahslen) 715 else if (ahslen)
709 rc = ISCSI_ERR_AHSLEN; 716 rc = ISCSI_ERR_AHSLEN;
710 else if (task->sc->sc_data_direction == DMA_TO_DEVICE) 717 else if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
718 task->last_xfer = jiffies;
711 rc = iscsi_tcp_r2t_rsp(conn, task); 719 rc = iscsi_tcp_r2t_rsp(conn, task);
712 else 720 } else
713 rc = ISCSI_ERR_PROTO; 721 rc = ISCSI_ERR_PROTO;
714 spin_unlock(&conn->session->lock); 722 spin_unlock(&conn->session->lock);
715 break; 723 break;
@@ -857,6 +865,12 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
857 int rc = 0; 865 int rc = 0;
858 866
859 ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset); 867 ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset);
868 /*
869 * Update for each skb instead of pdu, because over slow networks a
870 * data_in's data could take a while to read in. We also want to
871 * account for r2ts.
872 */
873 conn->last_recv = jiffies;
860 874
861 if (unlikely(conn->suspend_rx)) { 875 if (unlikely(conn->suspend_rx)) {
862 ISCSI_DBG_TCP(conn, "Rx suspended!\n"); 876 ISCSI_DBG_TCP(conn, "Rx suspended!\n");
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 3da02e436788..54fa1e42dc4d 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1927,21 +1927,21 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1927 /* do we need to support multiple segments? */ 1927 /* do we need to support multiple segments? */
1928 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1928 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1929 printk("%s: multiple segments req %u %u, rsp %u %u\n", 1929 printk("%s: multiple segments req %u %u, rsp %u %u\n",
1930 __func__, req->bio->bi_vcnt, req->data_len, 1930 __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
1931 rsp->bio->bi_vcnt, rsp->data_len); 1931 rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
1932 return -EINVAL; 1932 return -EINVAL;
1933 } 1933 }
1934 1934
1935 ret = smp_execute_task(dev, bio_data(req->bio), req->data_len, 1935 ret = smp_execute_task(dev, bio_data(req->bio), blk_rq_bytes(req),
1936 bio_data(rsp->bio), rsp->data_len); 1936 bio_data(rsp->bio), blk_rq_bytes(rsp));
1937 if (ret > 0) { 1937 if (ret > 0) {
1938 /* positive number is the untransferred residual */ 1938 /* positive number is the untransferred residual */
1939 rsp->data_len = ret; 1939 rsp->resid_len = ret;
1940 req->data_len = 0; 1940 req->resid_len = 0;
1941 ret = 0; 1941 ret = 0;
1942 } else if (ret == 0) { 1942 } else if (ret == 0) {
1943 rsp->data_len = 0; 1943 rsp->resid_len = 0;
1944 req->data_len = 0; 1944 req->resid_len = 0;
1945 } 1945 }
1946 1946
1947 return ret; 1947 return ret;
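
This hunk follows the block-layer convention used throughout this series: the request payload size is read with blk_rq_bytes() rather than by poking data_len, and the untransferred remainder is reported back through resid_len instead of being written into data_len. A minimal sketch of that reporting pattern, using the names from the hunk above:

	ret = smp_execute_task(dev, bio_data(req->bio), blk_rq_bytes(req),
			       bio_data(rsp->bio), blk_rq_bytes(rsp));
	if (ret > 0) {
		/* positive return: this many response bytes were left untransferred */
		rsp->resid_len = ret;
		req->resid_len = 0;
		ret = 0;
	} else if (ret == 0) {
		/* full transfer: no residual on either side */
		rsp->resid_len = 0;
		req->resid_len = 0;
	}
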
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index d110a366c48a..1bc3b7567994 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -134,24 +134,24 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
134{ 134{
135 u8 *req_data = NULL, *resp_data = NULL, *buf; 135 u8 *req_data = NULL, *resp_data = NULL, *buf;
136 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); 136 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
137 int error = -EINVAL, resp_data_len = rsp->data_len; 137 int error = -EINVAL;
138 138
139 /* eight is the minimum size for request and response frames */ 139 /* eight is the minimum size for request and response frames */
140 if (req->data_len < 8 || rsp->data_len < 8) 140 if (blk_rq_bytes(req) < 8 || blk_rq_bytes(rsp) < 8)
141 goto out; 141 goto out;
142 142
143 if (bio_offset(req->bio) + req->data_len > PAGE_SIZE || 143 if (bio_offset(req->bio) + blk_rq_bytes(req) > PAGE_SIZE ||
144 bio_offset(rsp->bio) + rsp->data_len > PAGE_SIZE) { 144 bio_offset(rsp->bio) + blk_rq_bytes(rsp) > PAGE_SIZE) {
145 shost_printk(KERN_ERR, shost, 145 shost_printk(KERN_ERR, shost,
146 "SMP request/response frame crosses page boundary"); 146 "SMP request/response frame crosses page boundary");
147 goto out; 147 goto out;
148 } 148 }
149 149
150 req_data = kzalloc(req->data_len, GFP_KERNEL); 150 req_data = kzalloc(blk_rq_bytes(req), GFP_KERNEL);
151 151
152 /* make sure frame can always be built ... we copy 152 /* make sure frame can always be built ... we copy
153 * back only the requested length */ 153 * back only the requested length */
154 resp_data = kzalloc(max(rsp->data_len, 128U), GFP_KERNEL); 154 resp_data = kzalloc(max(blk_rq_bytes(rsp), 128U), GFP_KERNEL);
155 155
156 if (!req_data || !resp_data) { 156 if (!req_data || !resp_data) {
157 error = -ENOMEM; 157 error = -ENOMEM;
@@ -160,7 +160,7 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
160 160
161 local_irq_disable(); 161 local_irq_disable();
162 buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio); 162 buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio);
163 memcpy(req_data, buf, req->data_len); 163 memcpy(req_data, buf, blk_rq_bytes(req));
164 kunmap_atomic(buf - bio_offset(req->bio), KM_USER0); 164 kunmap_atomic(buf - bio_offset(req->bio), KM_USER0);
165 local_irq_enable(); 165 local_irq_enable();
166 166
@@ -178,15 +178,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
178 178
179 switch (req_data[1]) { 179 switch (req_data[1]) {
180 case SMP_REPORT_GENERAL: 180 case SMP_REPORT_GENERAL:
181 req->data_len -= 8; 181 req->resid_len -= 8;
182 resp_data_len -= 32; 182 rsp->resid_len -= 32;
183 resp_data[2] = SMP_RESP_FUNC_ACC; 183 resp_data[2] = SMP_RESP_FUNC_ACC;
184 resp_data[9] = sas_ha->num_phys; 184 resp_data[9] = sas_ha->num_phys;
185 break; 185 break;
186 186
187 case SMP_REPORT_MANUF_INFO: 187 case SMP_REPORT_MANUF_INFO:
188 req->data_len -= 8; 188 req->resid_len -= 8;
189 resp_data_len -= 64; 189 rsp->resid_len -= 64;
190 resp_data[2] = SMP_RESP_FUNC_ACC; 190 resp_data[2] = SMP_RESP_FUNC_ACC;
191 memcpy(resp_data + 12, shost->hostt->name, 191 memcpy(resp_data + 12, shost->hostt->name,
192 SAS_EXPANDER_VENDOR_ID_LEN); 192 SAS_EXPANDER_VENDOR_ID_LEN);
@@ -199,13 +199,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
199 break; 199 break;
200 200
201 case SMP_DISCOVER: 201 case SMP_DISCOVER:
202 req->data_len -= 16; 202 req->resid_len -= 16;
203 if ((int)req->data_len < 0) { 203 if ((int)req->resid_len < 0) {
204 req->data_len = 0; 204 req->resid_len = 0;
205 error = -EINVAL; 205 error = -EINVAL;
206 goto out; 206 goto out;
207 } 207 }
208 resp_data_len -= 56; 208 rsp->resid_len -= 56;
209 sas_host_smp_discover(sas_ha, resp_data, req_data[9]); 209 sas_host_smp_discover(sas_ha, resp_data, req_data[9]);
210 break; 210 break;
211 211
@@ -215,13 +215,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
215 break; 215 break;
216 216
217 case SMP_REPORT_PHY_SATA: 217 case SMP_REPORT_PHY_SATA:
218 req->data_len -= 16; 218 req->resid_len -= 16;
219 if ((int)req->data_len < 0) { 219 if ((int)req->resid_len < 0) {
220 req->data_len = 0; 220 req->resid_len = 0;
221 error = -EINVAL; 221 error = -EINVAL;
222 goto out; 222 goto out;
223 } 223 }
224 resp_data_len -= 60; 224 rsp->resid_len -= 60;
225 sas_report_phy_sata(sas_ha, resp_data, req_data[9]); 225 sas_report_phy_sata(sas_ha, resp_data, req_data[9]);
226 break; 226 break;
227 227
@@ -238,13 +238,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
238 break; 238 break;
239 239
240 case SMP_PHY_CONTROL: 240 case SMP_PHY_CONTROL:
241 req->data_len -= 44; 241 req->resid_len -= 44;
242 if ((int)req->data_len < 0) { 242 if ((int)req->resid_len < 0) {
243 req->data_len = 0; 243 req->resid_len = 0;
244 error = -EINVAL; 244 error = -EINVAL;
245 goto out; 245 goto out;
246 } 246 }
247 resp_data_len -= 8; 247 rsp->resid_len -= 8;
248 sas_phy_control(sas_ha, req_data[9], req_data[10], 248 sas_phy_control(sas_ha, req_data[9], req_data[10],
249 req_data[32] >> 4, req_data[33] >> 4, 249 req_data[32] >> 4, req_data[33] >> 4,
250 resp_data); 250 resp_data);
@@ -261,11 +261,10 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
261 261
262 local_irq_disable(); 262 local_irq_disable();
263 buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio); 263 buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio);
264 memcpy(buf, resp_data, rsp->data_len); 264 memcpy(buf, resp_data, blk_rq_bytes(rsp));
265 flush_kernel_dcache_page(bio_page(rsp->bio)); 265 flush_kernel_dcache_page(bio_page(rsp->bio));
266 kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0); 266 kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0);
267 local_irq_enable(); 267 local_irq_enable();
268 rsp->data_len = resp_data_len;
269 268
270 out: 269 out:
271 kfree(req_data); 270 kfree(req_data);
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index 15e2d132e8b9..2742ae8a3d09 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -135,7 +135,7 @@ int srp_target_alloc(struct srp_target *target, struct device *dev,
135 INIT_LIST_HEAD(&target->cmd_queue); 135 INIT_LIST_HEAD(&target->cmd_queue);
136 136
137 target->dev = dev; 137 target->dev = dev;
138 target->dev->driver_data = target; 138 dev_set_drvdata(target->dev, target);
139 139
140 target->srp_iu_size = iu_size; 140 target->srp_iu_size = iu_size;
141 target->rx_ring_size = nr; 141 target->rx_ring_size = nr;
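
The libsrp change is part of the general move away from dereferencing dev->driver_data directly; the driver-private pointer is stored and fetched through the accessor pair. A tiny illustrative sketch of the two halves (the lookup side here is hypothetical, not a quote from libsrp):

	/* store the per-target pointer on the struct device */
	dev_set_drvdata(target->dev, target);

	/* matching lookup wherever the device is handed back to the driver */
	struct srp_target *target = dev_get_drvdata(dev);
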
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 1105f9a111ba..1877d9811831 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -23,6 +23,13 @@
23 23
24struct lpfc_sli2_slim; 24struct lpfc_sli2_slim;
25 25
26#define LPFC_PCI_DEV_LP 0x1
27#define LPFC_PCI_DEV_OC 0x2
28
29#define LPFC_SLI_REV2 2
30#define LPFC_SLI_REV3 3
31#define LPFC_SLI_REV4 4
32
26#define LPFC_MAX_TARGET 4096 /* max number of targets supported */ 33#define LPFC_MAX_TARGET 4096 /* max number of targets supported */
27#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els 34#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els
28 requests */ 35 requests */
@@ -98,9 +105,11 @@ struct lpfc_dma_pool {
98}; 105};
99 106
100struct hbq_dmabuf { 107struct hbq_dmabuf {
108 struct lpfc_dmabuf hbuf;
101 struct lpfc_dmabuf dbuf; 109 struct lpfc_dmabuf dbuf;
102 uint32_t size; 110 uint32_t size;
103 uint32_t tag; 111 uint32_t tag;
112 struct lpfc_rcqe rcqe;
104}; 113};
105 114
106/* Priority bit. Set value to exceed low water mark in lpfc_mem. */ 115/* Priority bit. Set value to exceed low water mark in lpfc_mem. */
@@ -134,7 +143,10 @@ typedef struct lpfc_vpd {
134 } rev; 143 } rev;
135 struct { 144 struct {
136#ifdef __BIG_ENDIAN_BITFIELD 145#ifdef __BIG_ENDIAN_BITFIELD
137 uint32_t rsvd2 :24; /* Reserved */ 146 uint32_t rsvd3 :19; /* Reserved */
147 uint32_t cdss : 1; /* Configure Data Security SLI */
148 uint32_t rsvd2 : 3; /* Reserved */
149 uint32_t cbg : 1; /* Configure BlockGuard */
138 uint32_t cmv : 1; /* Configure Max VPIs */ 150 uint32_t cmv : 1; /* Configure Max VPIs */
139 uint32_t ccrp : 1; /* Config Command Ring Polling */ 151 uint32_t ccrp : 1; /* Config Command Ring Polling */
140 uint32_t csah : 1; /* Configure Synchronous Abort Handling */ 152 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
@@ -152,7 +164,10 @@ typedef struct lpfc_vpd {
152 uint32_t csah : 1; /* Configure Synchronous Abort Handling */ 164 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
153 uint32_t ccrp : 1; /* Config Command Ring Polling */ 165 uint32_t ccrp : 1; /* Config Command Ring Polling */
154 uint32_t cmv : 1; /* Configure Max VPIs */ 166 uint32_t cmv : 1; /* Configure Max VPIs */
155 uint32_t rsvd2 :24; /* Reserved */ 167 uint32_t cbg : 1; /* Configure BlockGuard */
168 uint32_t rsvd2 : 3; /* Reserved */
169 uint32_t cdss : 1; /* Configure Data Security SLI */
170 uint32_t rsvd3 :19; /* Reserved */
156#endif 171#endif
157 } sli3Feat; 172 } sli3Feat;
158} lpfc_vpd_t; 173} lpfc_vpd_t;
@@ -264,8 +279,8 @@ enum hba_state {
264}; 279};
265 280
266struct lpfc_vport { 281struct lpfc_vport {
267 struct list_head listentry;
268 struct lpfc_hba *phba; 282 struct lpfc_hba *phba;
283 struct list_head listentry;
269 uint8_t port_type; 284 uint8_t port_type;
270#define LPFC_PHYSICAL_PORT 1 285#define LPFC_PHYSICAL_PORT 1
271#define LPFC_NPIV_PORT 2 286#define LPFC_NPIV_PORT 2
@@ -273,6 +288,9 @@ struct lpfc_vport {
273 enum discovery_state port_state; 288 enum discovery_state port_state;
274 289
275 uint16_t vpi; 290 uint16_t vpi;
291 uint16_t vfi;
292 uint8_t vfi_state;
293#define LPFC_VFI_REGISTERED 0x1
276 294
277 uint32_t fc_flag; /* FC flags */ 295 uint32_t fc_flag; /* FC flags */
278/* Several of these flags are HBA centric and should be moved to 296/* Several of these flags are HBA centric and should be moved to
@@ -385,6 +403,9 @@ struct lpfc_vport {
385#endif 403#endif
386 uint8_t stat_data_enabled; 404 uint8_t stat_data_enabled;
387 uint8_t stat_data_blocked; 405 uint8_t stat_data_blocked;
406 struct list_head rcv_buffer_list;
407 uint32_t vport_flag;
408#define STATIC_VPORT 1
388}; 409};
389 410
390struct hbq_s { 411struct hbq_s {
@@ -420,8 +441,62 @@ enum intr_type_t {
420}; 441};
421 442
422struct lpfc_hba { 443struct lpfc_hba {
444 /* SCSI interface function jump table entries */
445 int (*lpfc_new_scsi_buf)
446 (struct lpfc_vport *, int);
447 struct lpfc_scsi_buf * (*lpfc_get_scsi_buf)
448 (struct lpfc_hba *);
449 int (*lpfc_scsi_prep_dma_buf)
450 (struct lpfc_hba *, struct lpfc_scsi_buf *);
451 void (*lpfc_scsi_unprep_dma_buf)
452 (struct lpfc_hba *, struct lpfc_scsi_buf *);
453 void (*lpfc_release_scsi_buf)
454 (struct lpfc_hba *, struct lpfc_scsi_buf *);
455 void (*lpfc_rampdown_queue_depth)
456 (struct lpfc_hba *);
457 void (*lpfc_scsi_prep_cmnd)
458 (struct lpfc_vport *, struct lpfc_scsi_buf *,
459 struct lpfc_nodelist *);
460 /* IOCB interface function jump table entries */
461 int (*__lpfc_sli_issue_iocb)
462 (struct lpfc_hba *, uint32_t,
463 struct lpfc_iocbq *, uint32_t);
464 void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
465 struct lpfc_iocbq *);
466 int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
467
468
469 IOCB_t * (*lpfc_get_iocb_from_iocbq)
470 (struct lpfc_iocbq *);
471 void (*lpfc_scsi_cmd_iocb_cmpl)
472 (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *);
473
474 /* MBOX interface function jump table entries */
475 int (*lpfc_sli_issue_mbox)
476 (struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
477 /* Slow-path IOCB process function jump table entries */
478 void (*lpfc_sli_handle_slow_ring_event)
479 (struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
480 uint32_t mask);
481 /* INIT device interface function jump table entries */
482 int (*lpfc_sli_hbq_to_firmware)
483 (struct lpfc_hba *, uint32_t, struct hbq_dmabuf *);
484 int (*lpfc_sli_brdrestart)
485 (struct lpfc_hba *);
486 int (*lpfc_sli_brdready)
487 (struct lpfc_hba *, uint32_t);
488 void (*lpfc_handle_eratt)
489 (struct lpfc_hba *);
490 void (*lpfc_stop_port)
491 (struct lpfc_hba *);
492
493
494 /* SLI4 specific HBA data structure */
495 struct lpfc_sli4_hba sli4_hba;
496
423 struct lpfc_sli sli; 497 struct lpfc_sli sli;
424 uint32_t sli_rev; /* SLI2 or SLI3 */ 498 uint8_t pci_dev_grp; /* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
499 uint32_t sli_rev; /* SLI2, SLI3, or SLI4 */
425 uint32_t sli3_options; /* Mask of enabled SLI3 options */ 500 uint32_t sli3_options; /* Mask of enabled SLI3 options */
426#define LPFC_SLI3_HBQ_ENABLED 0x01 501#define LPFC_SLI3_HBQ_ENABLED 0x01
427#define LPFC_SLI3_NPIV_ENABLED 0x02 502#define LPFC_SLI3_NPIV_ENABLED 0x02
@@ -429,6 +504,7 @@ struct lpfc_hba {
429#define LPFC_SLI3_CRP_ENABLED 0x08 504#define LPFC_SLI3_CRP_ENABLED 0x08
430#define LPFC_SLI3_INB_ENABLED 0x10 505#define LPFC_SLI3_INB_ENABLED 0x10
431#define LPFC_SLI3_BG_ENABLED 0x20 506#define LPFC_SLI3_BG_ENABLED 0x20
507#define LPFC_SLI3_DSS_ENABLED 0x40
432 uint32_t iocb_cmd_size; 508 uint32_t iocb_cmd_size;
433 uint32_t iocb_rsp_size; 509 uint32_t iocb_rsp_size;
434 510
@@ -442,8 +518,13 @@ struct lpfc_hba {
442 518
443 uint32_t hba_flag; /* hba generic flags */ 519 uint32_t hba_flag; /* hba generic flags */
444#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ 520#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
445 521#define DEFER_ERATT 0x2 /* Deferred error attention in progress */
446#define DEFER_ERATT 0x4 /* Deferred error attention in progress */ 522#define HBA_FCOE_SUPPORT 0x4 /* HBA function supports FCOE */
523#define HBA_RECEIVE_BUFFER 0x8 /* Rcv buffer posted to worker thread */
524#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
525#define FCP_XRI_ABORT_EVENT 0x20
526#define ELS_XRI_ABORT_EVENT 0x40
527#define ASYNC_EVENT 0x80
447 struct lpfc_dmabuf slim2p; 528 struct lpfc_dmabuf slim2p;
448 529
449 MAILBOX_t *mbox; 530 MAILBOX_t *mbox;
@@ -502,6 +583,9 @@ struct lpfc_hba {
502 uint32_t cfg_poll; 583 uint32_t cfg_poll;
503 uint32_t cfg_poll_tmo; 584 uint32_t cfg_poll_tmo;
504 uint32_t cfg_use_msi; 585 uint32_t cfg_use_msi;
586 uint32_t cfg_fcp_imax;
587 uint32_t cfg_fcp_wq_count;
588 uint32_t cfg_fcp_eq_count;
505 uint32_t cfg_sg_seg_cnt; 589 uint32_t cfg_sg_seg_cnt;
506 uint32_t cfg_prot_sg_seg_cnt; 590 uint32_t cfg_prot_sg_seg_cnt;
507 uint32_t cfg_sg_dma_buf_size; 591 uint32_t cfg_sg_dma_buf_size;
@@ -511,6 +595,8 @@ struct lpfc_hba {
511 uint32_t cfg_enable_hba_reset; 595 uint32_t cfg_enable_hba_reset;
512 uint32_t cfg_enable_hba_heartbeat; 596 uint32_t cfg_enable_hba_heartbeat;
513 uint32_t cfg_enable_bg; 597 uint32_t cfg_enable_bg;
598 uint32_t cfg_enable_fip;
599 uint32_t cfg_log_verbose;
514 600
515 lpfc_vpd_t vpd; /* vital product data */ 601 lpfc_vpd_t vpd; /* vital product data */
516 602
@@ -526,11 +612,12 @@ struct lpfc_hba {
526 unsigned long data_flags; 612 unsigned long data_flags;
527 613
528 uint32_t hbq_in_use; /* HBQs in use flag */ 614 uint32_t hbq_in_use; /* HBQs in use flag */
529 struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */ 615 struct list_head rb_pend_list; /* Received buffers to be processed */
530 uint32_t hbq_count; /* Count of configured HBQs */ 616 uint32_t hbq_count; /* Count of configured HBQs */
531 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ 617 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
532 618
533 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */ 619 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
620 unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */
534 unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */ 621 unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
535 void __iomem *slim_memmap_p; /* Kernel memory mapped address for 622 void __iomem *slim_memmap_p; /* Kernel memory mapped address for
536 PCI BAR0 */ 623 PCI BAR0 */
@@ -593,7 +680,8 @@ struct lpfc_hba {
593 /* pci_mem_pools */ 680 /* pci_mem_pools */
594 struct pci_pool *lpfc_scsi_dma_buf_pool; 681 struct pci_pool *lpfc_scsi_dma_buf_pool;
595 struct pci_pool *lpfc_mbuf_pool; 682 struct pci_pool *lpfc_mbuf_pool;
596 struct pci_pool *lpfc_hbq_pool; 683 struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
684 struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
597 struct lpfc_dma_pool lpfc_mbuf_safety_pool; 685 struct lpfc_dma_pool lpfc_mbuf_safety_pool;
598 686
599 mempool_t *mbox_mem_pool; 687 mempool_t *mbox_mem_pool;
@@ -609,6 +697,14 @@ struct lpfc_hba {
609 struct lpfc_vport *pport; /* physical lpfc_vport pointer */ 697 struct lpfc_vport *pport; /* physical lpfc_vport pointer */
610 uint16_t max_vpi; /* Maximum virtual nports */ 698 uint16_t max_vpi; /* Maximum virtual nports */
611#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */ 699#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */
700 uint16_t max_vports; /*
 701	 * For IOV HBAs, max_vpi can change
 702	 * after a reset. max_vports is the max
 703	 * number of vports present. This can
704 * be greater than max_vpi.
705 */
706 uint16_t vpi_base;
707 uint16_t vfi_base;
612 unsigned long *vpi_bmask; /* vpi allocation table */ 708 unsigned long *vpi_bmask; /* vpi allocation table */
613 709
614 /* Data structure used by fabric iocb scheduler */ 710 /* Data structure used by fabric iocb scheduler */
@@ -667,6 +763,11 @@ struct lpfc_hba {
667/* Maximum number of events that can be outstanding at any time*/ 763/* Maximum number of events that can be outstanding at any time*/
668#define LPFC_MAX_EVT_COUNT 512 764#define LPFC_MAX_EVT_COUNT 512
669 atomic_t fast_event_count; 765 atomic_t fast_event_count;
766 struct lpfc_fcf fcf;
767 uint8_t fc_map[3];
768 uint8_t valid_vlan;
769 uint16_t vlan_id;
770 struct list_head fcf_conn_rec_list;
670}; 771};
671 772
672static inline struct Scsi_Host * 773static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c14f0cbdb125..fc07be5fbce9 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -30,8 +30,10 @@
30#include <scsi/scsi_tcq.h> 30#include <scsi/scsi_tcq.h>
31#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
32 32
33#include "lpfc_hw4.h"
33#include "lpfc_hw.h" 34#include "lpfc_hw.h"
34#include "lpfc_sli.h" 35#include "lpfc_sli.h"
36#include "lpfc_sli4.h"
35#include "lpfc_nl.h" 37#include "lpfc_nl.h"
36#include "lpfc_disc.h" 38#include "lpfc_disc.h"
37#include "lpfc_scsi.h" 39#include "lpfc_scsi.h"
@@ -505,12 +507,14 @@ lpfc_issue_lip(struct Scsi_Host *shost)
505 return -ENOMEM; 507 return -ENOMEM;
506 508
507 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 509 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
508 pmboxq->mb.mbxCommand = MBX_DOWN_LINK; 510 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
509 pmboxq->mb.mbxOwner = OWN_HOST; 511 pmboxq->u.mb.mbxOwner = OWN_HOST;
510 512
511 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); 513 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
512 514
513 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) { 515 if ((mbxstatus == MBX_SUCCESS) &&
516 (pmboxq->u.mb.mbxStatus == 0 ||
517 pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
514 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 518 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
515 lpfc_init_link(phba, pmboxq, phba->cfg_topology, 519 lpfc_init_link(phba, pmboxq, phba->cfg_topology,
516 phba->cfg_link_speed); 520 phba->cfg_link_speed);
@@ -789,7 +793,8 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
789 uint32_t *mrpi, uint32_t *arpi, 793 uint32_t *mrpi, uint32_t *arpi,
790 uint32_t *mvpi, uint32_t *avpi) 794 uint32_t *mvpi, uint32_t *avpi)
791{ 795{
792 struct lpfc_sli *psli = &phba->sli; 796 struct lpfc_sli *psli = &phba->sli;
797 struct lpfc_mbx_read_config *rd_config;
793 LPFC_MBOXQ_t *pmboxq; 798 LPFC_MBOXQ_t *pmboxq;
794 MAILBOX_t *pmb; 799 MAILBOX_t *pmb;
795 int rc = 0; 800 int rc = 0;
@@ -800,7 +805,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
800 */ 805 */
801 if (phba->link_state < LPFC_LINK_DOWN || 806 if (phba->link_state < LPFC_LINK_DOWN ||
802 !phba->mbox_mem_pool || 807 !phba->mbox_mem_pool ||
803 (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0) 808 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
804 return 0; 809 return 0;
805 810
806 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) 811 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@@ -811,13 +816,13 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
811 return 0; 816 return 0;
812 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 817 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
813 818
814 pmb = &pmboxq->mb; 819 pmb = &pmboxq->u.mb;
815 pmb->mbxCommand = MBX_READ_CONFIG; 820 pmb->mbxCommand = MBX_READ_CONFIG;
816 pmb->mbxOwner = OWN_HOST; 821 pmb->mbxOwner = OWN_HOST;
817 pmboxq->context1 = NULL; 822 pmboxq->context1 = NULL;
818 823
819 if ((phba->pport->fc_flag & FC_OFFLINE_MODE) || 824 if ((phba->pport->fc_flag & FC_OFFLINE_MODE) ||
820 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 825 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
821 rc = MBX_NOT_FINISHED; 826 rc = MBX_NOT_FINISHED;
822 else 827 else
823 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 828 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -828,18 +833,37 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
828 return 0; 833 return 0;
829 } 834 }
830 835
831 if (mrpi) 836 if (phba->sli_rev == LPFC_SLI_REV4) {
832 *mrpi = pmb->un.varRdConfig.max_rpi; 837 rd_config = &pmboxq->u.mqe.un.rd_config;
833 if (arpi) 838 if (mrpi)
834 *arpi = pmb->un.varRdConfig.avail_rpi; 839 *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
835 if (mxri) 840 if (arpi)
836 *mxri = pmb->un.varRdConfig.max_xri; 841 *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
837 if (axri) 842 phba->sli4_hba.max_cfg_param.rpi_used;
838 *axri = pmb->un.varRdConfig.avail_xri; 843 if (mxri)
839 if (mvpi) 844 *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
840 *mvpi = pmb->un.varRdConfig.max_vpi; 845 if (axri)
841 if (avpi) 846 *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
842 *avpi = pmb->un.varRdConfig.avail_vpi; 847 phba->sli4_hba.max_cfg_param.xri_used;
848 if (mvpi)
849 *mvpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
850 if (avpi)
851 *avpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) -
852 phba->sli4_hba.max_cfg_param.vpi_used;
853 } else {
854 if (mrpi)
855 *mrpi = pmb->un.varRdConfig.max_rpi;
856 if (arpi)
857 *arpi = pmb->un.varRdConfig.avail_rpi;
858 if (mxri)
859 *mxri = pmb->un.varRdConfig.max_xri;
860 if (axri)
861 *axri = pmb->un.varRdConfig.avail_xri;
862 if (mvpi)
863 *mvpi = pmb->un.varRdConfig.max_vpi;
864 if (avpi)
865 *avpi = pmb->un.varRdConfig.avail_vpi;
866 }
843 867
844 mempool_free(pmboxq, phba->mbox_mem_pool); 868 mempool_free(pmboxq, phba->mbox_mem_pool);
845 return 1; 869 return 1;
@@ -2021,22 +2045,9 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
2021# lpfc_log_verbose: Only turn this flag on if you are willing to risk being 2045# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
2022# deluged with LOTS of information. 2046# deluged with LOTS of information.
2023# You can set a bit mask to record specific types of verbose messages: 2047# You can set a bit mask to record specific types of verbose messages:
2024#	 2048# See lpfc_logmsg.h for definitions.
2025# LOG_ELS 0x1 ELS events
2026# LOG_DISCOVERY 0x2 Link discovery events
2027# LOG_MBOX 0x4 Mailbox events
2028# LOG_INIT 0x8 Initialization events
2029# LOG_LINK_EVENT 0x10 Link events
2030# LOG_FCP 0x40 FCP traffic history
2031# LOG_NODE 0x80 Node table events
2032# LOG_BG 0x200 BlockGuard events
2033# LOG_MISC 0x400 Miscellaneous events
2034# LOG_SLI 0x800 SLI events
2035# LOG_FCP_ERROR 0x1000 Only log FCP errors
2036# LOG_LIBDFC 0x2000 LIBDFC events
2037# LOG_ALL_MSG 0xffff LOG all messages
2038*/ 2049*/
2039LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffff, 2050LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
2040 "Verbose logging bit-mask"); 2051 "Verbose logging bit-mask");
2041 2052
2042/* 2053/*
@@ -2266,6 +2277,36 @@ lpfc_param_init(topology, 0, 0, 6)
2266static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR, 2277static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
2267 lpfc_topology_show, lpfc_topology_store); 2278 lpfc_topology_show, lpfc_topology_store);
2268 2279
2280/**
 2281 * lpfc_static_vport_show - Read callback function for the
 2282 * lpfc_static_vport sysfs file.
2283 * @dev: Pointer to class device object.
2284 * @attr: device attribute structure.
2285 * @buf: Data buffer.
2286 *
 2287 * This function is the read callback function for the
 2288 * lpfc_static_vport sysfs file. The lpfc_static_vport
 2289 * sysfs file reports the manageability of the vport.
2290 **/
2291static ssize_t
2292lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
2293 char *buf)
2294{
2295 struct Scsi_Host *shost = class_to_shost(dev);
2296 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2297 if (vport->vport_flag & STATIC_VPORT)
2298 sprintf(buf, "1\n");
2299 else
2300 sprintf(buf, "0\n");
2301
2302 return strlen(buf);
2303}
2304
2305/*
2306 * Sysfs attribute to control the statistical data collection.
2307 */
2308static DEVICE_ATTR(lpfc_static_vport, S_IRUGO,
2309 lpfc_static_vport_show, NULL);
2269 2310
2270/** 2311/**
2271 * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file 2312 * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
@@ -2341,7 +2382,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
2341 if (vports == NULL) 2382 if (vports == NULL)
2342 return -ENOMEM; 2383 return -ENOMEM;
2343 2384
2344 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2385 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2345 v_shost = lpfc_shost_from_vport(vports[i]); 2386 v_shost = lpfc_shost_from_vport(vports[i]);
2346 spin_lock_irq(v_shost->host_lock); 2387 spin_lock_irq(v_shost->host_lock);
2347 /* Block and reset data collection */ 2388 /* Block and reset data collection */
@@ -2356,7 +2397,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
2356 phba->bucket_base = base; 2397 phba->bucket_base = base;
2357 phba->bucket_step = step; 2398 phba->bucket_step = step;
2358 2399
2359 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2400 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2360 v_shost = lpfc_shost_from_vport(vports[i]); 2401 v_shost = lpfc_shost_from_vport(vports[i]);
2361 2402
2362 /* Unblock data collection */ 2403 /* Unblock data collection */
@@ -2373,7 +2414,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
2373 if (vports == NULL) 2414 if (vports == NULL)
2374 return -ENOMEM; 2415 return -ENOMEM;
2375 2416
2376 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2417 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2377 v_shost = lpfc_shost_from_vport(vports[i]); 2418 v_shost = lpfc_shost_from_vport(vports[i]);
2378 spin_lock_irq(shost->host_lock); 2419 spin_lock_irq(shost->host_lock);
2379 vports[i]->stat_data_blocked = 1; 2420 vports[i]->stat_data_blocked = 1;
@@ -2844,15 +2885,39 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
2844/* 2885/*
2845# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that 2886# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
2846# support this feature 2887# support this feature
2847# 0 = MSI disabled 2888# 0 = MSI disabled (default)
2848# 1 = MSI enabled 2889# 1 = MSI enabled
2849# 2 = MSI-X enabled (default) 2890# 2 = MSI-X enabled
2850# Value range is [0,2]. Default value is 2. 2891# Value range is [0,2]. Default value is 0.
2851*/ 2892*/
2852LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " 2893LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or "
2853 "MSI-X (2), if possible"); 2894 "MSI-X (2), if possible");
2854 2895
2855/* 2896/*
2897# lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second
2898#
2899# Value range is [636,651042]. Default value is 10000.
2900*/
2901LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST,
2902 "Set the maximum number of fast-path FCP interrupts per second");
2903
2904/*
2905# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
2906#
2907# Value range is [1,31]. Default value is 4.
2908*/
2909LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX,
2910 "Set the number of fast-path FCP work queues, if possible");
2911
2912/*
2913# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues
2914#
2915# Value range is [1,7]. Default value is 1.
2916*/
2917LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX,
2918 "Set the number of fast-path FCP event queues, if possible");
2919
2920/*
2856# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. 2921# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
2857# 0 = HBA resets disabled 2922# 0 = HBA resets disabled
2858# 1 = HBA resets enabled (default) 2923# 1 = HBA resets enabled (default)
@@ -2876,6 +2941,14 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
2876*/ 2941*/
2877LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); 2942LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
2878 2943
2944/*
2945# lpfc_enable_fip: When set, FIP is required to start discovery. If not
 2946# set, the driver will manually add an FCF record if the port has no
 2947# FCF records available, and then start discovery.
 2948# Value range is [0,1]. Default value is 0 (disabled).
2949*/
2950LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery");
2951
2879 2952
2880/* 2953/*
2881# lpfc_prot_mask: i 2954# lpfc_prot_mask: i
@@ -2942,6 +3015,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
2942 &dev_attr_lpfc_peer_port_login, 3015 &dev_attr_lpfc_peer_port_login,
2943 &dev_attr_lpfc_nodev_tmo, 3016 &dev_attr_lpfc_nodev_tmo,
2944 &dev_attr_lpfc_devloss_tmo, 3017 &dev_attr_lpfc_devloss_tmo,
3018 &dev_attr_lpfc_enable_fip,
2945 &dev_attr_lpfc_fcp_class, 3019 &dev_attr_lpfc_fcp_class,
2946 &dev_attr_lpfc_use_adisc, 3020 &dev_attr_lpfc_use_adisc,
2947 &dev_attr_lpfc_ack0, 3021 &dev_attr_lpfc_ack0,
@@ -2969,6 +3043,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
2969 &dev_attr_lpfc_poll, 3043 &dev_attr_lpfc_poll,
2970 &dev_attr_lpfc_poll_tmo, 3044 &dev_attr_lpfc_poll_tmo,
2971 &dev_attr_lpfc_use_msi, 3045 &dev_attr_lpfc_use_msi,
3046 &dev_attr_lpfc_fcp_imax,
3047 &dev_attr_lpfc_fcp_wq_count,
3048 &dev_attr_lpfc_fcp_eq_count,
2972 &dev_attr_lpfc_enable_bg, 3049 &dev_attr_lpfc_enable_bg,
2973 &dev_attr_lpfc_soft_wwnn, 3050 &dev_attr_lpfc_soft_wwnn,
2974 &dev_attr_lpfc_soft_wwpn, 3051 &dev_attr_lpfc_soft_wwpn,
@@ -2991,6 +3068,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
2991 &dev_attr_lpfc_lun_queue_depth, 3068 &dev_attr_lpfc_lun_queue_depth,
2992 &dev_attr_lpfc_nodev_tmo, 3069 &dev_attr_lpfc_nodev_tmo,
2993 &dev_attr_lpfc_devloss_tmo, 3070 &dev_attr_lpfc_devloss_tmo,
3071 &dev_attr_lpfc_enable_fip,
2994 &dev_attr_lpfc_hba_queue_depth, 3072 &dev_attr_lpfc_hba_queue_depth,
2995 &dev_attr_lpfc_peer_port_login, 3073 &dev_attr_lpfc_peer_port_login,
2996 &dev_attr_lpfc_restrict_login, 3074 &dev_attr_lpfc_restrict_login,
@@ -3003,6 +3081,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
3003 &dev_attr_lpfc_enable_da_id, 3081 &dev_attr_lpfc_enable_da_id,
3004 &dev_attr_lpfc_max_scsicmpl_time, 3082 &dev_attr_lpfc_max_scsicmpl_time,
3005 &dev_attr_lpfc_stat_data_ctrl, 3083 &dev_attr_lpfc_stat_data_ctrl,
3084 &dev_attr_lpfc_static_vport,
3006 NULL, 3085 NULL,
3007}; 3086};
3008 3087
@@ -3034,6 +3113,9 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
3034 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3113 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3035 struct lpfc_hba *phba = vport->phba; 3114 struct lpfc_hba *phba = vport->phba;
3036 3115
3116 if (phba->sli_rev >= LPFC_SLI_REV4)
3117 return -EPERM;
3118
3037 if ((off + count) > FF_REG_AREA_SIZE) 3119 if ((off + count) > FF_REG_AREA_SIZE)
3038 return -ERANGE; 3120 return -ERANGE;
3039 3121
@@ -3084,6 +3166,9 @@ sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3084 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3166 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3085 struct lpfc_hba *phba = vport->phba; 3167 struct lpfc_hba *phba = vport->phba;
3086 3168
3169 if (phba->sli_rev >= LPFC_SLI_REV4)
3170 return -EPERM;
3171
3087 if (off > FF_REG_AREA_SIZE) 3172 if (off > FF_REG_AREA_SIZE)
3088 return -ERANGE; 3173 return -ERANGE;
3089 3174
@@ -3199,7 +3284,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
3199 } 3284 }
3200 } 3285 }
3201 3286
3202 memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off, 3287 memcpy((uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off,
3203 buf, count); 3288 buf, count);
3204 3289
3205 phba->sysfs_mbox.offset = off + count; 3290 phba->sysfs_mbox.offset = off + count;
@@ -3241,6 +3326,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3241 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3326 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3242 struct lpfc_hba *phba = vport->phba; 3327 struct lpfc_hba *phba = vport->phba;
3243 int rc; 3328 int rc;
3329 MAILBOX_t *pmb;
3244 3330
3245 if (off > MAILBOX_CMD_SIZE) 3331 if (off > MAILBOX_CMD_SIZE)
3246 return -ERANGE; 3332 return -ERANGE;
@@ -3265,8 +3351,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3265 if (off == 0 && 3351 if (off == 0 &&
3266 phba->sysfs_mbox.state == SMBOX_WRITING && 3352 phba->sysfs_mbox.state == SMBOX_WRITING &&
3267 phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) { 3353 phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
3268 3354 pmb = &phba->sysfs_mbox.mbox->u.mb;
3269 switch (phba->sysfs_mbox.mbox->mb.mbxCommand) { 3355 switch (pmb->mbxCommand) {
3270 /* Offline only */ 3356 /* Offline only */
3271 case MBX_INIT_LINK: 3357 case MBX_INIT_LINK:
3272 case MBX_DOWN_LINK: 3358 case MBX_DOWN_LINK:
@@ -3283,7 +3369,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3283 if (!(vport->fc_flag & FC_OFFLINE_MODE)) { 3369 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
3284 printk(KERN_WARNING "mbox_read:Command 0x%x " 3370 printk(KERN_WARNING "mbox_read:Command 0x%x "
3285 "is illegal in on-line state\n", 3371 "is illegal in on-line state\n",
3286 phba->sysfs_mbox.mbox->mb.mbxCommand); 3372 pmb->mbxCommand);
3287 sysfs_mbox_idle(phba); 3373 sysfs_mbox_idle(phba);
3288 spin_unlock_irq(&phba->hbalock); 3374 spin_unlock_irq(&phba->hbalock);
3289 return -EPERM; 3375 return -EPERM;
@@ -3319,13 +3405,13 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3319 case MBX_CONFIG_PORT: 3405 case MBX_CONFIG_PORT:
3320 case MBX_RUN_BIU_DIAG: 3406 case MBX_RUN_BIU_DIAG:
3321 printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n", 3407 printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
3322 phba->sysfs_mbox.mbox->mb.mbxCommand); 3408 pmb->mbxCommand);
3323 sysfs_mbox_idle(phba); 3409 sysfs_mbox_idle(phba);
3324 spin_unlock_irq(&phba->hbalock); 3410 spin_unlock_irq(&phba->hbalock);
3325 return -EPERM; 3411 return -EPERM;
3326 default: 3412 default:
3327 printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n", 3413 printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
3328 phba->sysfs_mbox.mbox->mb.mbxCommand); 3414 pmb->mbxCommand);
3329 sysfs_mbox_idle(phba); 3415 sysfs_mbox_idle(phba);
3330 spin_unlock_irq(&phba->hbalock); 3416 spin_unlock_irq(&phba->hbalock);
3331 return -EPERM; 3417 return -EPERM;
@@ -3335,14 +3421,14 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3335 * or RESTART mailbox commands until the HBA is restarted. 3421 * or RESTART mailbox commands until the HBA is restarted.
3336 */ 3422 */
3337 if (phba->pport->stopped && 3423 if (phba->pport->stopped &&
3338 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_DUMP_MEMORY && 3424 pmb->mbxCommand != MBX_DUMP_MEMORY &&
3339 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_RESTART && 3425 pmb->mbxCommand != MBX_RESTART &&
3340 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_VPARMS && 3426 pmb->mbxCommand != MBX_WRITE_VPARMS &&
3341 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_WWN) 3427 pmb->mbxCommand != MBX_WRITE_WWN)
3342 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 3428 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
3343 "1259 mbox: Issued mailbox cmd " 3429 "1259 mbox: Issued mailbox cmd "
3344 "0x%x while in stopped state.\n", 3430 "0x%x while in stopped state.\n",
3345 phba->sysfs_mbox.mbox->mb.mbxCommand); 3431 pmb->mbxCommand);
3346 3432
3347 phba->sysfs_mbox.mbox->vport = vport; 3433 phba->sysfs_mbox.mbox->vport = vport;
3348 3434
@@ -3356,7 +3442,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3356 } 3442 }
3357 3443
3358 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3444 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3359 (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){ 3445 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
3360 3446
3361 spin_unlock_irq(&phba->hbalock); 3447 spin_unlock_irq(&phba->hbalock);
3362 rc = lpfc_sli_issue_mbox (phba, 3448 rc = lpfc_sli_issue_mbox (phba,
@@ -3368,8 +3454,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3368 spin_unlock_irq(&phba->hbalock); 3454 spin_unlock_irq(&phba->hbalock);
3369 rc = lpfc_sli_issue_mbox_wait (phba, 3455 rc = lpfc_sli_issue_mbox_wait (phba,
3370 phba->sysfs_mbox.mbox, 3456 phba->sysfs_mbox.mbox,
3371 lpfc_mbox_tmo_val(phba, 3457 lpfc_mbox_tmo_val(phba, pmb->mbxCommand) * HZ);
3372 phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
3373 spin_lock_irq(&phba->hbalock); 3458 spin_lock_irq(&phba->hbalock);
3374 } 3459 }
3375 3460
@@ -3391,7 +3476,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3391 return -EAGAIN; 3476 return -EAGAIN;
3392 } 3477 }
3393 3478
3394	 memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count);	 3479	 memcpy(buf, (uint8_t *) pmb + off, count);
3395 3480
3396 phba->sysfs_mbox.offset = off + count; 3481 phba->sysfs_mbox.offset = off + count;
3397 3482
@@ -3585,6 +3670,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
3585 case LA_8GHZ_LINK: 3670 case LA_8GHZ_LINK:
3586 fc_host_speed(shost) = FC_PORTSPEED_8GBIT; 3671 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
3587 break; 3672 break;
3673 case LA_10GHZ_LINK:
3674 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
3675 break;
3588 default: 3676 default:
3589 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 3677 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
3590 break; 3678 break;
@@ -3652,7 +3740,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
3652 */ 3740 */
3653 if (phba->link_state < LPFC_LINK_DOWN || 3741 if (phba->link_state < LPFC_LINK_DOWN ||
3654 !phba->mbox_mem_pool || 3742 !phba->mbox_mem_pool ||
3655 (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0) 3743 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
3656 return NULL; 3744 return NULL;
3657 3745
3658 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) 3746 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@@ -3663,14 +3751,14 @@ lpfc_get_stats(struct Scsi_Host *shost)
3663 return NULL; 3751 return NULL;
3664 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 3752 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
3665 3753
3666 pmb = &pmboxq->mb; 3754 pmb = &pmboxq->u.mb;
3667 pmb->mbxCommand = MBX_READ_STATUS; 3755 pmb->mbxCommand = MBX_READ_STATUS;
3668 pmb->mbxOwner = OWN_HOST; 3756 pmb->mbxOwner = OWN_HOST;
3669 pmboxq->context1 = NULL; 3757 pmboxq->context1 = NULL;
3670 pmboxq->vport = vport; 3758 pmboxq->vport = vport;
3671 3759
3672 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3760 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3673 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 3761 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
3674 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 3762 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
3675 else 3763 else
3676 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 3764 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3695,7 +3783,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
3695 pmboxq->vport = vport; 3783 pmboxq->vport = vport;
3696 3784
3697 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3785 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3698 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 3786 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
3699 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 3787 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
3700 else 3788 else
3701 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 3789 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3769,7 +3857,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
3769 return; 3857 return;
3770 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 3858 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3771 3859
3772 pmb = &pmboxq->mb; 3860 pmb = &pmboxq->u.mb;
3773 pmb->mbxCommand = MBX_READ_STATUS; 3861 pmb->mbxCommand = MBX_READ_STATUS;
3774 pmb->mbxOwner = OWN_HOST; 3862 pmb->mbxOwner = OWN_HOST;
3775 pmb->un.varWords[0] = 0x1; /* reset request */ 3863 pmb->un.varWords[0] = 0x1; /* reset request */
@@ -3777,7 +3865,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
3777 pmboxq->vport = vport; 3865 pmboxq->vport = vport;
3778 3866
3779 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3867 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3780 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 3868 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
3781 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 3869 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
3782 else 3870 else
3783 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 3871 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3795,7 +3883,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
3795 pmboxq->vport = vport; 3883 pmboxq->vport = vport;
3796 3884
3797 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3885 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3798 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 3886 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
3799 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 3887 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
3800 else 3888 else
3801 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 3889 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3962,6 +4050,21 @@ lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
3962 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); 4050 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
3963} 4051}
3964 4052
4053/**
4054 * lpfc_hba_log_verbose_init - Set hba's log verbose level
4055 * @phba: Pointer to lpfc_hba struct.
4056 *
4057 * This function is called by the lpfc_get_cfgparam() routine to set the
4058 * module lpfc_log_verbose into the @phba cfg_log_verbose for use with
4059 * log messsage according to the module's lpfc_log_verbose parameter setting
4060 * before hba port or vport created.
4061 **/
4062static void
4063lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
4064{
4065 phba->cfg_log_verbose = verbose;
4066}
4067
3965struct fc_function_template lpfc_transport_functions = { 4068struct fc_function_template lpfc_transport_functions = {
3966 /* fixed attributes the driver supports */ 4069 /* fixed attributes the driver supports */
3967 .show_host_node_name = 1, 4070 .show_host_node_name = 1,
@@ -4105,6 +4208,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
4105 lpfc_poll_tmo_init(phba, lpfc_poll_tmo); 4208 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
4106 lpfc_enable_npiv_init(phba, lpfc_enable_npiv); 4209 lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
4107 lpfc_use_msi_init(phba, lpfc_use_msi); 4210 lpfc_use_msi_init(phba, lpfc_use_msi);
4211 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
4212 lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
4213 lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
4108 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); 4214 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
4109 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); 4215 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
4110 lpfc_enable_bg_init(phba, lpfc_enable_bg); 4216 lpfc_enable_bg_init(phba, lpfc_enable_bg);
@@ -4113,26 +4219,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
4113 phba->cfg_soft_wwpn = 0L; 4219 phba->cfg_soft_wwpn = 0L;
4114 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); 4220 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
4115 lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt); 4221 lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
4116 /*
4117 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
4118 * used to create the sg_dma_buf_pool must be dynamically calculated.
4119 * 2 segments are added since the IOCB needs a command and response bde.
4120 */
4121 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4122 sizeof(struct fcp_rsp) +
4123 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4124
4125 if (phba->cfg_enable_bg) {
4126 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4127 phba->cfg_sg_dma_buf_size +=
4128 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
4129 }
4130
4131 /* Also reinitialize the host templates with new values. */
4132 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4133 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4134
4135 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); 4222 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
4223 lpfc_enable_fip_init(phba, lpfc_enable_fip);
4224 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
4225
4136 return; 4226 return;
4137} 4227}
4138 4228
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index f88ce3f26190..d2a922997c0f 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -23,6 +23,8 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *);
23struct fc_rport; 23struct fc_rport;
24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); 24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
25void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *); 25void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
26void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
27int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
26void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); 28void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
27void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 29void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
28 30
@@ -35,17 +37,19 @@ int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *);
35int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int); 37int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
36void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); 38void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
37void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); 39void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
38int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, 40int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
39 LPFC_MBOXQ_t *, uint32_t); 41 LPFC_MBOXQ_t *, uint32_t);
40void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 42void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
41void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 43void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
42void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 44void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
43void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *); 45void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
44void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); 46void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
47void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
45 48
46struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); 49struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
47void lpfc_cleanup_rpis(struct lpfc_vport *, int); 50void lpfc_cleanup_rpis(struct lpfc_vport *, int);
48int lpfc_linkdown(struct lpfc_hba *); 51int lpfc_linkdown(struct lpfc_hba *);
52void lpfc_linkdown_port(struct lpfc_vport *);
49void lpfc_port_link_failure(struct lpfc_vport *); 53void lpfc_port_link_failure(struct lpfc_vport *);
50void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); 54void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
51 55
@@ -54,6 +58,7 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
54void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 58void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
55void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 59void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
56void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 60void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
61void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
57void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *); 62void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
58void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); 63void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
59struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *, 64struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
@@ -105,6 +110,7 @@ int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
105int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); 110int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
106int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *); 111int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
107int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t); 112int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
113int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
108int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); 114int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
109int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); 115int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
110int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *, 116int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
@@ -149,15 +155,19 @@ int lpfc_online(struct lpfc_hba *);
149void lpfc_unblock_mgmt_io(struct lpfc_hba *); 155void lpfc_unblock_mgmt_io(struct lpfc_hba *);
150void lpfc_offline_prep(struct lpfc_hba *); 156void lpfc_offline_prep(struct lpfc_hba *);
151void lpfc_offline(struct lpfc_hba *); 157void lpfc_offline(struct lpfc_hba *);
158void lpfc_reset_hba(struct lpfc_hba *);
152 159
153int lpfc_sli_setup(struct lpfc_hba *); 160int lpfc_sli_setup(struct lpfc_hba *);
154int lpfc_sli_queue_setup(struct lpfc_hba *); 161int lpfc_sli_queue_setup(struct lpfc_hba *);
155 162
156void lpfc_handle_eratt(struct lpfc_hba *); 163void lpfc_handle_eratt(struct lpfc_hba *);
157void lpfc_handle_latt(struct lpfc_hba *); 164void lpfc_handle_latt(struct lpfc_hba *);
158irqreturn_t lpfc_intr_handler(int, void *); 165irqreturn_t lpfc_sli_intr_handler(int, void *);
159irqreturn_t lpfc_sp_intr_handler(int, void *); 166irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
160irqreturn_t lpfc_fp_intr_handler(int, void *); 167irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
168irqreturn_t lpfc_sli4_intr_handler(int, void *);
169irqreturn_t lpfc_sli4_sp_intr_handler(int, void *);
170irqreturn_t lpfc_sli4_fp_intr_handler(int, void *);
161 171
162void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); 172void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
163void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *); 173void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
@@ -165,16 +175,32 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
165void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); 175void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
166void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); 176void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
167LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); 177LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
178void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
168void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *); 179void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
180int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *);
181int lpfc_mbox_dev_check(struct lpfc_hba *);
169int lpfc_mbox_tmo_val(struct lpfc_hba *, int); 182int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
183void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
184void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
185void lpfc_init_vpi(struct lpfcMboxq *, uint16_t);
186void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t);
187void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
188void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
189void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
170 190
171void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *, 191void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
172 uint32_t , LPFC_MBOXQ_t *); 192 uint32_t , LPFC_MBOXQ_t *);
173struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *); 193struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *);
174void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *); 194void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *);
195struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
196void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
197void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
198 uint16_t);
199void lpfc_unregister_unused_fcf(struct lpfc_hba *);
175 200
176int lpfc_mem_alloc(struct lpfc_hba *); 201int lpfc_mem_alloc(struct lpfc_hba *, int align);
177void lpfc_mem_free(struct lpfc_hba *); 202void lpfc_mem_free(struct lpfc_hba *);
203void lpfc_mem_free_all(struct lpfc_hba *);
178void lpfc_stop_vport_timers(struct lpfc_vport *); 204void lpfc_stop_vport_timers(struct lpfc_vport *);
179 205
180void lpfc_poll_timeout(unsigned long ptr); 206void lpfc_poll_timeout(unsigned long ptr);
@@ -186,6 +212,7 @@ void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
186uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); 212uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
187void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t, 213void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t,
188 uint32_t); 214 uint32_t);
215void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *);
189 216
190void lpfc_reset_barrier(struct lpfc_hba * phba); 217void lpfc_reset_barrier(struct lpfc_hba * phba);
191int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); 218int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
@@ -198,12 +225,13 @@ int lpfc_sli_host_down(struct lpfc_vport *);
198int lpfc_sli_hba_down(struct lpfc_hba *); 225int lpfc_sli_hba_down(struct lpfc_hba *);
199int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 226int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
200int lpfc_sli_handle_mb_event(struct lpfc_hba *); 227int lpfc_sli_handle_mb_event(struct lpfc_hba *);
201int lpfc_sli_flush_mbox_queue(struct lpfc_hba *); 228void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *);
202int lpfc_sli_check_eratt(struct lpfc_hba *); 229int lpfc_sli_check_eratt(struct lpfc_hba *);
203int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, 230void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
204 struct lpfc_sli_ring *, uint32_t); 231 struct lpfc_sli_ring *, uint32_t);
232int lpfc_sli4_handle_received_buffer(struct lpfc_hba *);
205void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 233void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
206int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, 234int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
207 struct lpfc_iocbq *, uint32_t); 235 struct lpfc_iocbq *, uint32_t);
208void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); 236void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
209void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); 237void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
@@ -237,7 +265,7 @@ struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
237 265
238int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 266int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
239 267
240int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *, 268int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
241 struct lpfc_iocbq *, struct lpfc_iocbq *, 269 struct lpfc_iocbq *, struct lpfc_iocbq *,
242 uint32_t); 270 uint32_t);
243void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *, 271void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
@@ -254,6 +282,12 @@ void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
254const char* lpfc_info(struct Scsi_Host *); 282const char* lpfc_info(struct Scsi_Host *);
255int lpfc_scan_finished(struct Scsi_Host *, unsigned long); 283int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
256 284
285int lpfc_init_api_table_setup(struct lpfc_hba *, uint8_t);
286int lpfc_sli_api_table_setup(struct lpfc_hba *, uint8_t);
287int lpfc_scsi_api_table_setup(struct lpfc_hba *, uint8_t);
288int lpfc_mbox_api_table_setup(struct lpfc_hba *, uint8_t);
289int lpfc_api_table_setup(struct lpfc_hba *, uint8_t);
290
257void lpfc_get_cfgparam(struct lpfc_hba *); 291void lpfc_get_cfgparam(struct lpfc_hba *);
258void lpfc_get_vport_cfgparam(struct lpfc_vport *); 292void lpfc_get_vport_cfgparam(struct lpfc_vport *);
259int lpfc_alloc_sysfs_attr(struct lpfc_vport *); 293int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
@@ -314,8 +348,15 @@ lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
314 struct lpfc_iocbq *); 348 struct lpfc_iocbq *);
315struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *); 349struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *);
316void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *); 350void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
351void lpfc_create_static_vport(struct lpfc_hba *);
352void lpfc_stop_hba_timers(struct lpfc_hba *);
353void lpfc_stop_port(struct lpfc_hba *);
354void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
355int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
356void lpfc_start_fdiscs(struct lpfc_hba *phba);
317 357
318#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) 358#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
319#define HBA_EVENT_RSCN 5 359#define HBA_EVENT_RSCN 5
320#define HBA_EVENT_LINK_UP 2 360#define HBA_EVENT_LINK_UP 2
321#define HBA_EVENT_LINK_DOWN 3 361#define HBA_EVENT_LINK_DOWN 3
362
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 896c7b0351e5..0e532f072eb3 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -32,8 +32,10 @@
32#include <scsi/scsi_host.h> 32#include <scsi/scsi_host.h>
33#include <scsi/scsi_transport_fc.h> 33#include <scsi/scsi_transport_fc.h>
34 34
35#include "lpfc_hw4.h"
35#include "lpfc_hw.h" 36#include "lpfc_hw.h"
36#include "lpfc_sli.h" 37#include "lpfc_sli.h"
38#include "lpfc_sli4.h"
37#include "lpfc_nl.h" 39#include "lpfc_nl.h"
38#include "lpfc_disc.h" 40#include "lpfc_disc.h"
39#include "lpfc_scsi.h" 41#include "lpfc_scsi.h"
@@ -267,8 +269,6 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
267 uint32_t tmo, uint8_t retry) 269 uint32_t tmo, uint8_t retry)
268{ 270{
269 struct lpfc_hba *phba = vport->phba; 271 struct lpfc_hba *phba = vport->phba;
270 struct lpfc_sli *psli = &phba->sli;
271 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
272 IOCB_t *icmd; 272 IOCB_t *icmd;
273 struct lpfc_iocbq *geniocb; 273 struct lpfc_iocbq *geniocb;
274 int rc; 274 int rc;
@@ -331,7 +331,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
331 geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT; 331 geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
332 geniocb->vport = vport; 332 geniocb->vport = vport;
333 geniocb->retry = retry; 333 geniocb->retry = retry;
334 rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0); 334 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
335 335
336 if (rc == IOCB_ERROR) { 336 if (rc == IOCB_ERROR) {
337 lpfc_sli_release_iocbq(phba, geniocb); 337 lpfc_sli_release_iocbq(phba, geniocb);
@@ -1578,6 +1578,9 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
1578 case LA_8GHZ_LINK: 1578 case LA_8GHZ_LINK:
1579 ae->un.PortSpeed = HBA_PORTSPEED_8GBIT; 1579 ae->un.PortSpeed = HBA_PORTSPEED_8GBIT;
1580 break; 1580 break;
1581 case LA_10GHZ_LINK:
1582 ae->un.PortSpeed = HBA_PORTSPEED_10GBIT;
1583 break;
1581 default: 1584 default:
1582 ae->un.PortSpeed = 1585 ae->un.PortSpeed =
1583 HBA_PORTSPEED_UNKNOWN; 1586 HBA_PORTSPEED_UNKNOWN;
@@ -1729,8 +1732,10 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
1729 uint32_t *ptr, str[4]; 1732 uint32_t *ptr, str[4];
1730 uint8_t *fwname; 1733 uint8_t *fwname;
1731 1734
1732 if (vp->rev.rBit) { 1735 if (phba->sli_rev == LPFC_SLI_REV4)
1733 if (psli->sli_flag & LPFC_SLI2_ACTIVE) 1736 sprintf(fwrevision, "%s", vp->rev.opFwName);
1737 else if (vp->rev.rBit) {
1738 if (psli->sli_flag & LPFC_SLI_ACTIVE)
1734 rev = vp->rev.sli2FwRev; 1739 rev = vp->rev.sli2FwRev;
1735 else 1740 else
1736 rev = vp->rev.sli1FwRev; 1741 rev = vp->rev.sli1FwRev;
@@ -1756,7 +1761,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
1756 } 1761 }
1757 b4 = (rev & 0x0000000f); 1762 b4 = (rev & 0x0000000f);
1758 1763
1759 if (psli->sli_flag & LPFC_SLI2_ACTIVE) 1764 if (psli->sli_flag & LPFC_SLI_ACTIVE)
1760 fwname = vp->rev.sli2FwName; 1765 fwname = vp->rev.sli2FwName;
1761 else 1766 else
1762 fwname = vp->rev.sli1FwName; 1767 fwname = vp->rev.sli1FwName;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 52be5644e07a..8d0f0de76b63 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2007-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2007-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -33,8 +33,10 @@
33#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
34#include <scsi/scsi_transport_fc.h> 34#include <scsi/scsi_transport_fc.h>
35 35
36#include "lpfc_hw4.h"
36#include "lpfc_hw.h" 37#include "lpfc_hw.h"
37#include "lpfc_sli.h" 38#include "lpfc_sli.h"
39#include "lpfc_sli4.h"
38#include "lpfc_nl.h" 40#include "lpfc_nl.h"
39#include "lpfc_disc.h" 41#include "lpfc_disc.h"
40#include "lpfc_scsi.h" 42#include "lpfc_scsi.h"
@@ -51,8 +53,7 @@
51 * debugfs interface 53 * debugfs interface
52 * 54 *
53 * To access this interface the user should: 55 * To access this interface the user should:
54 * # mkdir /debug 56 * # mount -t debugfs none /sys/kernel/debug
55 * # mount -t debugfs none /debug
56 * 57 *
57 * The lpfc debugfs directory hierarchy is: 58 * The lpfc debugfs directory hierarchy is:
58 * lpfc/lpfcX/vportY 59 * lpfc/lpfcX/vportY
@@ -280,6 +281,8 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
280 struct lpfc_dmabuf *d_buf; 281 struct lpfc_dmabuf *d_buf;
281 struct hbq_dmabuf *hbq_buf; 282 struct hbq_dmabuf *hbq_buf;
282 283
284 if (phba->sli_rev != 3)
285 return 0;
283 cnt = LPFC_HBQINFO_SIZE; 286 cnt = LPFC_HBQINFO_SIZE;
284 spin_lock_irq(&phba->hbalock); 287 spin_lock_irq(&phba->hbalock);
285 288
@@ -489,12 +492,15 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
489 pring->next_cmdidx, pring->local_getidx, 492 pring->next_cmdidx, pring->local_getidx,
490 pring->flag, pgpp->rspPutInx, pring->numRiocb); 493 pring->flag, pgpp->rspPutInx, pring->numRiocb);
491 } 494 }
492 word0 = readl(phba->HAregaddr); 495
493 word1 = readl(phba->CAregaddr); 496 if (phba->sli_rev <= LPFC_SLI_REV3) {
494 word2 = readl(phba->HSregaddr); 497 word0 = readl(phba->HAregaddr);
495 word3 = readl(phba->HCregaddr); 498 word1 = readl(phba->CAregaddr);
496 len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x HC:%08x\n", 499 word2 = readl(phba->HSregaddr);
497 word0, word1, word2, word3); 500 word3 = readl(phba->HCregaddr);
501 len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
502 "HC:%08x\n", word0, word1, word2, word3);
503 }
498 spin_unlock_irq(&phba->hbalock); 504 spin_unlock_irq(&phba->hbalock);
499 return len; 505 return len;
500} 506}
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index ffd108972072..1142070e9484 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -135,6 +135,7 @@ struct lpfc_nodelist {
135#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ 135#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */
136#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ 136#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */
137#define NLP_SC_REQ 0x20000000 /* Target requires authentication */ 137#define NLP_SC_REQ 0x20000000 /* Target requires authentication */
138#define NLP_RPI_VALID 0x80000000 /* nlp_rpi is valid */
138 139
139/* ndlp usage management macros */ 140/* ndlp usage management macros */
140#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \ 141#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b8b34cf5c3d2..f72fdf23bf1b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
29#include <scsi/scsi_transport_fc.h> 29#include <scsi/scsi_transport_fc.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -84,7 +86,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
84 uint32_t ha_copy; 86 uint32_t ha_copy;
85 87
86 if (vport->port_state >= LPFC_VPORT_READY || 88 if (vport->port_state >= LPFC_VPORT_READY ||
87 phba->link_state == LPFC_LINK_DOWN) 89 phba->link_state == LPFC_LINK_DOWN ||
90 phba->sli_rev > LPFC_SLI_REV3)
88 return 0; 91 return 0;
89 92
90 /* Read the HBA Host Attention Register */ 93 /* Read the HBA Host Attention Register */
@@ -165,6 +168,19 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
165 if (elsiocb == NULL) 168 if (elsiocb == NULL)
166 return NULL; 169 return NULL;
167 170
171 /*
 172	 * If this command is for the fabric controller and the HBA is running
 173	 * in FIP mode, send FLOGI, FDISC and LOGO as FIP frames.
174 */
175 if ((did == Fabric_DID) &&
176 bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags) &&
177 ((elscmd == ELS_CMD_FLOGI) ||
178 (elscmd == ELS_CMD_FDISC) ||
179 (elscmd == ELS_CMD_LOGO)))
180 elsiocb->iocb_flag |= LPFC_FIP_ELS;
181 else
182 elsiocb->iocb_flag &= ~LPFC_FIP_ELS;
183
168 icmd = &elsiocb->iocb; 184 icmd = &elsiocb->iocb;
169 185
170 /* fill in BDEs for command */ 186 /* fill in BDEs for command */
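The FIP check added at the top of this hunk tags FLOGI, FDISC and LOGO bound for the fabric controller as FIP frames whenever the SLI-4 FIP flag is set. A standalone sketch of that classification follows; the DID and ELS command values here are simplified stand-ins, not the real lpfc header encodings.

/* Sketch only: stand-in values, not the real lpfc definitions. */
#include <stdio.h>
#include <stdint.h>

#define FABRIC_DID     0xFFFFFE   /* well-known fabric controller D_ID */
#define ELS_CMD_PLOGI  0x03
#define ELS_CMD_FLOGI  0x04
#define ELS_CMD_LOGO   0x05
#define ELS_CMD_FDISC  0x51
#define LPFC_FIP_ELS   0x1        /* invented flag bit for this sketch */

/* Return the iocb_flag bit to set for a command sent to 'did' when the
 * HBA is running in FIP mode (fip_mode != 0). */
static uint32_t fip_els_flag(uint32_t did, uint32_t elscmd, int fip_mode)
{
	if (did == FABRIC_DID && fip_mode &&
	    (elscmd == ELS_CMD_FLOGI ||
	     elscmd == ELS_CMD_FDISC ||
	     elscmd == ELS_CMD_LOGO))
		return LPFC_FIP_ELS;
	return 0;
}

int main(void)
{
	printf("FLOGI to fabric, FIP mode -> 0x%x\n",
	       fip_els_flag(FABRIC_DID, ELS_CMD_FLOGI, 1));
	printf("PLOGI to fabric, FIP mode -> 0x%x\n",
	       fip_els_flag(FABRIC_DID, ELS_CMD_PLOGI, 1));
	return 0;
}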
@@ -219,7 +235,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
219 icmd->un.elsreq64.myID = vport->fc_myDID; 235 icmd->un.elsreq64.myID = vport->fc_myDID;
220 236
221 /* For ELS_REQUEST64_CR, use the VPI by default */ 237 /* For ELS_REQUEST64_CR, use the VPI by default */
222 icmd->ulpContext = vport->vpi; 238 icmd->ulpContext = vport->vpi + phba->vpi_base;
223 icmd->ulpCt_h = 0; 239 icmd->ulpCt_h = 0;
224 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ 240 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
225 if (elscmd == ELS_CMD_ECHO) 241 if (elscmd == ELS_CMD_ECHO)
@@ -305,7 +321,7 @@ els_iocb_free_pcmb_exit:
305 * 0 - successfully issued fabric registration login for @vport 321 * 0 - successfully issued fabric registration login for @vport
306 * -ENXIO -- failed to issue fabric registration login for @vport 322 * -ENXIO -- failed to issue fabric registration login for @vport
307 **/ 323 **/
308static int 324int
309lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) 325lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
310{ 326{
311 struct lpfc_hba *phba = vport->phba; 327 struct lpfc_hba *phba = vport->phba;
@@ -345,8 +361,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
345 err = 4; 361 err = 4;
346 goto fail; 362 goto fail;
347 } 363 }
348 rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 364 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0);
349 0);
350 if (rc) { 365 if (rc) {
351 err = 5; 366 err = 5;
352 goto fail_free_mbox; 367 goto fail_free_mbox;
@@ -386,6 +401,75 @@ fail:
386} 401}
387 402
388/** 403/**
404 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
405 * @vport: pointer to a host virtual N_Port data structure.
406 *
407 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
408 * the @vport. This mailbox command is necessary for FCoE only.
409 *
410 * Return code
411 * 0 - successfully issued REG_VFI for @vport
412 * A failure code otherwise.
413 **/
414static int
415lpfc_issue_reg_vfi(struct lpfc_vport *vport)
416{
417 struct lpfc_hba *phba = vport->phba;
418 LPFC_MBOXQ_t *mboxq;
419 struct lpfc_nodelist *ndlp;
420 struct serv_parm *sp;
421 struct lpfc_dmabuf *dmabuf;
422 int rc = 0;
423
424 sp = &phba->fc_fabparam;
425 ndlp = lpfc_findnode_did(vport, Fabric_DID);
426 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
427 rc = -ENODEV;
428 goto fail;
429 }
430
431 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
432 if (!dmabuf) {
433 rc = -ENOMEM;
434 goto fail;
435 }
436 dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
437 if (!dmabuf->virt) {
438 rc = -ENOMEM;
439 goto fail_free_dmabuf;
440 }
441 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
442 if (!mboxq) {
443 rc = -ENOMEM;
444 goto fail_free_coherent;
445 }
446 vport->port_state = LPFC_FABRIC_CFG_LINK;
447 memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
448 lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
449 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
450 mboxq->vport = vport;
451 mboxq->context1 = dmabuf;
452 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
453 if (rc == MBX_NOT_FINISHED) {
454 rc = -ENXIO;
455 goto fail_free_mbox;
456 }
457 return 0;
458
459fail_free_mbox:
460 mempool_free(mboxq, phba->mbox_mem_pool);
461fail_free_coherent:
462 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
463fail_free_dmabuf:
464 kfree(dmabuf);
465fail:
466 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
467 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
468 "0289 Issue Register VFI failed: Err %d\n", rc);
469 return rc;
470}
471
472/**
389 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port 473 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
390 * @vport: pointer to a host virtual N_Port data structure. 474 * @vport: pointer to a host virtual N_Port data structure.
391 * @ndlp: pointer to a node-list data structure. 475 * @ndlp: pointer to a node-list data structure.
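lpfc_issue_reg_vfi added in this hunk uses the usual kernel layered goto-cleanup idiom: each allocation gets its own failure label so only the resources acquired so far are released, and on success ownership of the buffers passes to the mailbox completion handler. A compact userspace sketch of the same shape, with plain malloc()/free() standing in for lpfc_mbuf_alloc()/mempool_alloc() and an invented submit_mbox() helper:

#include <errno.h>
#include <stdlib.h>

struct dmabuf { void *virt; };
struct mbox   { int pending; };

static int submit_mbox(struct mbox *m) { m->pending = 1; return 0; }

static int issue_reg_vfi_like(void)
{
	struct dmabuf *dmabuf;
	struct mbox *mboxq;
	int rc;

	dmabuf = calloc(1, sizeof(*dmabuf));
	if (!dmabuf) {
		rc = -ENOMEM;
		goto fail;
	}
	dmabuf->virt = malloc(4096);
	if (!dmabuf->virt) {
		rc = -ENOMEM;
		goto fail_free_dmabuf;
	}
	mboxq = calloc(1, sizeof(*mboxq));
	if (!mboxq) {
		rc = -ENOMEM;
		goto fail_free_coherent;
	}
	rc = submit_mbox(mboxq);
	if (rc) {
		rc = -ENXIO;
		goto fail_free_mbox;
	}
	/* On success the completion handler owns dmabuf and mboxq. */
	return 0;

fail_free_mbox:
	free(mboxq);
fail_free_coherent:
	free(dmabuf->virt);
fail_free_dmabuf:
	free(dmabuf);
fail:
	return rc;
}

int main(void) { return issue_reg_vfi_like() ? 1 : 0; }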
@@ -497,17 +581,24 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
497 } 581 }
498 } 582 }
499 583
500 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); 584 if (phba->sli_rev < LPFC_SLI_REV4) {
501 585 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
502 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && 586 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
503 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) { 587 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
504 lpfc_register_new_vport(phba, vport, ndlp); 588 lpfc_register_new_vport(phba, vport, ndlp);
505 return 0; 589 else
590 lpfc_issue_fabric_reglogin(vport);
591 } else {
592 ndlp->nlp_type |= NLP_FABRIC;
593 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
594 if (vport->vfi_state & LPFC_VFI_REGISTERED) {
595 lpfc_start_fdiscs(phba);
596 lpfc_do_scr_ns_plogi(phba, vport);
597 } else
598 lpfc_issue_reg_vfi(vport);
506 } 599 }
507 lpfc_issue_fabric_reglogin(vport);
508 return 0; 600 return 0;
509} 601}
510
511/** 602/**
512 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port 603 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
513 * @vport: pointer to a host virtual N_Port data structure. 604 * @vport: pointer to a host virtual N_Port data structure.
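The reworked lpfc_cmpl_els_flogi_fabric branches on the SLI revision: SLI-3 keeps the REG_VPI/fabric REG_LOGIN path, while SLI-4 either starts FDISCs (VFI already registered) or issues REG_VFI first. A small decision-table sketch of that flow, using invented enum names for the calls the hunk actually makes (lpfc_register_new_vport, lpfc_issue_fabric_reglogin, lpfc_start_fdiscs, lpfc_issue_reg_vfi):

#include <assert.h>

enum next_step {
	STEP_REG_VPI,          /* NPIV vport still needs REG_VPI       */
	STEP_FABRIC_REGLOGIN,  /* classic SLI-3 fabric REG_LOGIN       */
	STEP_START_FDISCS,     /* SLI-4, VFI registered: run FDISCs    */
	STEP_REG_VFI,          /* SLI-4, VFI not yet registered        */
};

static enum next_step after_fabric_flogi(int sli_rev, int npiv_enabled,
					 int needs_reg_vpi, int vfi_registered)
{
	if (sli_rev < 4)
		return (npiv_enabled && needs_reg_vpi) ? STEP_REG_VPI
						       : STEP_FABRIC_REGLOGIN;
	return vfi_registered ? STEP_START_FDISCS : STEP_REG_VFI;
}

int main(void)
{
	assert(after_fabric_flogi(3, 1, 1, 0) == STEP_REG_VPI);
	assert(after_fabric_flogi(3, 0, 0, 0) == STEP_FABRIC_REGLOGIN);
	assert(after_fabric_flogi(4, 0, 0, 1) == STEP_START_FDISCS);
	assert(after_fabric_flogi(4, 0, 0, 0) == STEP_REG_VFI);
	return 0;
}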
@@ -815,9 +906,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
815 if (sp->cmn.fcphHigh < FC_PH3) 906 if (sp->cmn.fcphHigh < FC_PH3)
816 sp->cmn.fcphHigh = FC_PH3; 907 sp->cmn.fcphHigh = FC_PH3;
817 908
818 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 909 if (phba->sli_rev == LPFC_SLI_REV4) {
910 elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
911 elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
912 /* FLOGI needs to be 3 for WQE FCFI */
913 /* Set the fcfi to the fcfi we registered with */
914 elsiocb->iocb.ulpContext = phba->fcf.fcfi;
915 } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
819 sp->cmn.request_multiple_Nport = 1; 916 sp->cmn.request_multiple_Nport = 1;
820
821 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ 917 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
822 icmd->ulpCt_h = 1; 918 icmd->ulpCt_h = 1;
823 icmd->ulpCt_l = 0; 919 icmd->ulpCt_l = 0;
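For SLI-4 the FLOGI hunk above packs the two-bit context type (FCFI) into the separate ulpCt_h/ulpCt_l IOCB bits and puts the registered FCFI into ulpContext. A minimal sketch of the two-bit split and its inverse; the CT values are illustrative, the real SLI4_CT_* encodings come from the SLI-4 headers:

#include <assert.h>
#include <stdint.h>

/* Hypothetical CT values for illustration only. */
enum { CT_RPI = 0, CT_VPI = 1, CT_VFI = 2, CT_FCFI = 3 };

struct iocb_bits { uint8_t ulpCt_h, ulpCt_l; };

static void ct_pack(struct iocb_bits *b, unsigned ct)
{
	b->ulpCt_h = (ct >> 1) & 1;   /* high bit of the 2-bit CT field */
	b->ulpCt_l = ct & 1;          /* low bit of the 2-bit CT field  */
}

static unsigned ct_unpack(const struct iocb_bits *b)
{
	return (unsigned)(b->ulpCt_h << 1) | b->ulpCt_l;
}

int main(void)
{
	struct iocb_bits b;
	ct_pack(&b, CT_FCFI);
	assert(b.ulpCt_h == 1 && b.ulpCt_l == 1);
	assert(ct_unpack(&b) == CT_FCFI);
	return 0;
}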
@@ -930,6 +1026,8 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
930 if (!ndlp) 1026 if (!ndlp)
931 return 0; 1027 return 0;
932 lpfc_nlp_init(vport, ndlp, Fabric_DID); 1028 lpfc_nlp_init(vport, ndlp, Fabric_DID);
1029 /* Set the node type */
1030 ndlp->nlp_type |= NLP_FABRIC;
933 /* Put ndlp onto node list */ 1031 /* Put ndlp onto node list */
934 lpfc_enqueue_node(vport, ndlp); 1032 lpfc_enqueue_node(vport, ndlp);
935 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 1033 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
@@ -1350,14 +1448,12 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1350 IOCB_t *icmd; 1448 IOCB_t *icmd;
1351 struct lpfc_nodelist *ndlp; 1449 struct lpfc_nodelist *ndlp;
1352 struct lpfc_iocbq *elsiocb; 1450 struct lpfc_iocbq *elsiocb;
1353 struct lpfc_sli_ring *pring;
1354 struct lpfc_sli *psli; 1451 struct lpfc_sli *psli;
1355 uint8_t *pcmd; 1452 uint8_t *pcmd;
1356 uint16_t cmdsize; 1453 uint16_t cmdsize;
1357 int ret; 1454 int ret;
1358 1455
1359 psli = &phba->sli; 1456 psli = &phba->sli;
1360 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1361 1457
1362 ndlp = lpfc_findnode_did(vport, did); 1458 ndlp = lpfc_findnode_did(vport, did);
1363 if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) 1459 if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
@@ -1391,7 +1487,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1391 1487
1392 phba->fc_stat.elsXmitPLOGI++; 1488 phba->fc_stat.elsXmitPLOGI++;
1393 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; 1489 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
1394 ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 1490 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
1395 1491
1396 if (ret == IOCB_ERROR) { 1492 if (ret == IOCB_ERROR) {
1397 lpfc_els_free_iocb(phba, elsiocb); 1493 lpfc_els_free_iocb(phba, elsiocb);
@@ -1501,14 +1597,9 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1501 PRLI *npr; 1597 PRLI *npr;
1502 IOCB_t *icmd; 1598 IOCB_t *icmd;
1503 struct lpfc_iocbq *elsiocb; 1599 struct lpfc_iocbq *elsiocb;
1504 struct lpfc_sli_ring *pring;
1505 struct lpfc_sli *psli;
1506 uint8_t *pcmd; 1600 uint8_t *pcmd;
1507 uint16_t cmdsize; 1601 uint16_t cmdsize;
1508 1602
1509 psli = &phba->sli;
1510 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1511
1512 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 1603 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
1513 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1604 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1514 ndlp->nlp_DID, ELS_CMD_PRLI); 1605 ndlp->nlp_DID, ELS_CMD_PRLI);
@@ -1550,7 +1641,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1550 spin_lock_irq(shost->host_lock); 1641 spin_lock_irq(shost->host_lock);
1551 ndlp->nlp_flag |= NLP_PRLI_SND; 1642 ndlp->nlp_flag |= NLP_PRLI_SND;
1552 spin_unlock_irq(shost->host_lock); 1643 spin_unlock_irq(shost->host_lock);
1553 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1644 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
1645 IOCB_ERROR) {
1554 spin_lock_irq(shost->host_lock); 1646 spin_lock_irq(shost->host_lock);
1555 ndlp->nlp_flag &= ~NLP_PRLI_SND; 1647 ndlp->nlp_flag &= ~NLP_PRLI_SND;
1556 spin_unlock_irq(shost->host_lock); 1648 spin_unlock_irq(shost->host_lock);
@@ -1608,7 +1700,8 @@ lpfc_adisc_done(struct lpfc_vport *vport)
1608 * and continue discovery. 1700 * and continue discovery.
1609 */ 1701 */
1610 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 1702 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1611 !(vport->fc_flag & FC_RSCN_MODE)) { 1703 !(vport->fc_flag & FC_RSCN_MODE) &&
1704 (phba->sli_rev < LPFC_SLI_REV4)) {
1612 lpfc_issue_reg_vpi(phba, vport); 1705 lpfc_issue_reg_vpi(phba, vport);
1613 return; 1706 return;
1614 } 1707 }
@@ -1788,8 +1881,6 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1788 ADISC *ap; 1881 ADISC *ap;
1789 IOCB_t *icmd; 1882 IOCB_t *icmd;
1790 struct lpfc_iocbq *elsiocb; 1883 struct lpfc_iocbq *elsiocb;
1791 struct lpfc_sli *psli = &phba->sli;
1792 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
1793 uint8_t *pcmd; 1884 uint8_t *pcmd;
1794 uint16_t cmdsize; 1885 uint16_t cmdsize;
1795 1886
@@ -1822,7 +1913,8 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1822 spin_lock_irq(shost->host_lock); 1913 spin_lock_irq(shost->host_lock);
1823 ndlp->nlp_flag |= NLP_ADISC_SND; 1914 ndlp->nlp_flag |= NLP_ADISC_SND;
1824 spin_unlock_irq(shost->host_lock); 1915 spin_unlock_irq(shost->host_lock);
1825 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1916 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
1917 IOCB_ERROR) {
1826 spin_lock_irq(shost->host_lock); 1918 spin_lock_irq(shost->host_lock);
1827 ndlp->nlp_flag &= ~NLP_ADISC_SND; 1919 ndlp->nlp_flag &= ~NLP_ADISC_SND;
1828 spin_unlock_irq(shost->host_lock); 1920 spin_unlock_irq(shost->host_lock);
@@ -1937,15 +2029,10 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1937 struct lpfc_hba *phba = vport->phba; 2029 struct lpfc_hba *phba = vport->phba;
1938 IOCB_t *icmd; 2030 IOCB_t *icmd;
1939 struct lpfc_iocbq *elsiocb; 2031 struct lpfc_iocbq *elsiocb;
1940 struct lpfc_sli_ring *pring;
1941 struct lpfc_sli *psli;
1942 uint8_t *pcmd; 2032 uint8_t *pcmd;
1943 uint16_t cmdsize; 2033 uint16_t cmdsize;
1944 int rc; 2034 int rc;
1945 2035
1946 psli = &phba->sli;
1947 pring = &psli->ring[LPFC_ELS_RING];
1948
1949 spin_lock_irq(shost->host_lock); 2036 spin_lock_irq(shost->host_lock);
1950 if (ndlp->nlp_flag & NLP_LOGO_SND) { 2037 if (ndlp->nlp_flag & NLP_LOGO_SND) {
1951 spin_unlock_irq(shost->host_lock); 2038 spin_unlock_irq(shost->host_lock);
@@ -1978,7 +2065,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1978 spin_lock_irq(shost->host_lock); 2065 spin_lock_irq(shost->host_lock);
1979 ndlp->nlp_flag |= NLP_LOGO_SND; 2066 ndlp->nlp_flag |= NLP_LOGO_SND;
1980 spin_unlock_irq(shost->host_lock); 2067 spin_unlock_irq(shost->host_lock);
1981 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 2068 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
1982 2069
1983 if (rc == IOCB_ERROR) { 2070 if (rc == IOCB_ERROR) {
1984 spin_lock_irq(shost->host_lock); 2071 spin_lock_irq(shost->host_lock);
@@ -2058,14 +2145,12 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2058 struct lpfc_hba *phba = vport->phba; 2145 struct lpfc_hba *phba = vport->phba;
2059 IOCB_t *icmd; 2146 IOCB_t *icmd;
2060 struct lpfc_iocbq *elsiocb; 2147 struct lpfc_iocbq *elsiocb;
2061 struct lpfc_sli_ring *pring;
2062 struct lpfc_sli *psli; 2148 struct lpfc_sli *psli;
2063 uint8_t *pcmd; 2149 uint8_t *pcmd;
2064 uint16_t cmdsize; 2150 uint16_t cmdsize;
2065 struct lpfc_nodelist *ndlp; 2151 struct lpfc_nodelist *ndlp;
2066 2152
2067 psli = &phba->sli; 2153 psli = &phba->sli;
2068 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2069 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 2154 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
2070 2155
2071 ndlp = lpfc_findnode_did(vport, nportid); 2156 ndlp = lpfc_findnode_did(vport, nportid);
@@ -2108,7 +2193,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2108 2193
2109 phba->fc_stat.elsXmitSCR++; 2194 phba->fc_stat.elsXmitSCR++;
2110 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 2195 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2111 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 2196 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2197 IOCB_ERROR) {
2112	 /* The additional lpfc_nlp_put will cause the following 2198	 * lpfc_els_free_iocb routine to trigger the release of
2113	 * lpfc_els_free_iocb routine to trigger the release of 2199	 * the node.
2114 * the node. 2200 * the node.
@@ -2152,7 +2238,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2152 struct lpfc_hba *phba = vport->phba; 2238 struct lpfc_hba *phba = vport->phba;
2153 IOCB_t *icmd; 2239 IOCB_t *icmd;
2154 struct lpfc_iocbq *elsiocb; 2240 struct lpfc_iocbq *elsiocb;
2155 struct lpfc_sli_ring *pring;
2156 struct lpfc_sli *psli; 2241 struct lpfc_sli *psli;
2157 FARP *fp; 2242 FARP *fp;
2158 uint8_t *pcmd; 2243 uint8_t *pcmd;
@@ -2162,7 +2247,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2162 struct lpfc_nodelist *ndlp; 2247 struct lpfc_nodelist *ndlp;
2163 2248
2164 psli = &phba->sli; 2249 psli = &phba->sli;
2165 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2166 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 2250 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
2167 2251
2168 ndlp = lpfc_findnode_did(vport, nportid); 2252 ndlp = lpfc_findnode_did(vport, nportid);
@@ -2219,7 +2303,8 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2219 2303
2220 phba->fc_stat.elsXmitFARPR++; 2304 phba->fc_stat.elsXmitFARPR++;
2221 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 2305 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2222 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 2306 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2307 IOCB_ERROR) {
2223 /* The additional lpfc_nlp_put will cause the following 2308 /* The additional lpfc_nlp_put will cause the following
2224 * lpfc_els_free_iocb routine to trigger the release of 2309 * lpfc_els_free_iocb routine to trigger the release of
2225 * the node. 2310 * the node.
@@ -2949,6 +3034,14 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2949 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3034 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2950 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3035 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2951 3036
3037 /*
3038 * This routine is used to register and unregister in previous SLI
3039 * modes.
3040 */
3041 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
3042 (phba->sli_rev == LPFC_SLI_REV4))
3043 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
3044
2952 pmb->context1 = NULL; 3045 pmb->context1 = NULL;
2953 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3046 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2954 kfree(mp); 3047 kfree(mp);
@@ -2961,6 +3054,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2961 */ 3054 */
2962 lpfc_nlp_not_used(ndlp); 3055 lpfc_nlp_not_used(ndlp);
2963 } 3056 }
3057
2964 return; 3058 return;
2965} 3059}
2966 3060
@@ -3170,7 +3264,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3170 IOCB_t *icmd; 3264 IOCB_t *icmd;
3171 IOCB_t *oldcmd; 3265 IOCB_t *oldcmd;
3172 struct lpfc_iocbq *elsiocb; 3266 struct lpfc_iocbq *elsiocb;
3173 struct lpfc_sli_ring *pring;
3174 struct lpfc_sli *psli; 3267 struct lpfc_sli *psli;
3175 uint8_t *pcmd; 3268 uint8_t *pcmd;
3176 uint16_t cmdsize; 3269 uint16_t cmdsize;
@@ -3178,7 +3271,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3178 ELS_PKT *els_pkt_ptr; 3271 ELS_PKT *els_pkt_ptr;
3179 3272
3180 psli = &phba->sli; 3273 psli = &phba->sli;
3181 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
3182 oldcmd = &oldiocb->iocb; 3274 oldcmd = &oldiocb->iocb;
3183 3275
3184 switch (flag) { 3276 switch (flag) {
@@ -3266,7 +3358,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3266 } 3358 }
3267 3359
3268 phba->fc_stat.elsXmitACC++; 3360 phba->fc_stat.elsXmitACC++;
3269 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3361 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3270 if (rc == IOCB_ERROR) { 3362 if (rc == IOCB_ERROR) {
3271 lpfc_els_free_iocb(phba, elsiocb); 3363 lpfc_els_free_iocb(phba, elsiocb);
3272 return 1; 3364 return 1;
@@ -3305,15 +3397,12 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
3305 IOCB_t *icmd; 3397 IOCB_t *icmd;
3306 IOCB_t *oldcmd; 3398 IOCB_t *oldcmd;
3307 struct lpfc_iocbq *elsiocb; 3399 struct lpfc_iocbq *elsiocb;
3308 struct lpfc_sli_ring *pring;
3309 struct lpfc_sli *psli; 3400 struct lpfc_sli *psli;
3310 uint8_t *pcmd; 3401 uint8_t *pcmd;
3311 uint16_t cmdsize; 3402 uint16_t cmdsize;
3312 int rc; 3403 int rc;
3313 3404
3314 psli = &phba->sli; 3405 psli = &phba->sli;
3315 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
3316
3317 cmdsize = 2 * sizeof(uint32_t); 3406 cmdsize = 2 * sizeof(uint32_t);
3318 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 3407 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3319 ndlp->nlp_DID, ELS_CMD_LS_RJT); 3408 ndlp->nlp_DID, ELS_CMD_LS_RJT);
@@ -3346,7 +3435,7 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
3346 3435
3347 phba->fc_stat.elsXmitLSRJT++; 3436 phba->fc_stat.elsXmitLSRJT++;
3348 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3437 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3349 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3438 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3350 3439
3351 if (rc == IOCB_ERROR) { 3440 if (rc == IOCB_ERROR) {
3352 lpfc_els_free_iocb(phba, elsiocb); 3441 lpfc_els_free_iocb(phba, elsiocb);
@@ -3379,8 +3468,6 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3379 struct lpfc_nodelist *ndlp) 3468 struct lpfc_nodelist *ndlp)
3380{ 3469{
3381 struct lpfc_hba *phba = vport->phba; 3470 struct lpfc_hba *phba = vport->phba;
3382 struct lpfc_sli *psli = &phba->sli;
3383 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
3384 ADISC *ap; 3471 ADISC *ap;
3385 IOCB_t *icmd, *oldcmd; 3472 IOCB_t *icmd, *oldcmd;
3386 struct lpfc_iocbq *elsiocb; 3473 struct lpfc_iocbq *elsiocb;
@@ -3422,7 +3509,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3422 3509
3423 phba->fc_stat.elsXmitACC++; 3510 phba->fc_stat.elsXmitACC++;
3424 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3511 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3425 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3512 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3426 if (rc == IOCB_ERROR) { 3513 if (rc == IOCB_ERROR) {
3427 lpfc_els_free_iocb(phba, elsiocb); 3514 lpfc_els_free_iocb(phba, elsiocb);
3428 return 1; 3515 return 1;
@@ -3459,14 +3546,12 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3459 IOCB_t *icmd; 3546 IOCB_t *icmd;
3460 IOCB_t *oldcmd; 3547 IOCB_t *oldcmd;
3461 struct lpfc_iocbq *elsiocb; 3548 struct lpfc_iocbq *elsiocb;
3462 struct lpfc_sli_ring *pring;
3463 struct lpfc_sli *psli; 3549 struct lpfc_sli *psli;
3464 uint8_t *pcmd; 3550 uint8_t *pcmd;
3465 uint16_t cmdsize; 3551 uint16_t cmdsize;
3466 int rc; 3552 int rc;
3467 3553
3468 psli = &phba->sli; 3554 psli = &phba->sli;
3469 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
3470 3555
3471 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 3556 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
3472 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 3557 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -3520,7 +3605,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3520 phba->fc_stat.elsXmitACC++; 3605 phba->fc_stat.elsXmitACC++;
3521 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3606 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3522 3607
3523 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3608 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3524 if (rc == IOCB_ERROR) { 3609 if (rc == IOCB_ERROR) {
3525 lpfc_els_free_iocb(phba, elsiocb); 3610 lpfc_els_free_iocb(phba, elsiocb);
3526 return 1; 3611 return 1;
@@ -3562,15 +3647,12 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
3562 RNID *rn; 3647 RNID *rn;
3563 IOCB_t *icmd, *oldcmd; 3648 IOCB_t *icmd, *oldcmd;
3564 struct lpfc_iocbq *elsiocb; 3649 struct lpfc_iocbq *elsiocb;
3565 struct lpfc_sli_ring *pring;
3566 struct lpfc_sli *psli; 3650 struct lpfc_sli *psli;
3567 uint8_t *pcmd; 3651 uint8_t *pcmd;
3568 uint16_t cmdsize; 3652 uint16_t cmdsize;
3569 int rc; 3653 int rc;
3570 3654
3571 psli = &phba->sli; 3655 psli = &phba->sli;
3572 pring = &psli->ring[LPFC_ELS_RING];
3573
3574 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 3656 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
3575 + (2 * sizeof(struct lpfc_name)); 3657 + (2 * sizeof(struct lpfc_name));
3576 if (format) 3658 if (format)
@@ -3626,7 +3708,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
3626 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl, 3708 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
3627 * it could be freed */ 3709 * it could be freed */
3628 3710
3629 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3711 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3630 if (rc == IOCB_ERROR) { 3712 if (rc == IOCB_ERROR) {
3631 lpfc_els_free_iocb(phba, elsiocb); 3713 lpfc_els_free_iocb(phba, elsiocb);
3632 return 1; 3714 return 1;
@@ -3839,7 +3921,9 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
3839 payload_len -= sizeof(uint32_t); 3921 payload_len -= sizeof(uint32_t);
3840 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 3922 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
3841 case RSCN_ADDRESS_FORMAT_PORT: 3923 case RSCN_ADDRESS_FORMAT_PORT:
3842 if (ns_did.un.word == rscn_did.un.word) 3924 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
3925 && (ns_did.un.b.area == rscn_did.un.b.area)
3926 && (ns_did.un.b.id == rscn_did.un.b.id))
3843 goto return_did_out; 3927 goto return_did_out;
3844 break; 3928 break;
3845 case RSCN_ADDRESS_FORMAT_AREA: 3929 case RSCN_ADDRESS_FORMAT_AREA:
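The RSCN port-address check above now compares the domain, area and id fields individually rather than the whole 32-bit D_ID word, so the unused format/resv bits can no longer cause a false mismatch. A standalone sketch of the field-wise compare with a simplified D_ID layout (the real driver uses a bit-field union):

#include <assert.h>
#include <stdint.h>

/* Simplified D_ID layout for illustration; the resv byte carries the
 * RSCN address-format bits and must be ignored in the comparison. */
struct did {
	uint8_t resv;
	uint8_t domain;
	uint8_t area;
	uint8_t id;
};

static int did_port_match(const struct did *a, const struct did *b)
{
	return a->domain == b->domain &&
	       a->area   == b->area   &&
	       a->id     == b->id;    /* resv deliberately not compared */
}

int main(void)
{
	struct did ns   = { .resv = 0x00, .domain = 1, .area = 2, .id = 3 };
	struct did rscn = { .resv = 0x0f, .domain = 1, .area = 2, .id = 3 };
	assert(did_port_match(&ns, &rscn)); /* a whole-word compare would fail */
	return 0;
}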
@@ -4300,7 +4384,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4300 lpfc_init_link(phba, mbox, 4384 lpfc_init_link(phba, mbox,
4301 phba->cfg_topology, 4385 phba->cfg_topology,
4302 phba->cfg_link_speed); 4386 phba->cfg_link_speed);
4303 mbox->mb.un.varInitLnk.lipsr_AL_PA = 0; 4387 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
4304 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4388 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4305 mbox->vport = vport; 4389 mbox->vport = vport;
4306 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4390 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4440,8 +4524,6 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4440static void 4524static void
4441lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 4525lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4442{ 4526{
4443 struct lpfc_sli *psli = &phba->sli;
4444 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
4445 MAILBOX_t *mb; 4527 MAILBOX_t *mb;
4446 IOCB_t *icmd; 4528 IOCB_t *icmd;
4447 RPS_RSP *rps_rsp; 4529 RPS_RSP *rps_rsp;
@@ -4451,7 +4533,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4451 uint16_t xri, status; 4533 uint16_t xri, status;
4452 uint32_t cmdsize; 4534 uint32_t cmdsize;
4453 4535
4454 mb = &pmb->mb; 4536 mb = &pmb->u.mb;
4455 4537
4456 ndlp = (struct lpfc_nodelist *) pmb->context2; 4538 ndlp = (struct lpfc_nodelist *) pmb->context2;
4457 xri = (uint16_t) ((unsigned long)(pmb->context1)); 4539 xri = (uint16_t) ((unsigned long)(pmb->context1));
@@ -4507,7 +4589,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4507 ndlp->nlp_rpi); 4589 ndlp->nlp_rpi);
4508 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4590 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4509 phba->fc_stat.elsXmitACC++; 4591 phba->fc_stat.elsXmitACC++;
4510 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) 4592 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
4511 lpfc_els_free_iocb(phba, elsiocb); 4593 lpfc_els_free_iocb(phba, elsiocb);
4512 return; 4594 return;
4513} 4595}
@@ -4616,8 +4698,6 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
4616 IOCB_t *icmd, *oldcmd; 4698 IOCB_t *icmd, *oldcmd;
4617 RPL_RSP rpl_rsp; 4699 RPL_RSP rpl_rsp;
4618 struct lpfc_iocbq *elsiocb; 4700 struct lpfc_iocbq *elsiocb;
4619 struct lpfc_sli *psli = &phba->sli;
4620 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
4621 uint8_t *pcmd; 4701 uint8_t *pcmd;
4622 4702
4623 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4703 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -4654,7 +4734,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
4654 ndlp->nlp_rpi); 4734 ndlp->nlp_rpi);
4655 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4735 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4656 phba->fc_stat.elsXmitACC++; 4736 phba->fc_stat.elsXmitACC++;
4657 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 4737 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
4738 IOCB_ERROR) {
4658 lpfc_els_free_iocb(phba, elsiocb); 4739 lpfc_els_free_iocb(phba, elsiocb);
4659 return 1; 4740 return 1;
4660 } 4741 }
@@ -4883,7 +4964,10 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4883 } else { 4964 } else {
4884 /* FAN verified - skip FLOGI */ 4965 /* FAN verified - skip FLOGI */
4885 vport->fc_myDID = vport->fc_prevDID; 4966 vport->fc_myDID = vport->fc_prevDID;
4886 lpfc_issue_fabric_reglogin(vport); 4967 if (phba->sli_rev < LPFC_SLI_REV4)
4968 lpfc_issue_fabric_reglogin(vport);
4969 else
4970 lpfc_issue_reg_vfi(vport);
4887 } 4971 }
4888 } 4972 }
4889 return 0; 4973 return 0;
@@ -5566,11 +5650,10 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5566 5650
5567dropit: 5651dropit:
5568 if (vport && !(vport->load_flag & FC_UNLOADING)) 5652 if (vport && !(vport->load_flag & FC_UNLOADING))
5569 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 5653 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5570 "(%d):0111 Dropping received ELS cmd " 5654 "0111 Dropping received ELS cmd "
5571 "Data: x%x x%x x%x\n", 5655 "Data: x%x x%x x%x\n",
5572 vport->vpi, icmd->ulpStatus, 5656 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
5573 icmd->un.ulpWord[4], icmd->ulpTimeout);
5574 phba->fc_stat.elsRcvDrop++; 5657 phba->fc_stat.elsRcvDrop++;
5575} 5658}
5576 5659
@@ -5646,10 +5729,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5646 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 5729 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
5647 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 5730 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
5648 vport = phba->pport; 5731 vport = phba->pport;
5649 else { 5732 else
5650 uint16_t vpi = icmd->unsli3.rcvsli3.vpi; 5733 vport = lpfc_find_vport_by_vpid(phba,
5651 vport = lpfc_find_vport_by_vpid(phba, vpi); 5734 icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
5652 }
5653 } 5735 }
5654 /* If there are no BDEs associated 5736 /* If there are no BDEs associated
5655 * with this IOCB, there is nothing to do. 5737 * with this IOCB, there is nothing to do.
@@ -5781,7 +5863,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5781 struct lpfc_vport *vport = pmb->vport; 5863 struct lpfc_vport *vport = pmb->vport;
5782 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5864 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5783 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 5865 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
5784 MAILBOX_t *mb = &pmb->mb; 5866 MAILBOX_t *mb = &pmb->u.mb;
5785 5867
5786 spin_lock_irq(shost->host_lock); 5868 spin_lock_irq(shost->host_lock);
5787 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 5869 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
@@ -5818,7 +5900,10 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5818 5900
5819 } else { 5901 } else {
5820 if (vport == phba->pport) 5902 if (vport == phba->pport)
5821 lpfc_issue_fabric_reglogin(vport); 5903 if (phba->sli_rev < LPFC_SLI_REV4)
5904 lpfc_issue_fabric_reglogin(vport);
5905 else
5906 lpfc_issue_reg_vfi(vport);
5822 else 5907 else
5823 lpfc_do_scr_ns_plogi(phba, vport); 5908 lpfc_do_scr_ns_plogi(phba, vport);
5824 } 5909 }
@@ -5850,7 +5935,7 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
5850 5935
5851 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5936 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5852 if (mbox) { 5937 if (mbox) {
5853 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox); 5938 lpfc_reg_vpi(vport, mbox);
5854 mbox->vport = vport; 5939 mbox->vport = vport;
5855 mbox->context2 = lpfc_nlp_get(ndlp); 5940 mbox->context2 = lpfc_nlp_get(ndlp);
5856 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 5941 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
@@ -6036,9 +6121,17 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6036 icmd->un.elsreq64.myID = 0; 6121 icmd->un.elsreq64.myID = 0;
6037 icmd->un.elsreq64.fl = 1; 6122 icmd->un.elsreq64.fl = 1;
6038 6123
6039 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */ 6124 if (phba->sli_rev == LPFC_SLI_REV4) {
6040 icmd->ulpCt_h = 1; 6125 /* FDISC needs to be 1 for WQE VPI */
6041 icmd->ulpCt_l = 0; 6126 elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
6127 elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
6128 /* Set the ulpContext to the vpi */
6129 elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base;
6130 } else {
6131 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
6132 icmd->ulpCt_h = 1;
6133 icmd->ulpCt_l = 0;
6134 }
6042 6135
6043 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6136 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6044 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 6137 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
@@ -6139,7 +6232,6 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
6139{ 6232{
6140 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6233 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6141 struct lpfc_hba *phba = vport->phba; 6234 struct lpfc_hba *phba = vport->phba;
6142 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6143 IOCB_t *icmd; 6235 IOCB_t *icmd;
6144 struct lpfc_iocbq *elsiocb; 6236 struct lpfc_iocbq *elsiocb;
6145 uint8_t *pcmd; 6237 uint8_t *pcmd;
@@ -6169,7 +6261,8 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
6169 spin_lock_irq(shost->host_lock); 6261 spin_lock_irq(shost->host_lock);
6170 ndlp->nlp_flag |= NLP_LOGO_SND; 6262 ndlp->nlp_flag |= NLP_LOGO_SND;
6171 spin_unlock_irq(shost->host_lock); 6263 spin_unlock_irq(shost->host_lock);
6172 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 6264 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
6265 IOCB_ERROR) {
6173 spin_lock_irq(shost->host_lock); 6266 spin_lock_irq(shost->host_lock);
6174 ndlp->nlp_flag &= ~NLP_LOGO_SND; 6267 ndlp->nlp_flag &= ~NLP_LOGO_SND;
6175 spin_unlock_irq(shost->host_lock); 6268 spin_unlock_irq(shost->host_lock);
@@ -6224,7 +6317,6 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
6224 struct lpfc_iocbq *iocb; 6317 struct lpfc_iocbq *iocb;
6225 unsigned long iflags; 6318 unsigned long iflags;
6226 int ret; 6319 int ret;
6227 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6228 IOCB_t *cmd; 6320 IOCB_t *cmd;
6229 6321
6230repeat: 6322repeat:
@@ -6248,7 +6340,7 @@ repeat:
6248 "Fabric sched1: ste:x%x", 6340 "Fabric sched1: ste:x%x",
6249 iocb->vport->port_state, 0, 0); 6341 iocb->vport->port_state, 0, 0);
6250 6342
6251 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); 6343 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
6252 6344
6253 if (ret == IOCB_ERROR) { 6345 if (ret == IOCB_ERROR) {
6254 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 6346 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@@ -6394,7 +6486,6 @@ static int
6394lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 6486lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
6395{ 6487{
6396 unsigned long iflags; 6488 unsigned long iflags;
6397 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6398 int ready; 6489 int ready;
6399 int ret; 6490 int ret;
6400 6491
@@ -6418,7 +6509,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
6418 "Fabric sched2: ste:x%x", 6509 "Fabric sched2: ste:x%x",
6419 iocb->vport->port_state, 0, 0); 6510 iocb->vport->port_state, 0, 0);
6420 6511
6421 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); 6512 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
6422 6513
6423 if (ret == IOCB_ERROR) { 6514 if (ret == IOCB_ERROR) {
6424 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 6515 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@@ -6524,3 +6615,38 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
6524 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 6615 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6525 IOERR_SLI_ABORTED); 6616 IOERR_SLI_ABORTED);
6526} 6617}
6618
6619/**
6620 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
6621 * @phba: pointer to lpfc hba data structure.
6622 * @axri: pointer to the els xri abort wcqe structure.
6623 *
6624 * This routine is invoked by the worker thread to process a SLI4 slow-path
6625 * ELS aborted xri.
6626 **/
6627void
6628lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
6629 struct sli4_wcqe_xri_aborted *axri)
6630{
6631 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
6632 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
6633 unsigned long iflag = 0;
6634
6635 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag);
6636 list_for_each_entry_safe(sglq_entry, sglq_next,
6637 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
6638 if (sglq_entry->sli4_xritag == xri) {
6639 list_del(&sglq_entry->list);
6640 spin_unlock_irqrestore(
6641 &phba->sli4_hba.abts_sgl_list_lock,
6642 iflag);
6643 spin_lock_irqsave(&phba->hbalock, iflag);
6644
6645 list_add_tail(&sglq_entry->list,
6646 &phba->sli4_hba.lpfc_sgl_list);
6647 spin_unlock_irqrestore(&phba->hbalock, iflag);
6648 return;
6649 }
6650 }
6651 spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag);
6652}
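lpfc_sli4_els_xri_aborted above walks the aborted-SGL list under the abts_sgl_list_lock and, once it finds the matching XRI, drops that lock before re-adding the entry to the free list under the hba lock. A userspace sketch of the same move-between-two-locked-lists pattern, using pthread mutexes and a minimal singly linked list instead of the kernel list/spinlock API:

#include <pthread.h>
#include <stddef.h>

struct sglq {
	struct sglq *next;
	unsigned xritag;
};

static pthread_mutex_t abts_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;
static struct sglq *abts_list;   /* entries whose XRIs were aborted */
static struct sglq *free_list;   /* entries ready for reuse */

/* Move the entry with the given XRI from the aborted list back to the
 * free list, holding each lock only while its own list is modified. */
static void els_xri_aborted(unsigned xri)
{
	struct sglq **pp, *entry;

	pthread_mutex_lock(&abts_lock);
	for (pp = &abts_list; (entry = *pp) != NULL; pp = &entry->next) {
		if (entry->xritag == xri) {
			*pp = entry->next;               /* unlink */
			pthread_mutex_unlock(&abts_lock);

			pthread_mutex_lock(&free_lock);
			entry->next = free_list;         /* re-add */
			free_list = entry;
			pthread_mutex_unlock(&free_lock);
			return;
		}
	}
	pthread_mutex_unlock(&abts_lock);
}

int main(void)
{
	static struct sglq a = { .next = NULL, .xritag = 7 };
	abts_list = &a;
	els_xri_aborted(7);
	return free_list == &a ? 0 : 1;
}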
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e764ce0bf704..ed46b24a3380 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -29,10 +29,12 @@
29#include <scsi/scsi_host.h> 29#include <scsi/scsi_host.h>
30#include <scsi/scsi_transport_fc.h> 30#include <scsi/scsi_transport_fc.h>
31 31
32#include "lpfc_hw4.h"
32#include "lpfc_hw.h" 33#include "lpfc_hw.h"
33#include "lpfc_nl.h" 34#include "lpfc_nl.h"
34#include "lpfc_disc.h" 35#include "lpfc_disc.h"
35#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
36#include "lpfc_scsi.h" 38#include "lpfc_scsi.h"
37#include "lpfc.h" 39#include "lpfc.h"
38#include "lpfc_logmsg.h" 40#include "lpfc_logmsg.h"
@@ -273,6 +275,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
273 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && 275 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
274 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) 276 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
275 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 277 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
278
279 lpfc_unregister_unused_fcf(phba);
276} 280}
277 281
278/** 282/**
@@ -295,10 +299,11 @@ lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
295 299
296 ret = kzalloc(sizeof(struct lpfc_fast_path_event), 300 ret = kzalloc(sizeof(struct lpfc_fast_path_event),
297 GFP_ATOMIC); 301 GFP_ATOMIC);
298 if (ret) 302 if (ret) {
299 atomic_inc(&phba->fast_event_count); 303 atomic_inc(&phba->fast_event_count);
300 INIT_LIST_HEAD(&ret->work_evt.evt_listp); 304 INIT_LIST_HEAD(&ret->work_evt.evt_listp);
301 ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; 305 ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
306 }
302 return ret; 307 return ret;
303} 308}
304 309
@@ -491,6 +496,10 @@ lpfc_work_done(struct lpfc_hba *phba)
491 phba->work_ha = 0; 496 phba->work_ha = 0;
492 spin_unlock_irq(&phba->hbalock); 497 spin_unlock_irq(&phba->hbalock);
493 498
499 /* First, try to post the next mailbox command to SLI4 device */
500 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
501 lpfc_sli4_post_async_mbox(phba);
502
494 if (ha_copy & HA_ERATT) 503 if (ha_copy & HA_ERATT)
495 /* Handle the error attention event */ 504 /* Handle the error attention event */
496 lpfc_handle_eratt(phba); 505 lpfc_handle_eratt(phba);
@@ -501,9 +510,27 @@ lpfc_work_done(struct lpfc_hba *phba)
501 if (ha_copy & HA_LATT) 510 if (ha_copy & HA_LATT)
502 lpfc_handle_latt(phba); 511 lpfc_handle_latt(phba);
503 512
513 /* Process SLI4 events */
514 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
515 if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
516 lpfc_sli4_fcp_xri_abort_event_proc(phba);
517 if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
518 lpfc_sli4_els_xri_abort_event_proc(phba);
519 if (phba->hba_flag & ASYNC_EVENT)
520 lpfc_sli4_async_event_proc(phba);
521 if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
522 spin_lock_irq(&phba->hbalock);
523 phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
524 spin_unlock_irq(&phba->hbalock);
525 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
526 }
527 if (phba->hba_flag & HBA_RECEIVE_BUFFER)
528 lpfc_sli4_handle_received_buffer(phba);
529 }
530
504 vports = lpfc_create_vport_work_array(phba); 531 vports = lpfc_create_vport_work_array(phba);
505 if (vports != NULL) 532 if (vports != NULL)
506 for(i = 0; i <= phba->max_vpi; i++) { 533 for (i = 0; i <= phba->max_vports; i++) {
507 /* 534 /*
508 * We could have no vports in array if unloading, so if 535 * We could have no vports in array if unloading, so if
509 * this happens then just use the pport 536 * this happens then just use the pport
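The SLI-4 branch added to lpfc_work_done fans out on per-event bits in hba_flag, clearing HBA_POST_RECEIVE_BUFFER under the hba lock before doing the slower buffer posting. A minimal sketch of that test-and-clear-under-lock dispatch; the flag names and handlers below are invented, only the shape mirrors the hunk:

#include <pthread.h>
#include <stdio.h>

#define EVT_XRI_ABORT   0x1
#define EVT_ASYNC       0x2
#define EVT_POST_RCVBUF 0x4

static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;
static unsigned hba_flag;

static void handle_xri_abort(void)      { puts("xri abort");    }
static void handle_async(void)          { puts("async event");  }
static void post_receive_buffers(void)  { puts("post buffers"); }

static void work_done(void)
{
	if (hba_flag & EVT_XRI_ABORT)
		handle_xri_abort();
	if (hba_flag & EVT_ASYNC)
		handle_async();
	if (hba_flag & EVT_POST_RCVBUF) {
		/* clear the flag under the lock, then do the slow work */
		pthread_mutex_lock(&hbalock);
		hba_flag &= ~EVT_POST_RCVBUF;
		pthread_mutex_unlock(&hbalock);
		post_receive_buffers();
	}
}

int main(void)
{
	hba_flag = EVT_ASYNC | EVT_POST_RCVBUF;
	work_done();
	return 0;
}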
@@ -555,23 +582,24 @@ lpfc_work_done(struct lpfc_hba *phba)
555 /* 582 /*
556 * Turn on Ring interrupts 583 * Turn on Ring interrupts
557 */ 584 */
558 spin_lock_irq(&phba->hbalock); 585 if (phba->sli_rev <= LPFC_SLI_REV3) {
559 control = readl(phba->HCregaddr); 586 spin_lock_irq(&phba->hbalock);
560 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) { 587 control = readl(phba->HCregaddr);
561 lpfc_debugfs_slow_ring_trc(phba, 588 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
562 "WRK Enable ring: cntl:x%x hacopy:x%x", 589 lpfc_debugfs_slow_ring_trc(phba,
563 control, ha_copy, 0); 590 "WRK Enable ring: cntl:x%x hacopy:x%x",
564 591 control, ha_copy, 0);
565 control |= (HC_R0INT_ENA << LPFC_ELS_RING); 592
566 writel(control, phba->HCregaddr); 593 control |= (HC_R0INT_ENA << LPFC_ELS_RING);
567 readl(phba->HCregaddr); /* flush */ 594 writel(control, phba->HCregaddr);
568 } 595 readl(phba->HCregaddr); /* flush */
569 else { 596 } else {
570 lpfc_debugfs_slow_ring_trc(phba, 597 lpfc_debugfs_slow_ring_trc(phba,
571 "WRK Ring ok: cntl:x%x hacopy:x%x", 598 "WRK Ring ok: cntl:x%x hacopy:x%x",
572 control, ha_copy, 0); 599 control, ha_copy, 0);
600 }
601 spin_unlock_irq(&phba->hbalock);
573 } 602 }
574 spin_unlock_irq(&phba->hbalock);
575 } 603 }
576 lpfc_work_list_done(phba); 604 lpfc_work_list_done(phba);
577} 605}
@@ -689,7 +717,7 @@ lpfc_port_link_failure(struct lpfc_vport *vport)
689 lpfc_can_disctmo(vport); 717 lpfc_can_disctmo(vport);
690} 718}
691 719
692static void 720void
693lpfc_linkdown_port(struct lpfc_vport *vport) 721lpfc_linkdown_port(struct lpfc_vport *vport)
694{ 722{
695 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 723 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
@@ -716,6 +744,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
716 if (phba->link_state == LPFC_LINK_DOWN) 744 if (phba->link_state == LPFC_LINK_DOWN)
717 return 0; 745 return 0;
718 spin_lock_irq(&phba->hbalock); 746 spin_lock_irq(&phba->hbalock);
747 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED);
719 if (phba->link_state > LPFC_LINK_DOWN) { 748 if (phba->link_state > LPFC_LINK_DOWN) {
720 phba->link_state = LPFC_LINK_DOWN; 749 phba->link_state = LPFC_LINK_DOWN;
721 phba->pport->fc_flag &= ~FC_LBIT; 750 phba->pport->fc_flag &= ~FC_LBIT;
@@ -723,7 +752,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
723 spin_unlock_irq(&phba->hbalock); 752 spin_unlock_irq(&phba->hbalock);
724 vports = lpfc_create_vport_work_array(phba); 753 vports = lpfc_create_vport_work_array(phba);
725 if (vports != NULL) 754 if (vports != NULL)
726 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 755 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
727 /* Issue a LINK DOWN event to all nodes */ 756 /* Issue a LINK DOWN event to all nodes */
728 lpfc_linkdown_port(vports[i]); 757 lpfc_linkdown_port(vports[i]);
729 } 758 }
@@ -833,10 +862,11 @@ lpfc_linkup(struct lpfc_hba *phba)
833 862
834 vports = lpfc_create_vport_work_array(phba); 863 vports = lpfc_create_vport_work_array(phba);
835 if (vports != NULL) 864 if (vports != NULL)
836 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 865 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
837 lpfc_linkup_port(vports[i]); 866 lpfc_linkup_port(vports[i]);
838 lpfc_destroy_vport_work_array(phba, vports); 867 lpfc_destroy_vport_work_array(phba, vports);
839 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 868 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
869 (phba->sli_rev < LPFC_SLI_REV4))
840 lpfc_issue_clear_la(phba, phba->pport); 870 lpfc_issue_clear_la(phba, phba->pport);
841 871
842 return 0; 872 return 0;
@@ -854,7 +884,7 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
854 struct lpfc_vport *vport = pmb->vport; 884 struct lpfc_vport *vport = pmb->vport;
855 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 885 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
856 struct lpfc_sli *psli = &phba->sli; 886 struct lpfc_sli *psli = &phba->sli;
857 MAILBOX_t *mb = &pmb->mb; 887 MAILBOX_t *mb = &pmb->u.mb;
858 uint32_t control; 888 uint32_t control;
859 889
860 /* Since we don't do discovery right now, turn these off here */ 890 /* Since we don't do discovery right now, turn these off here */
@@ -917,7 +947,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
917{ 947{
918 struct lpfc_vport *vport = pmb->vport; 948 struct lpfc_vport *vport = pmb->vport;
919 949
920 if (pmb->mb.mbxStatus) 950 if (pmb->u.mb.mbxStatus)
921 goto out; 951 goto out;
922 952
923 mempool_free(pmb, phba->mbox_mem_pool); 953 mempool_free(pmb, phba->mbox_mem_pool);
@@ -945,7 +975,7 @@ out:
945 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 975 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
946 "0306 CONFIG_LINK mbxStatus error x%x " 976 "0306 CONFIG_LINK mbxStatus error x%x "
947 "HBA state x%x\n", 977 "HBA state x%x\n",
948 pmb->mb.mbxStatus, vport->port_state); 978 pmb->u.mb.mbxStatus, vport->port_state);
949 mempool_free(pmb, phba->mbox_mem_pool); 979 mempool_free(pmb, phba->mbox_mem_pool);
950 980
951 lpfc_linkdown(phba); 981 lpfc_linkdown(phba);
@@ -959,9 +989,612 @@ out:
959} 989}
960 990
961static void 991static void
992lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
993{
994 struct lpfc_vport *vport = mboxq->vport;
995 unsigned long flags;
996
997 if (mboxq->u.mb.mbxStatus) {
998 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
999 "2017 REG_FCFI mbxStatus error x%x "
1000 "HBA state x%x\n",
1001 mboxq->u.mb.mbxStatus, vport->port_state);
1002 mempool_free(mboxq, phba->mbox_mem_pool);
1003 return;
1004 }
1005
1006 /* Start FCoE discovery by sending a FLOGI. */
1007 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
1008 /* Set the FCFI registered flag */
1009 spin_lock_irqsave(&phba->hbalock, flags);
1010 phba->fcf.fcf_flag |= FCF_REGISTERED;
1011 spin_unlock_irqrestore(&phba->hbalock, flags);
1012 if (vport->port_state != LPFC_FLOGI) {
1013 spin_lock_irqsave(&phba->hbalock, flags);
1014 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
1015 spin_unlock_irqrestore(&phba->hbalock, flags);
1016 lpfc_initial_flogi(vport);
1017 }
1018
1019 mempool_free(mboxq, phba->mbox_mem_pool);
1020 return;
1021}
1022
1023/**
 1024	 * lpfc_fab_name_match - Check if the fcf fabric names match.
1025 * @fab_name: pointer to fabric name.
1026 * @new_fcf_record: pointer to fcf record.
1027 *
 1028	 * This routine compares the fcf record's fabric name with the provided
 1029	 * fabric name. If the fabric names are identical this function
 1030	 * returns 1, else it returns 0.
1031 **/
1032static uint32_t
1033lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
1034{
1035 if ((fab_name[0] ==
1036 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) &&
1037 (fab_name[1] ==
1038 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) &&
1039 (fab_name[2] ==
1040 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) &&
1041 (fab_name[3] ==
1042 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) &&
1043 (fab_name[4] ==
1044 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) &&
1045 (fab_name[5] ==
1046 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) &&
1047 (fab_name[6] ==
1048 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) &&
1049 (fab_name[7] ==
1050 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)))
1051 return 1;
1052 else
1053 return 0;
1054}
1055
1056/**
 1057	 * lpfc_mac_addr_match - Check if the fcf mac addresses match.
1058 * @phba: pointer to lpfc hba data structure.
1059 * @new_fcf_record: pointer to fcf record.
1060 *
 1061	 * This routine compares the fcf record's mac address with the HBA's
 1062	 * FCF mac address. If the mac addresses are identical this function
 1063	 * returns 1, else it returns 0.
1064 **/
1065static uint32_t
1066lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
1067{
1068 if ((phba->fcf.mac_addr[0] ==
1069 bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) &&
1070 (phba->fcf.mac_addr[1] ==
1071 bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) &&
1072 (phba->fcf.mac_addr[2] ==
1073 bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) &&
1074 (phba->fcf.mac_addr[3] ==
1075 bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) &&
1076 (phba->fcf.mac_addr[4] ==
1077 bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) &&
1078 (phba->fcf.mac_addr[5] ==
1079 bf_get(lpfc_fcf_record_mac_5, new_fcf_record)))
1080 return 1;
1081 else
1082 return 0;
1083}
1084
1085/**
1086 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
1087 * @phba: pointer to lpfc hba data structure.
1088 * @new_fcf_record: pointer to fcf record.
1089 *
1090 * This routine copies the FCF information from the FCF
1091 * record to lpfc_hba data structure.
1092 **/
1093static void
1094lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
1095{
1096 phba->fcf.fabric_name[0] =
1097 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
1098 phba->fcf.fabric_name[1] =
1099 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
1100 phba->fcf.fabric_name[2] =
1101 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
1102 phba->fcf.fabric_name[3] =
1103 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
1104 phba->fcf.fabric_name[4] =
1105 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
1106 phba->fcf.fabric_name[5] =
1107 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
1108 phba->fcf.fabric_name[6] =
1109 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
1110 phba->fcf.fabric_name[7] =
1111 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
1112 phba->fcf.mac_addr[0] =
1113 bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
1114 phba->fcf.mac_addr[1] =
1115 bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
1116 phba->fcf.mac_addr[2] =
1117 bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
1118 phba->fcf.mac_addr[3] =
1119 bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
1120 phba->fcf.mac_addr[4] =
1121 bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
1122 phba->fcf.mac_addr[5] =
1123 bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
1124 phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
1125 phba->fcf.priority = new_fcf_record->fip_priority;
1126}
1127
1128/**
1129 * lpfc_register_fcf - Register the FCF with hba.
1130 * @phba: pointer to lpfc hba data structure.
1131 *
1132 * This routine issues a register fcfi mailbox command to register
1133 * the fcf with HBA.
1134 **/
1135static void
1136lpfc_register_fcf(struct lpfc_hba *phba)
1137{
1138 LPFC_MBOXQ_t *fcf_mbxq;
1139 int rc;
1140 unsigned long flags;
1141
1142 spin_lock_irqsave(&phba->hbalock, flags);
1143
 1144	 /* If the FCF is not available, do nothing. */
1145 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
1146 spin_unlock_irqrestore(&phba->hbalock, flags);
1147 return;
1148 }
1149
1150 /* The FCF is already registered, start discovery */
1151 if (phba->fcf.fcf_flag & FCF_REGISTERED) {
1152 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
1153 spin_unlock_irqrestore(&phba->hbalock, flags);
1154 if (phba->pport->port_state != LPFC_FLOGI)
1155 lpfc_initial_flogi(phba->pport);
1156 return;
1157 }
1158 spin_unlock_irqrestore(&phba->hbalock, flags);
1159
1160 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
1161 GFP_KERNEL);
1162 if (!fcf_mbxq)
1163 return;
1164
1165 lpfc_reg_fcfi(phba, fcf_mbxq);
1166 fcf_mbxq->vport = phba->pport;
1167 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
1168 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
1169 if (rc == MBX_NOT_FINISHED)
1170 mempool_free(fcf_mbxq, phba->mbox_mem_pool);
1171
1172 return;
1173}
1174
1175/**
1176 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
1177 * @phba: pointer to lpfc hba data structure.
1178 * @new_fcf_record: pointer to fcf record.
1179 * @boot_flag: Indicates if this record used by boot bios.
1180 * @addr_mode: The address mode to be used by this FCF
1181 *
 1182	 * This routine compares the fcf record with the connect list obtained from
 1183	 * the config region to decide if this FCF can be used for SAN discovery. It
 1184	 * returns 1 if this record can be used for SAN discovery, else it returns zero.
 1185	 * If this FCF record can be used for SAN discovery, the boot_flag will indicate
 1186	 * if this FCF is used by the boot bios and addr_mode will indicate the
 1187	 * addressing mode to be used for this FCF when the function returns.
 1188	 * If the FCF record needs to be used with a particular vlan id, the vlan is
 1189	 * set in vlan_id on return from the function. If no VLAN tagging needs to
 1190	 * be used with the FCF, vlan_id will be set to 0xFFFF.
1191 **/
1192static int
1193lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1194 struct fcf_record *new_fcf_record,
1195 uint32_t *boot_flag, uint32_t *addr_mode,
1196 uint16_t *vlan_id)
1197{
1198 struct lpfc_fcf_conn_entry *conn_entry;
1199
1200 /* If FCF not available return 0 */
1201 if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
1202 !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
1203 return 0;
1204
1205 if (!phba->cfg_enable_fip) {
1206 *boot_flag = 0;
1207 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1208 new_fcf_record);
1209 if (phba->valid_vlan)
1210 *vlan_id = phba->vlan_id;
1211 else
1212 *vlan_id = 0xFFFF;
1213 return 1;
1214 }
1215
1216 /*
 1217	 * If there are no FCF connection table entries, the driver connects to
 1218	 * all FCFs.
1219 */
1220 if (list_empty(&phba->fcf_conn_rec_list)) {
1221 *boot_flag = 0;
1222 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1223 new_fcf_record);
1224
1225 /*
1226 * When there are no FCF connect entries, use driver's default
1227 * addressing mode - FPMA.
1228 */
1229 if (*addr_mode & LPFC_FCF_FPMA)
1230 *addr_mode = LPFC_FCF_FPMA;
1231
1232 *vlan_id = 0xFFFF;
1233 return 1;
1234 }
1235
1236 list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) {
1237 if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
1238 continue;
1239
1240 if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
1241 !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
1242 new_fcf_record))
1243 continue;
1244
1245 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
1246 /*
1247 * If the vlan bit map does not have the bit set for the
1248 * vlan id to be used, then it is not a match.
1249 */
1250 if (!(new_fcf_record->vlan_bitmap
1251 [conn_entry->conn_rec.vlan_tag / 8] &
1252 (1 << (conn_entry->conn_rec.vlan_tag % 8))))
1253 continue;
1254 }
1255
1256 /*
1257 * If connection record does not support any addressing mode,
1258 * skip the FCF record.
1259 */
1260 if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
1261 & (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
1262 continue;
1263
1264 /*
1265 * Check if the connection record specifies a required
1266 * addressing mode.
1267 */
1268 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1269 !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {
1270
1271 /*
 1272	 * If SPMA is required but the FCF does not support it, continue.
1273 */
1274 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1275 !(bf_get(lpfc_fcf_record_mac_addr_prov,
1276 new_fcf_record) & LPFC_FCF_SPMA))
1277 continue;
1278
1279 /*
 1280	 * If FPMA is required but the FCF does not support it, continue.
1281 */
1282 if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1283 !(bf_get(lpfc_fcf_record_mac_addr_prov,
1284 new_fcf_record) & LPFC_FCF_FPMA))
1285 continue;
1286 }
1287
1288 /*
1289	 * This FCF record matches the filtering criteria.
1290 */
1291 if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
1292 *boot_flag = 1;
1293 else
1294 *boot_flag = 0;
1295
1296 /*
1297	 * If the user did not specify any addressing mode, or if the
1298	 * preferred addressing mode specified by the user is not supported
1299	 * by the FCF, allow the fabric to pick the addressing mode.
1300 */
1301 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1302 new_fcf_record);
1303 /*
1304 * If the user specified a required address mode, assign that
1305 * address mode
1306 */
1307 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1308 (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
1309 *addr_mode = (conn_entry->conn_rec.flags &
1310 FCFCNCT_AM_SPMA) ?
1311 LPFC_FCF_SPMA : LPFC_FCF_FPMA;
1312 /*
1313	 * If the user specified a preferred address mode, use that
1314	 * mode only if the FCF supports it.
1315 */
1316 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1317 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
1318 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1319 (*addr_mode & LPFC_FCF_SPMA))
1320 *addr_mode = LPFC_FCF_SPMA;
1321 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1322 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
1323 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1324 (*addr_mode & LPFC_FCF_FPMA))
1325 *addr_mode = LPFC_FCF_FPMA;
1326
1327 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
1328 *vlan_id = conn_entry->conn_rec.vlan_tag;
1329 else
1330 *vlan_id = 0xFFFF;
1331
1332 return 1;
1333 }
1334
1335 return 0;
1336}
1337
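The VLAN check in lpfc_match_fcf_conn_list above treats the FCF record's vlan_bitmap as one bit per VLAN id, eight ids per byte. A minimal standalone sketch of that bit test, with a hypothetical buffer and tag value rather than the driver's structures, is:

#include <stdint.h>
#include <stdio.h>

/* One bit per VLAN id, eight ids per byte: 512 bytes cover ids 0..4095. */
static int vlan_bit_set(const uint8_t *bitmap, uint16_t vlan_tag)
{
	return (bitmap[vlan_tag / 8] >> (vlan_tag % 8)) & 1;
}

int main(void)
{
	uint8_t bitmap[512] = { 0 };
	uint16_t tag = 100;			/* example VLAN id */

	bitmap[tag / 8] |= 1 << (tag % 8);	/* mark VLAN 100 as allowed */
	printf("VLAN %u allowed: %d\n", tag, vlan_bit_set(bitmap, tag));
	printf("VLAN %u allowed: %d\n", tag + 1, vlan_bit_set(bitmap, tag + 1));
	return 0;
}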
1338/**
1339 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
1340 * @phba: pointer to lpfc hba data structure.
1341 * @mboxq: pointer to mailbox object.
1342 *
 1343 * This function iterates through all the FCF records available in the
 1344 * HBA and chooses the optimal FCF record for discovery. After finding
 1345 * the FCF for discovery it registers the FCF record and kick starts
 1346 * discovery.
 1347 * If the FCF_IN_USE flag is set in the currently used FCF, the routine
 1348 * tries to use an FCF record which matches the fabric name and MAC
 1349 * address of the currently used FCF record.
 1350 * If the driver supports only one FCF, it will try to use the FCF record
 1351 * used by the boot BIOS.
1352 */
1353void
1354lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1355{
1356 void *virt_addr;
1357 dma_addr_t phys_addr;
1358 uint8_t *bytep;
1359 struct lpfc_mbx_sge sge;
1360 struct lpfc_mbx_read_fcf_tbl *read_fcf;
1361 uint32_t shdr_status, shdr_add_status;
1362 union lpfc_sli4_cfg_shdr *shdr;
1363 struct fcf_record *new_fcf_record;
1364 int rc;
1365 uint32_t boot_flag, addr_mode;
1366 uint32_t next_fcf_index;
1367 unsigned long flags;
1368 uint16_t vlan_id;
1369
1370 /* Get the first SGE entry from the non-embedded DMA memory. This
1371 * routine only uses a single SGE.
1372 */
1373 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
1374 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
1375 if (unlikely(!mboxq->sge_array)) {
1376 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1377 "2524 Failed to get the non-embedded SGE "
1378 "virtual address\n");
1379 goto out;
1380 }
1381 virt_addr = mboxq->sge_array->addr[0];
1382
1383 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
1384 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1385 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
1386 &shdr->response);
1387 /*
1388 * The FCF Record was read and there is no reason for the driver
1389	 * to maintain the FCF record data or memory. Instead, the driver
1390	 * just needs to book-keep which FCFIs can be used.
1391 */
1392 if (shdr_status || shdr_add_status) {
1393 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1394 "2521 READ_FCF_RECORD mailbox failed "
1395 "with status x%x add_status x%x, mbx\n",
1396 shdr_status, shdr_add_status);
1397 goto out;
1398 }
1399 /* Interpreting the returned information of FCF records */
1400 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
1401 lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
1402 sizeof(struct lpfc_mbx_read_fcf_tbl));
1403 next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
1404
1405 new_fcf_record = (struct fcf_record *)(virt_addr +
1406 sizeof(struct lpfc_mbx_read_fcf_tbl));
1407 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
1408 sizeof(struct fcf_record));
1409 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
1410
1411 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record,
1412 &boot_flag, &addr_mode,
1413 &vlan_id);
1414 /*
1415	 * If the FCF record does not match the connect list entries,
1416	 * read the next entry.
1417 */
1418 if (!rc)
1419 goto read_next_fcf;
1420 /*
1421	 * If this is not the first FCF discovery of the HBA, use the last
1422	 * FCF record for the discovery.
1423 */
1424 spin_lock_irqsave(&phba->hbalock, flags);
1425 if (phba->fcf.fcf_flag & FCF_IN_USE) {
1426 if (lpfc_fab_name_match(phba->fcf.fabric_name,
1427 new_fcf_record) &&
1428 lpfc_mac_addr_match(phba, new_fcf_record)) {
1429 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1430 spin_unlock_irqrestore(&phba->hbalock, flags);
1431 goto out;
1432 }
1433 spin_unlock_irqrestore(&phba->hbalock, flags);
1434 goto read_next_fcf;
1435 }
1436 if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
1437 /*
1438	 * If the current FCF record does not have the boot flag
1439	 * set and the new FCF record has the boot flag set, use the
1440	 * new FCF record.
1441 */
1442 if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
1443 /* Use this FCF record */
1444 lpfc_copy_fcf_record(phba, new_fcf_record);
1445 phba->fcf.addr_mode = addr_mode;
1446 phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
1447 if (vlan_id != 0xFFFF) {
1448 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1449 phba->fcf.vlan_id = vlan_id;
1450 }
1451 spin_unlock_irqrestore(&phba->hbalock, flags);
1452 goto read_next_fcf;
1453 }
1454 /*
1455	 * If the current FCF record has the boot flag set and the
1456	 * new FCF record does not have the boot flag, read the next
1457 * FCF record.
1458 */
1459 if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
1460 spin_unlock_irqrestore(&phba->hbalock, flags);
1461 goto read_next_fcf;
1462 }
1463 /*
1464 * If there is a record with lower priority value for
1465 * the current FCF, use that record.
1466 */
1467 if (lpfc_fab_name_match(phba->fcf.fabric_name, new_fcf_record)
1468 && (new_fcf_record->fip_priority <
1469 phba->fcf.priority)) {
1470 /* Use this FCF record */
1471 lpfc_copy_fcf_record(phba, new_fcf_record);
1472 phba->fcf.addr_mode = addr_mode;
1473 if (vlan_id != 0xFFFF) {
1474 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1475 phba->fcf.vlan_id = vlan_id;
1476 }
1477 spin_unlock_irqrestore(&phba->hbalock, flags);
1478 goto read_next_fcf;
1479 }
1480 spin_unlock_irqrestore(&phba->hbalock, flags);
1481 goto read_next_fcf;
1482 }
1483 /*
1484 * This is the first available FCF record, use this
1485 * record.
1486 */
1487 lpfc_copy_fcf_record(phba, new_fcf_record);
1488 phba->fcf.addr_mode = addr_mode;
1489 if (boot_flag)
1490 phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
1491 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1492 if (vlan_id != 0xFFFF) {
1493 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1494 phba->fcf.vlan_id = vlan_id;
1495 }
1496 spin_unlock_irqrestore(&phba->hbalock, flags);
1497 goto read_next_fcf;
1498
1499read_next_fcf:
1500 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1501 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0)
1502 lpfc_register_fcf(phba);
1503 else
1504 lpfc_sli4_read_fcf_record(phba, next_fcf_index);
1505 return;
1506
1507out:
1508 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1509 lpfc_register_fcf(phba);
1510
1511 return;
1512}
1513
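The selection policy applied in the completion handler above boils down to: a record flagged for the boot BIOS wins over one that is not, and otherwise a record with a lower FIP priority value replaces the current FCF. A simplified, standalone sketch of that ordering follows; the struct is hypothetical and the fabric-name match the driver also requires is deliberately omitted:

#include <stdint.h>

struct fcf_pick {
	int	boot_enabled;	/* record is used by the boot BIOS */
	uint8_t	priority;	/* FIP priority: lower value is better */
};

/* Return nonzero if the candidate record should replace the current one. */
static int fcf_should_replace(const struct fcf_pick *cur,
			      const struct fcf_pick *cand)
{
	if (cand->boot_enabled != cur->boot_enabled)
		return cand->boot_enabled;	/* boot FCF always preferred */
	return cand->priority < cur->priority;	/* lower priority value wins */
}

int main(void)
{
	struct fcf_pick cur = { 0, 128 }, cand = { 1, 200 };

	return !fcf_should_replace(&cur, &cand);	/* expect replacement */
}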
1514/**
 1515 * lpfc_start_fdiscs - send an FDISC for each vport on this port.
1516 * @phba: pointer to lpfc hba data structure.
1517 *
1518 * This function loops through the list of vports on the @phba and issues an
1519 * FDISC if possible.
1520 */
1521void
1522lpfc_start_fdiscs(struct lpfc_hba *phba)
1523{
1524 struct lpfc_vport **vports;
1525 int i;
1526
1527 vports = lpfc_create_vport_work_array(phba);
1528 if (vports != NULL) {
1529 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1530 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
1531 continue;
1532			/* There is no vpi available for this vport */
1533 if (vports[i]->vpi > phba->max_vpi) {
1534 lpfc_vport_set_state(vports[i],
1535 FC_VPORT_FAILED);
1536 continue;
1537 }
1538 if (phba->fc_topology == TOPOLOGY_LOOP) {
1539 lpfc_vport_set_state(vports[i],
1540 FC_VPORT_LINKDOWN);
1541 continue;
1542 }
1543 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1544 lpfc_initial_fdisc(vports[i]);
1545 else {
1546 lpfc_vport_set_state(vports[i],
1547 FC_VPORT_NO_FABRIC_SUPP);
1548 lpfc_printf_vlog(vports[i], KERN_ERR,
1549 LOG_ELS,
1550 "0259 No NPIV "
1551 "Fabric support\n");
1552 }
1553 }
1554 }
1555 lpfc_destroy_vport_work_array(phba, vports);
1556}
1557
1558void
1559lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1560{
1561 struct lpfc_dmabuf *dmabuf = mboxq->context1;
1562 struct lpfc_vport *vport = mboxq->vport;
1563
1564 if (mboxq->u.mb.mbxStatus) {
1565 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1566 "2018 REG_VFI mbxStatus error x%x "
1567 "HBA state x%x\n",
1568 mboxq->u.mb.mbxStatus, vport->port_state);
1569 if (phba->fc_topology == TOPOLOGY_LOOP) {
1570 /* FLOGI failed, use loop map to make discovery list */
1571 lpfc_disc_list_loopmap(vport);
1572 /* Start discovery */
1573 lpfc_disc_start(vport);
1574 goto fail_free_mem;
1575 }
1576 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1577 goto fail_free_mem;
1578 }
1579	/* Mark the vport as registered with its VFI */
1580 vport->vfi_state |= LPFC_VFI_REGISTERED;
1581
1582 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1583 lpfc_start_fdiscs(phba);
1584 lpfc_do_scr_ns_plogi(phba, vport);
1585 }
1586
1587fail_free_mem:
1588 mempool_free(mboxq, phba->mbox_mem_pool);
1589 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
1590 kfree(dmabuf);
1591 return;
1592}
1593
1594static void
962lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1595lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
963{ 1596{
964 MAILBOX_t *mb = &pmb->mb; 1597 MAILBOX_t *mb = &pmb->u.mb;
965 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; 1598 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
966 struct lpfc_vport *vport = pmb->vport; 1599 struct lpfc_vport *vport = pmb->vport;
967 1600
@@ -1012,13 +1645,13 @@ static void
1012lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) 1645lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1013{ 1646{
1014 struct lpfc_vport *vport = phba->pport; 1647 struct lpfc_vport *vport = phba->pport;
1015 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox; 1648 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
1016 int i; 1649 int i;
1017 struct lpfc_dmabuf *mp; 1650 struct lpfc_dmabuf *mp;
1018 int rc; 1651 int rc;
1652 struct fcf_record *fcf_record;
1019 1653
1020 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1654 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1021 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1022 1655
1023 spin_lock_irq(&phba->hbalock); 1656 spin_lock_irq(&phba->hbalock);
1024 switch (la->UlnkSpeed) { 1657 switch (la->UlnkSpeed) {
@@ -1034,6 +1667,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1034 case LA_8GHZ_LINK: 1667 case LA_8GHZ_LINK:
1035 phba->fc_linkspeed = LA_8GHZ_LINK; 1668 phba->fc_linkspeed = LA_8GHZ_LINK;
1036 break; 1669 break;
1670 case LA_10GHZ_LINK:
1671 phba->fc_linkspeed = LA_10GHZ_LINK;
1672 break;
1037 default: 1673 default:
1038 phba->fc_linkspeed = LA_UNKNW_LINK; 1674 phba->fc_linkspeed = LA_UNKNW_LINK;
1039 break; 1675 break;
@@ -1115,22 +1751,66 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1115 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1751 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1116 kfree(mp); 1752 kfree(mp);
1117 mempool_free(sparam_mbox, phba->mbox_mem_pool); 1753 mempool_free(sparam_mbox, phba->mbox_mem_pool);
1118 if (cfglink_mbox)
1119 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1120 goto out; 1754 goto out;
1121 } 1755 }
1122 } 1756 }
1123 1757
1124 if (cfglink_mbox) { 1758 if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
1759 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1760 if (!cfglink_mbox)
1761 goto out;
1125 vport->port_state = LPFC_LOCAL_CFG_LINK; 1762 vport->port_state = LPFC_LOCAL_CFG_LINK;
1126 lpfc_config_link(phba, cfglink_mbox); 1763 lpfc_config_link(phba, cfglink_mbox);
1127 cfglink_mbox->vport = vport; 1764 cfglink_mbox->vport = vport;
1128 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 1765 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
1129 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); 1766 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
1130 if (rc != MBX_NOT_FINISHED) 1767 if (rc == MBX_NOT_FINISHED) {
1131 return; 1768 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1132 mempool_free(cfglink_mbox, phba->mbox_mem_pool); 1769 goto out;
1770 }
1771 } else {
1772 /*
1773 * Add the driver's default FCF record at FCF index 0 now. This
1774 * is phase 1 implementation that support FCF index 0 and driver
1775 * defaults.
1776 */
1777 if (phba->cfg_enable_fip == 0) {
1778 fcf_record = kzalloc(sizeof(struct fcf_record),
1779 GFP_KERNEL);
1780 if (unlikely(!fcf_record)) {
1781 lpfc_printf_log(phba, KERN_ERR,
1782 LOG_MBOX | LOG_SLI,
1783				"2554 Could not allocate memory for "
1784 "fcf record\n");
1785 rc = -ENODEV;
1786 goto out;
1787 }
1788
1789 lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
1790 LPFC_FCOE_FCF_DEF_INDEX);
1791 rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
1792 if (unlikely(rc)) {
1793 lpfc_printf_log(phba, KERN_ERR,
1794 LOG_MBOX | LOG_SLI,
1795 "2013 Could not manually add FCF "
1796 "record 0, status %d\n", rc);
1797 rc = -ENODEV;
1798 kfree(fcf_record);
1799 goto out;
1800 }
1801 kfree(fcf_record);
1802 }
1803 /*
1804 * The driver is expected to do FIP/FCF. Call the port
1805 * and get the FCF Table.
1806 */
1807 rc = lpfc_sli4_read_fcf_record(phba,
1808 LPFC_FCOE_FCF_GET_FIRST);
1809 if (rc)
1810 goto out;
1133 } 1811 }
1812
1813 return;
1134out: 1814out:
1135 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 1815 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1136 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 1816 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
@@ -1147,10 +1827,12 @@ lpfc_enable_la(struct lpfc_hba *phba)
1147 struct lpfc_sli *psli = &phba->sli; 1827 struct lpfc_sli *psli = &phba->sli;
1148 spin_lock_irq(&phba->hbalock); 1828 spin_lock_irq(&phba->hbalock);
1149 psli->sli_flag |= LPFC_PROCESS_LA; 1829 psli->sli_flag |= LPFC_PROCESS_LA;
1150 control = readl(phba->HCregaddr); 1830 if (phba->sli_rev <= LPFC_SLI_REV3) {
1151 control |= HC_LAINT_ENA; 1831 control = readl(phba->HCregaddr);
1152 writel(control, phba->HCregaddr); 1832 control |= HC_LAINT_ENA;
1153 readl(phba->HCregaddr); /* flush */ 1833 writel(control, phba->HCregaddr);
1834 readl(phba->HCregaddr); /* flush */
1835 }
1154 spin_unlock_irq(&phba->hbalock); 1836 spin_unlock_irq(&phba->hbalock);
1155} 1837}
1156 1838
@@ -1159,6 +1841,7 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
1159{ 1841{
1160 lpfc_linkdown(phba); 1842 lpfc_linkdown(phba);
1161 lpfc_enable_la(phba); 1843 lpfc_enable_la(phba);
1844 lpfc_unregister_unused_fcf(phba);
1162 /* turn on Link Attention interrupts - no CLEAR_LA needed */ 1845 /* turn on Link Attention interrupts - no CLEAR_LA needed */
1163} 1846}
1164 1847
@@ -1175,7 +1858,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1175 struct lpfc_vport *vport = pmb->vport; 1858 struct lpfc_vport *vport = pmb->vport;
1176 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1859 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1177 READ_LA_VAR *la; 1860 READ_LA_VAR *la;
1178 MAILBOX_t *mb = &pmb->mb; 1861 MAILBOX_t *mb = &pmb->u.mb;
1179 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 1862 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1180 1863
1181 /* Unblock ELS traffic */ 1864 /* Unblock ELS traffic */
@@ -1190,7 +1873,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1190 goto lpfc_mbx_cmpl_read_la_free_mbuf; 1873 goto lpfc_mbx_cmpl_read_la_free_mbuf;
1191 } 1874 }
1192 1875
1193 la = (READ_LA_VAR *) & pmb->mb.un.varReadLA; 1876 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
1194 1877
1195 memcpy(&phba->alpa_map[0], mp->virt, 128); 1878 memcpy(&phba->alpa_map[0], mp->virt, 128);
1196 1879
@@ -1201,7 +1884,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1201 vport->fc_flag &= ~FC_BYPASSED_MODE; 1884 vport->fc_flag &= ~FC_BYPASSED_MODE;
1202 spin_unlock_irq(shost->host_lock); 1885 spin_unlock_irq(shost->host_lock);
1203 1886
1204 if (((phba->fc_eventTag + 1) < la->eventTag) || 1887 if ((phba->fc_eventTag < la->eventTag) ||
1205 (phba->fc_eventTag == la->eventTag)) { 1888 (phba->fc_eventTag == la->eventTag)) {
1206 phba->fc_stat.LinkMultiEvent++; 1889 phba->fc_stat.LinkMultiEvent++;
1207 if (la->attType == AT_LINK_UP) 1890 if (la->attType == AT_LINK_UP)
@@ -1328,7 +2011,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1328static void 2011static void
1329lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2012lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1330{ 2013{
1331 MAILBOX_t *mb = &pmb->mb; 2014 MAILBOX_t *mb = &pmb->u.mb;
1332 struct lpfc_vport *vport = pmb->vport; 2015 struct lpfc_vport *vport = pmb->vport;
1333 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2016 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1334 2017
@@ -1381,7 +2064,7 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1381{ 2064{
1382 struct lpfc_vport *vport = pmb->vport; 2065 struct lpfc_vport *vport = pmb->vport;
1383 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2066 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1384 MAILBOX_t *mb = &pmb->mb; 2067 MAILBOX_t *mb = &pmb->u.mb;
1385 2068
1386 switch (mb->mbxStatus) { 2069 switch (mb->mbxStatus) {
1387 case 0x0011: 2070 case 0x0011:
@@ -1416,6 +2099,128 @@ out:
1416 return; 2099 return;
1417} 2100}
1418 2101
2102/**
2103 * lpfc_create_static_vport - Read HBA config region to create static vports.
2104 * @phba: pointer to lpfc hba data structure.
2105 *
 2106 * This routine issues a DUMP mailbox command for config region 22 to get
 2107 * the list of static vports to be created. The function creates vports
2108 * based on the information returned from the HBA.
2109 **/
2110void
2111lpfc_create_static_vport(struct lpfc_hba *phba)
2112{
2113 LPFC_MBOXQ_t *pmb = NULL;
2114 MAILBOX_t *mb;
2115 struct static_vport_info *vport_info;
2116 int rc, i;
2117 struct fc_vport_identifiers vport_id;
2118 struct fc_vport *new_fc_vport;
2119 struct Scsi_Host *shost;
2120 struct lpfc_vport *vport;
2121 uint16_t offset = 0;
2122 uint8_t *vport_buff;
2123
2124 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2125 if (!pmb) {
2126 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2127 "0542 lpfc_create_static_vport failed to"
2128 " allocate mailbox memory\n");
2129 return;
2130 }
2131
2132 mb = &pmb->u.mb;
2133
2134 vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
2135 if (!vport_info) {
2136 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2137 "0543 lpfc_create_static_vport failed to"
2138 " allocate vport_info\n");
2139 mempool_free(pmb, phba->mbox_mem_pool);
2140 return;
2141 }
2142
2143 vport_buff = (uint8_t *) vport_info;
2144 do {
2145 lpfc_dump_static_vport(phba, pmb, offset);
2146 pmb->vport = phba->pport;
2147 rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
2148
2149 if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
2150 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2151 "0544 lpfc_create_static_vport failed to"
2152 " issue dump mailbox command ret 0x%x "
2153 "status 0x%x\n",
2154 rc, mb->mbxStatus);
2155 goto out;
2156 }
2157
2158 if (mb->un.varDmp.word_cnt >
2159 sizeof(struct static_vport_info) - offset)
2160 mb->un.varDmp.word_cnt =
2161 sizeof(struct static_vport_info) - offset;
2162
2163 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
2164 vport_buff + offset,
2165 mb->un.varDmp.word_cnt);
2166 offset += mb->un.varDmp.word_cnt;
2167
2168 } while (mb->un.varDmp.word_cnt &&
2169 offset < sizeof(struct static_vport_info));
2170
2171
2172 if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
2173 ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
2174 != VPORT_INFO_REV)) {
2175 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2176 "0545 lpfc_create_static_vport bad"
2177 " information header 0x%x 0x%x\n",
2178 le32_to_cpu(vport_info->signature),
2179 le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);
2180
2181 goto out;
2182 }
2183
2184 shost = lpfc_shost_from_vport(phba->pport);
2185
2186 for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
2187 memset(&vport_id, 0, sizeof(vport_id));
2188 vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
2189 vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
2190 if (!vport_id.port_name || !vport_id.node_name)
2191 continue;
2192
2193 vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
2194 vport_id.vport_type = FC_PORTTYPE_NPIV;
2195 vport_id.disable = false;
2196 new_fc_vport = fc_vport_create(shost, 0, &vport_id);
2197
2198 if (!new_fc_vport) {
2199 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2200 "0546 lpfc_create_static_vport failed to"
2201				" create vport\n");
2202 continue;
2203 }
2204
2205 vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
2206 vport->vport_flag |= STATIC_VPORT;
2207 }
2208
2209out:
2210 /*
2211	 * If this is a timed-out command, setting context2 to NULL tells the SLI
2212	 * layer not to use this buffer.
2213 */
2214 spin_lock_irq(&phba->hbalock);
2215 pmb->context2 = NULL;
2216 spin_unlock_irq(&phba->hbalock);
2217 kfree(vport_info);
2218 if (rc != MBX_TIMEOUT)
2219 mempool_free(pmb, phba->mbox_mem_pool);
2220
2221 return;
2222}
2223
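lpfc_create_static_vport reads config region 22 in chunks: each DUMP response returns a piece of data, the count is clamped to the space left in the destination buffer, and the loop stops when the port returns nothing or the buffer is full. The accumulation pattern, reduced to a standalone sketch with a fake data source in place of the mailbox command, looks like this:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define INFO_SIZE 128	/* stands in for sizeof(struct static_vport_info) */

/* Fake data source: returns at most 40 bytes per call, like a DUMP chunk. */
static uint32_t fake_dump_chunk(uint8_t *dst, uint32_t max)
{
	static uint8_t src[INFO_SIZE];
	static uint32_t pos;
	uint32_t n = 40;

	if (n > INFO_SIZE - pos)
		n = INFO_SIZE - pos;
	if (n > max)
		n = max;
	memcpy(dst, src + pos, n);
	pos += n;
	return n;
}

int main(void)
{
	uint8_t buf[INFO_SIZE];
	uint32_t offset = 0, got;

	do {
		got = fake_dump_chunk(buf + offset, INFO_SIZE - offset);
		offset += got;
	} while (got && offset < INFO_SIZE);

	printf("read %u bytes of static vport info\n", offset);
	return 0;
}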
1419/* 2224/*
1420 * This routine handles processing a Fabric REG_LOGIN mailbox 2225 * This routine handles processing a Fabric REG_LOGIN mailbox
1421 * command upon completion. It is setup in the LPFC_MBOXQ 2226 * command upon completion. It is setup in the LPFC_MBOXQ
@@ -1426,16 +2231,17 @@ void
1426lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2231lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1427{ 2232{
1428 struct lpfc_vport *vport = pmb->vport; 2233 struct lpfc_vport *vport = pmb->vport;
1429 MAILBOX_t *mb = &pmb->mb; 2234 MAILBOX_t *mb = &pmb->u.mb;
1430 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 2235 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1431 struct lpfc_nodelist *ndlp; 2236 struct lpfc_nodelist *ndlp;
1432 struct lpfc_vport **vports;
1433 int i;
1434 2237
1435 ndlp = (struct lpfc_nodelist *) pmb->context2; 2238 ndlp = (struct lpfc_nodelist *) pmb->context2;
1436 pmb->context1 = NULL; 2239 pmb->context1 = NULL;
1437 pmb->context2 = NULL; 2240 pmb->context2 = NULL;
1438 if (mb->mbxStatus) { 2241 if (mb->mbxStatus) {
2242 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
2243 "0258 Register Fabric login error: 0x%x\n",
2244 mb->mbxStatus);
1439 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2245 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1440 kfree(mp); 2246 kfree(mp);
1441 mempool_free(pmb, phba->mbox_mem_pool); 2247 mempool_free(pmb, phba->mbox_mem_pool);
@@ -1454,9 +2260,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1454 } 2260 }
1455 2261
1456 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2262 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1457 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1458 "0258 Register Fabric login error: 0x%x\n",
1459 mb->mbxStatus);
1460 /* Decrement the reference count to ndlp after the reference 2263 /* Decrement the reference count to ndlp after the reference
1461 * to the ndlp are done. 2264 * to the ndlp are done.
1462 */ 2265 */
@@ -1465,34 +2268,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1465 } 2268 }
1466 2269
1467 ndlp->nlp_rpi = mb->un.varWords[0]; 2270 ndlp->nlp_rpi = mb->un.varWords[0];
2271 ndlp->nlp_flag |= NLP_RPI_VALID;
1468 ndlp->nlp_type |= NLP_FABRIC; 2272 ndlp->nlp_type |= NLP_FABRIC;
1469 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 2273 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1470 2274
1471 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 2275 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1472 vports = lpfc_create_vport_work_array(phba); 2276 lpfc_start_fdiscs(phba);
1473 if (vports != NULL)
1474 for(i = 0;
1475 i <= phba->max_vpi && vports[i] != NULL;
1476 i++) {
1477 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
1478 continue;
1479 if (phba->fc_topology == TOPOLOGY_LOOP) {
1480 lpfc_vport_set_state(vports[i],
1481 FC_VPORT_LINKDOWN);
1482 continue;
1483 }
1484 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1485 lpfc_initial_fdisc(vports[i]);
1486 else {
1487 lpfc_vport_set_state(vports[i],
1488 FC_VPORT_NO_FABRIC_SUPP);
1489 lpfc_printf_vlog(vport, KERN_ERR,
1490 LOG_ELS,
1491 "0259 No NPIV "
1492 "Fabric support\n");
1493 }
1494 }
1495 lpfc_destroy_vport_work_array(phba, vports);
1496 lpfc_do_scr_ns_plogi(phba, vport); 2277 lpfc_do_scr_ns_plogi(phba, vport);
1497 } 2278 }
1498 2279
@@ -1516,13 +2297,16 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1516void 2297void
1517lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2298lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1518{ 2299{
1519 MAILBOX_t *mb = &pmb->mb; 2300 MAILBOX_t *mb = &pmb->u.mb;
1520 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 2301 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1521 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 2302 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
1522 struct lpfc_vport *vport = pmb->vport; 2303 struct lpfc_vport *vport = pmb->vport;
1523 2304
1524 if (mb->mbxStatus) { 2305 if (mb->mbxStatus) {
1525out: 2306out:
2307 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2308 "0260 Register NameServer error: 0x%x\n",
2309 mb->mbxStatus);
1526 /* decrement the node reference count held for this 2310 /* decrement the node reference count held for this
1527 * callback function. 2311 * callback function.
1528 */ 2312 */
@@ -1546,15 +2330,13 @@ out:
1546 return; 2330 return;
1547 } 2331 }
1548 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2332 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1549 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1550 "0260 Register NameServer error: 0x%x\n",
1551 mb->mbxStatus);
1552 return; 2333 return;
1553 } 2334 }
1554 2335
1555 pmb->context1 = NULL; 2336 pmb->context1 = NULL;
1556 2337
1557 ndlp->nlp_rpi = mb->un.varWords[0]; 2338 ndlp->nlp_rpi = mb->un.varWords[0];
2339 ndlp->nlp_flag |= NLP_RPI_VALID;
1558 ndlp->nlp_type |= NLP_FABRIC; 2340 ndlp->nlp_type |= NLP_FABRIC;
1559 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 2341 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1560 2342
@@ -2055,7 +2837,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
2055 if (pring->ringno == LPFC_ELS_RING) { 2837 if (pring->ringno == LPFC_ELS_RING) {
2056 switch (icmd->ulpCommand) { 2838 switch (icmd->ulpCommand) {
2057 case CMD_GEN_REQUEST64_CR: 2839 case CMD_GEN_REQUEST64_CR:
2058 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) 2840 if (iocb->context_un.ndlp == ndlp)
2059 return 1; 2841 return 1;
2060 case CMD_ELS_REQUEST64_CR: 2842 case CMD_ELS_REQUEST64_CR:
2061 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID) 2843 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
@@ -2102,7 +2884,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
2102 */ 2884 */
2103 psli = &phba->sli; 2885 psli = &phba->sli;
2104 rpi = ndlp->nlp_rpi; 2886 rpi = ndlp->nlp_rpi;
2105 if (rpi) { 2887 if (ndlp->nlp_flag & NLP_RPI_VALID) {
2106 /* Now process each ring */ 2888 /* Now process each ring */
2107 for (i = 0; i < psli->num_rings; i++) { 2889 for (i = 0; i < psli->num_rings; i++) {
2108 pring = &psli->ring[i]; 2890 pring = &psli->ring[i];
@@ -2150,7 +2932,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2150 LPFC_MBOXQ_t *mbox; 2932 LPFC_MBOXQ_t *mbox;
2151 int rc; 2933 int rc;
2152 2934
2153 if (ndlp->nlp_rpi) { 2935 if (ndlp->nlp_flag & NLP_RPI_VALID) {
2154 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2936 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2155 if (mbox) { 2937 if (mbox) {
2156 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox); 2938 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
@@ -2162,6 +2944,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2162 } 2944 }
2163 lpfc_no_rpi(phba, ndlp); 2945 lpfc_no_rpi(phba, ndlp);
2164 ndlp->nlp_rpi = 0; 2946 ndlp->nlp_rpi = 0;
2947 ndlp->nlp_flag &= ~NLP_RPI_VALID;
2948 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2165 return 1; 2949 return 1;
2166 } 2950 }
2167 return 0; 2951 return 0;
@@ -2252,7 +3036,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2252 3036
2253 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 3037 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
2254 if ((mb = phba->sli.mbox_active)) { 3038 if ((mb = phba->sli.mbox_active)) {
2255 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 3039 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
2256 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 3040 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
2257 mb->context2 = NULL; 3041 mb->context2 = NULL;
2258 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3042 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -2261,7 +3045,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2261 3045
2262 spin_lock_irq(&phba->hbalock); 3046 spin_lock_irq(&phba->hbalock);
2263 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 3047 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
2264 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 3048 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
2265 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 3049 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
2266 mp = (struct lpfc_dmabuf *) (mb->context1); 3050 mp = (struct lpfc_dmabuf *) (mb->context1);
2267 if (mp) { 3051 if (mp) {
@@ -2309,13 +3093,14 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2309 int rc; 3093 int rc;
2310 3094
2311 lpfc_cancel_retry_delay_tmo(vport, ndlp); 3095 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2312 if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) { 3096 if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
3097 !(ndlp->nlp_flag & NLP_RPI_VALID)) {
2313 /* For this case we need to cleanup the default rpi 3098 /* For this case we need to cleanup the default rpi
2314 * allocated by the firmware. 3099 * allocated by the firmware.
2315 */ 3100 */
2316 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) 3101 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
2317 != NULL) { 3102 != NULL) {
2318 rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID, 3103 rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
2319 (uint8_t *) &vport->fc_sparam, mbox, 0); 3104 (uint8_t *) &vport->fc_sparam, mbox, 0);
2320 if (rc) { 3105 if (rc) {
2321 mempool_free(mbox, phba->mbox_mem_pool); 3106 mempool_free(mbox, phba->mbox_mem_pool);
@@ -2553,7 +3338,8 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
2553 * clear_la then don't send it. 3338 * clear_la then don't send it.
2554 */ 3339 */
2555 if ((phba->link_state >= LPFC_CLEAR_LA) || 3340 if ((phba->link_state >= LPFC_CLEAR_LA) ||
2556 (vport->port_type != LPFC_PHYSICAL_PORT)) 3341 (vport->port_type != LPFC_PHYSICAL_PORT) ||
3342 (phba->sli_rev == LPFC_SLI_REV4))
2557 return; 3343 return;
2558 3344
2559 /* Link up discovery */ 3345 /* Link up discovery */
@@ -2582,7 +3368,7 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
2582 3368
2583 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3369 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2584 if (regvpimbox) { 3370 if (regvpimbox) {
2585 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox); 3371 lpfc_reg_vpi(vport, regvpimbox);
2586 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; 3372 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
2587 regvpimbox->vport = vport; 3373 regvpimbox->vport = vport;
2588 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) 3374 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
@@ -2642,7 +3428,8 @@ lpfc_disc_start(struct lpfc_vport *vport)
2642 */ 3428 */
2643 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3429 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2644 !(vport->fc_flag & FC_PT2PT) && 3430 !(vport->fc_flag & FC_PT2PT) &&
2645 !(vport->fc_flag & FC_RSCN_MODE)) { 3431 !(vport->fc_flag & FC_RSCN_MODE) &&
3432 (phba->sli_rev < LPFC_SLI_REV4)) {
2646 lpfc_issue_reg_vpi(phba, vport); 3433 lpfc_issue_reg_vpi(phba, vport);
2647 return; 3434 return;
2648 } 3435 }
@@ -2919,11 +3706,13 @@ restart_disc:
2919 * set port_state to PORT_READY if SLI2. 3706 * set port_state to PORT_READY if SLI2.
2920 * cmpl_reg_vpi will set port_state to READY for SLI3. 3707 * cmpl_reg_vpi will set port_state to READY for SLI3.
2921 */ 3708 */
2922 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3709 if (phba->sli_rev < LPFC_SLI_REV4) {
2923 lpfc_issue_reg_vpi(phba, vport); 3710 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2924 else { /* NPIV Not enabled */ 3711 lpfc_issue_reg_vpi(phba, vport);
2925 lpfc_issue_clear_la(phba, vport); 3712 else { /* NPIV Not enabled */
2926 vport->port_state = LPFC_VPORT_READY; 3713 lpfc_issue_clear_la(phba, vport);
3714 vport->port_state = LPFC_VPORT_READY;
3715 }
2927 } 3716 }
2928 3717
2929 /* Setup and issue mailbox INITIALIZE LINK command */ 3718 /* Setup and issue mailbox INITIALIZE LINK command */
@@ -2939,7 +3728,7 @@ restart_disc:
2939 lpfc_linkdown(phba); 3728 lpfc_linkdown(phba);
2940 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, 3729 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
2941 phba->cfg_link_speed); 3730 phba->cfg_link_speed);
2942 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0; 3731 initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
2943 initlinkmbox->vport = vport; 3732 initlinkmbox->vport = vport;
2944 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3733 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2945 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT); 3734 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
@@ -2959,11 +3748,13 @@ restart_disc:
2959 * set port_state to PORT_READY if SLI2. 3748 * set port_state to PORT_READY if SLI2.
2960 * cmpl_reg_vpi will set port_state to READY for SLI3. 3749 * cmpl_reg_vpi will set port_state to READY for SLI3.
2961 */ 3750 */
2962 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3751 if (phba->sli_rev < LPFC_SLI_REV4) {
2963 lpfc_issue_reg_vpi(phba, vport); 3752 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2964 else { /* NPIV Not enabled */ 3753 lpfc_issue_reg_vpi(phba, vport);
2965 lpfc_issue_clear_la(phba, vport); 3754 else { /* NPIV Not enabled */
2966 vport->port_state = LPFC_VPORT_READY; 3755 lpfc_issue_clear_la(phba, vport);
3756 vport->port_state = LPFC_VPORT_READY;
3757 }
2967 } 3758 }
2968 break; 3759 break;
2969 3760
@@ -3036,7 +3827,7 @@ restart_disc:
3036void 3827void
3037lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3828lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3038{ 3829{
3039 MAILBOX_t *mb = &pmb->mb; 3830 MAILBOX_t *mb = &pmb->u.mb;
3040 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3831 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3041 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3832 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3042 struct lpfc_vport *vport = pmb->vport; 3833 struct lpfc_vport *vport = pmb->vport;
@@ -3044,6 +3835,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3044 pmb->context1 = NULL; 3835 pmb->context1 = NULL;
3045 3836
3046 ndlp->nlp_rpi = mb->un.varWords[0]; 3837 ndlp->nlp_rpi = mb->un.varWords[0];
3838 ndlp->nlp_flag |= NLP_RPI_VALID;
3047 ndlp->nlp_type |= NLP_FABRIC; 3839 ndlp->nlp_type |= NLP_FABRIC;
3048 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 3840 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3049 3841
@@ -3297,3 +4089,395 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
3297 return 1; 4089 return 1;
3298 return 0; 4090 return 0;
3299} 4091}
4092
4093/**
4094 * lpfc_fcf_inuse - Check if FCF can be unregistered.
4095 * @phba: Pointer to hba context object.
4096 *
 4097 * This function iterates through all FC nodes associated
 4098 * with all vports to check whether any node has an
 4099 * fc_rport associated with it. If there is an fc_rport
 4100 * associated with a node, then that node is either in
 4101 * discovered state or its devloss_timer is pending.
4102 */
4103static int
4104lpfc_fcf_inuse(struct lpfc_hba *phba)
4105{
4106 struct lpfc_vport **vports;
4107 int i, ret = 0;
4108 struct lpfc_nodelist *ndlp;
4109 struct Scsi_Host *shost;
4110
4111 vports = lpfc_create_vport_work_array(phba);
4112
4113 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4114 shost = lpfc_shost_from_vport(vports[i]);
4115 spin_lock_irq(shost->host_lock);
4116 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
4117 if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
4118 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
4119 ret = 1;
4120 spin_unlock_irq(shost->host_lock);
4121 goto out;
4122 }
4123 }
4124 spin_unlock_irq(shost->host_lock);
4125 }
4126out:
4127 lpfc_destroy_vport_work_array(phba, vports);
4128 return ret;
4129}
4130
4131/**
4132 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
4133 * @phba: Pointer to hba context object.
4134 * @mboxq: Pointer to mailbox object.
4135 *
4136 * This function frees memory associated with the mailbox command.
4137 */
4138static void
4139lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
4140{
4141 struct lpfc_vport *vport = mboxq->vport;
4142
4143 if (mboxq->u.mb.mbxStatus) {
4144 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4145 "2555 UNREG_VFI mbxStatus error x%x "
4146 "HBA state x%x\n",
4147 mboxq->u.mb.mbxStatus, vport->port_state);
4148 }
4149 mempool_free(mboxq, phba->mbox_mem_pool);
4150 return;
4151}
4152
4153/**
4154 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
4155 * @phba: Pointer to hba context object.
4156 * @mboxq: Pointer to mailbox object.
4157 *
4158 * This function frees memory associated with the mailbox command.
4159 */
4160static void
4161lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
4162{
4163 struct lpfc_vport *vport = mboxq->vport;
4164
4165 if (mboxq->u.mb.mbxStatus) {
4166 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4167 "2550 UNREG_FCFI mbxStatus error x%x "
4168 "HBA state x%x\n",
4169 mboxq->u.mb.mbxStatus, vport->port_state);
4170 }
4171 mempool_free(mboxq, phba->mbox_mem_pool);
4172 return;
4173}
4174
4175/**
4176 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
4177 * @phba: Pointer to hba context object.
4178 *
 4179 * This function checks if there are any connected remote ports for the FCF
 4180 * and, if all the devices are disconnected, unregisters the FCFI.
 4181 * It also tries to use another FCF for discovery.
4182 */
4183void
4184lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
4185{
4186 LPFC_MBOXQ_t *mbox;
4187 int rc;
4188 struct lpfc_vport **vports;
4189 int i;
4190
4191 spin_lock_irq(&phba->hbalock);
4192 /*
4193	 * If the HBA is not running in FIP mode, or
4194	 * the HBA does not support FCoE, or
4195	 * no FCF is registered,
4196	 * do nothing.
4197 */
4198 if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
4199 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
4200 (phba->cfg_enable_fip == 0)) {
4201 spin_unlock_irq(&phba->hbalock);
4202 return;
4203 }
4204 spin_unlock_irq(&phba->hbalock);
4205
4206 if (lpfc_fcf_inuse(phba))
4207 return;
4208
4209
4210 /* Unregister VPIs */
4211 vports = lpfc_create_vport_work_array(phba);
4212 if (vports &&
4213 (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
4214 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4215 lpfc_mbx_unreg_vpi(vports[i]);
4216 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4217 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
4218 }
4219 lpfc_destroy_vport_work_array(phba, vports);
4220
4221 /* Unregister VFI */
4222 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4223 if (!mbox) {
4224 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4225				"2556 UNREG_VFI mbox allocation failed "
4226 "HBA state x%x\n",
4227 phba->pport->port_state);
4228 return;
4229 }
4230
4231 lpfc_unreg_vfi(mbox, phba->pport->vfi);
4232 mbox->vport = phba->pport;
4233 mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
4234
4235 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4236 if (rc == MBX_NOT_FINISHED) {
4237 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4238 "2557 UNREG_VFI issue mbox failed rc x%x "
4239 "HBA state x%x\n",
4240 rc, phba->pport->port_state);
4241 mempool_free(mbox, phba->mbox_mem_pool);
4242 return;
4243 }
4244
4245 /* Unregister FCF */
4246 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4247 if (!mbox) {
4248 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4249				"2551 UNREG_FCFI mbox allocation failed "
4250 "HBA state x%x\n",
4251 phba->pport->port_state);
4252 return;
4253 }
4254
4255 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
4256 mbox->vport = phba->pport;
4257 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
4258 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4259
4260 if (rc == MBX_NOT_FINISHED) {
4261 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4262 "2552 UNREG_FCFI issue mbox failed rc x%x "
4263 "HBA state x%x\n",
4264 rc, phba->pport->port_state);
4265 mempool_free(mbox, phba->mbox_mem_pool);
4266 return;
4267 }
4268
4269 spin_lock_irq(&phba->hbalock);
4270 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED |
4271 FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE |
4272 FCF_VALID_VLAN);
4273 spin_unlock_irq(&phba->hbalock);
4274
4275 /*
4276	 * If the driver is not unloading and the link is still up, check if
4277	 * there is any other FCF record that can be used for discovery.
4278 */
4279 if ((phba->pport->load_flag & FC_UNLOADING) ||
4280 (phba->link_state < LPFC_LINK_UP))
4281 return;
4282
4283 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
4284
4285 if (rc)
4286 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4287 "2553 lpfc_unregister_unused_fcf failed to read FCF"
4288 " record HBA state x%x\n",
4289 phba->pport->port_state);
4290}
4291
4292/**
4293 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
4294 * @phba: Pointer to hba context object.
4295 * @buff: Buffer containing the FCF connection table as in the config
4296 * region.
 4297 * This function creates the driver data structure for the FCF connection
 4298 * record table read from config region 23.
4299 */
4300static void
4301lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
4302 uint8_t *buff)
4303{
4304 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4305 struct lpfc_fcf_conn_hdr *conn_hdr;
4306 struct lpfc_fcf_conn_rec *conn_rec;
4307 uint32_t record_count;
4308 int i;
4309
4310 /* Free the current connect table */
4311 list_for_each_entry_safe(conn_entry, next_conn_entry,
4312 &phba->fcf_conn_rec_list, list)
4313 kfree(conn_entry);
4314
4315 conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
4316 record_count = conn_hdr->length * sizeof(uint32_t)/
4317 sizeof(struct lpfc_fcf_conn_rec);
4318
4319 conn_rec = (struct lpfc_fcf_conn_rec *)
4320 (buff + sizeof(struct lpfc_fcf_conn_hdr));
4321
4322 for (i = 0; i < record_count; i++) {
4323 if (!(conn_rec[i].flags & FCFCNCT_VALID))
4324 continue;
4325 conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
4326 GFP_KERNEL);
4327 if (!conn_entry) {
4328 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4329 "2566 Failed to allocate connection"
4330 " table entry\n");
4331 return;
4332 }
4333
4334 memcpy(&conn_entry->conn_rec, &conn_rec[i],
4335 sizeof(struct lpfc_fcf_conn_rec));
4336 conn_entry->conn_rec.vlan_tag =
4337 le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
4338 conn_entry->conn_rec.flags =
4339 le16_to_cpu(conn_entry->conn_rec.flags);
4340 list_add_tail(&conn_entry->list,
4341 &phba->fcf_conn_rec_list);
4342 }
4343}
4344
4345/**
 4346 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
4347 * @phba: Pointer to hba context object.
4348 * @buff: Buffer containing the FCoE parameter data structure.
4349 *
 4350 * This function updates the driver data structure with config
 4351 * parameters read from config region 23.
4352 */
4353static void
4354lpfc_read_fcoe_param(struct lpfc_hba *phba,
4355 uint8_t *buff)
4356{
4357 struct lpfc_fip_param_hdr *fcoe_param_hdr;
4358 struct lpfc_fcoe_params *fcoe_param;
4359
4360 fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
4361 buff;
4362	fcoe_param = (struct lpfc_fcoe_params *)
4363			(buff + sizeof(struct lpfc_fip_param_hdr));
4364
4365 if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
4366 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
4367 return;
4368
4369 if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
4370 FIPP_MODE_ON)
4371 phba->cfg_enable_fip = 1;
4372
4373 if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
4374 FIPP_MODE_OFF)
4375 phba->cfg_enable_fip = 0;
4376
4377 if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
4378 phba->valid_vlan = 1;
4379 phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
4380 0xFFF;
4381 }
4382
4383 phba->fc_map[0] = fcoe_param->fc_map[0];
4384 phba->fc_map[1] = fcoe_param->fc_map[1];
4385 phba->fc_map[2] = fcoe_param->fc_map[2];
4386 return;
4387}
4388
4389/**
4390 * lpfc_get_rec_conf23 - Get a record type in config region data.
4391 * @buff: Buffer containing config region 23 data.
4392 * @size: Size of the data buffer.
4393 * @rec_type: Record type to be searched.
4394 *
 4395 * This function searches config region data to find the beginning
 4396 * of the record specified by record_type. If the record is found, this
 4397 * function returns a pointer to the record, else it returns NULL.
4398 */
4399static uint8_t *
4400lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
4401{
4402 uint32_t offset = 0, rec_length;
4403
4404 if ((buff[0] == LPFC_REGION23_LAST_REC) ||
4405 (size < sizeof(uint32_t)))
4406 return NULL;
4407
4408 rec_length = buff[offset + 1];
4409
4410 /*
4411	 * One TLV record has a one-word header and the number of data words
4412	 * specified in the rec_length field of the record header.
4413 */
4414 while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
4415 <= size) {
4416 if (buff[offset] == rec_type)
4417 return &buff[offset];
4418
4419 if (buff[offset] == LPFC_REGION23_LAST_REC)
4420 return NULL;
4421
4422 offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
4423 rec_length = buff[offset + 1];
4424 }
4425 return NULL;
4426}
4427
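lpfc_get_rec_conf23 walks region 23 as a sequence of TLV records: byte 0 of each record is the type, byte 1 is the payload length in 32-bit words, and the payload follows the one-word header. A self-contained sketch of the same walk over a made-up buffer, with local constants rather than the driver's, is:

#include <stdint.h>
#include <stddef.h>

#define LAST_REC 0xff	/* stands in for LPFC_REGION23_LAST_REC */

/* Return a pointer to the first record of the requested type, or NULL. */
static const uint8_t *find_rec(const uint8_t *buf, uint32_t size, uint8_t type)
{
	uint32_t off = 0;

	while (off + sizeof(uint32_t) <= size && buf[off] != LAST_REC) {
		uint32_t len = buf[off + 1] * sizeof(uint32_t);

		if (off + sizeof(uint32_t) + len > size)
			break;			/* truncated record */
		if (buf[off] == type)
			return &buf[off];
		off += sizeof(uint32_t) + len;	/* header word + payload */
	}
	return NULL;
}

int main(void)
{
	/* type 0xA0 with one data word, then the terminating record */
	uint8_t region[] = { 0xA0, 1, 0, 0, 0xDE, 0xAD, 0xBE, 0xEF,
			     LAST_REC, 0, 0, 0 };

	return find_rec(region, sizeof(region), 0xA0) ? 0 : 1;
}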
4428/**
4429 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
4430 * @phba: Pointer to lpfc_hba data structure.
4431 * @buff: Buffer containing config region 23 data.
4432 * @size: Size of the data buffer.
4433 *
 4434 * This function parses the FCoE config parameters in config region 23 and
 4435 * populates the driver data structure with the parameters.
4436 */
4437void
4438lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
4439 uint8_t *buff,
4440 uint32_t size)
4441{
4442 uint32_t offset = 0, rec_length;
4443 uint8_t *rec_ptr;
4444
4445 /*
4446	 * If the data size is less than 2 words, the signature and version cannot
4447	 * be verified.
4448 */
4449 if (size < 2*sizeof(uint32_t))
4450 return;
4451
4452 /* Check the region signature first */
4453 if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
4454 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4455 "2567 Config region 23 has bad signature\n");
4456 return;
4457 }
4458
4459 offset += 4;
4460
4461 /* Check the data structure version */
4462 if (buff[offset] != LPFC_REGION23_VERSION) {
4463 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4464 "2568 Config region 23 has bad version\n");
4465 return;
4466 }
4467 offset += 4;
4468
4469 rec_length = buff[offset + 1];
4470
4471 /* Read FCoE param record */
4472 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
4473 size - offset, FCOE_PARAM_TYPE);
4474 if (rec_ptr)
4475 lpfc_read_fcoe_param(phba, rec_ptr);
4476
4477 /* Read FCF connection table */
4478 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
4479 size - offset, FCOE_CONN_TBL_TYPE);
4480 if (rec_ptr)
4481 lpfc_read_fcf_conn_tbl(phba, rec_ptr);
4482
4483}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 4168c7b498b8..8a3a026667e4 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -471,6 +471,35 @@ struct serv_parm { /* Structure is in Big Endian format */
471}; 471};
472 472
473/* 473/*
474 * Virtual Fabric Tagging Header
475 */
476struct fc_vft_header {
477 uint32_t word0;
478#define fc_vft_hdr_r_ctl_SHIFT 24
479#define fc_vft_hdr_r_ctl_MASK 0xFF
480#define fc_vft_hdr_r_ctl_WORD word0
481#define fc_vft_hdr_ver_SHIFT 22
482#define fc_vft_hdr_ver_MASK 0x3
483#define fc_vft_hdr_ver_WORD word0
484#define fc_vft_hdr_type_SHIFT 18
485#define fc_vft_hdr_type_MASK 0xF
486#define fc_vft_hdr_type_WORD word0
487#define fc_vft_hdr_e_SHIFT 16
488#define fc_vft_hdr_e_MASK 0x1
489#define fc_vft_hdr_e_WORD word0
490#define fc_vft_hdr_priority_SHIFT 13
491#define fc_vft_hdr_priority_MASK 0x7
492#define fc_vft_hdr_priority_WORD word0
493#define fc_vft_hdr_vf_id_SHIFT 1
494#define fc_vft_hdr_vf_id_MASK 0xFFF
495#define fc_vft_hdr_vf_id_WORD word0
496 uint32_t word1;
497#define fc_vft_hdr_hopct_SHIFT 24
498#define fc_vft_hdr_hopct_MASK 0xFF
499#define fc_vft_hdr_hopct_WORD word1
500};
501
502/*
474 * Extended Link Service LS_COMMAND codes (Payload Word 0) 503 * Extended Link Service LS_COMMAND codes (Payload Word 0)
475 */ 504 */
476#ifdef __BIG_ENDIAN_BITFIELD 505#ifdef __BIG_ENDIAN_BITFIELD
@@ -1152,6 +1181,8 @@ typedef struct {
1152#define PCI_DEVICE_ID_HORNET 0xfe05 1181#define PCI_DEVICE_ID_HORNET 0xfe05
1153#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11 1182#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
1154#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 1183#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
1184#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
1185#define PCI_DEVICE_ID_TIGERSHARK 0x0704
1155 1186
1156#define JEDEC_ID_ADDRESS 0x0080001c 1187#define JEDEC_ID_ADDRESS 0x0080001c
1157#define FIREFLY_JEDEC_ID 0x1ACC 1188#define FIREFLY_JEDEC_ID 0x1ACC
@@ -1342,15 +1373,21 @@ typedef struct { /* FireFly BIU registers */
1342#define MBX_READ_LA64 0x95 1373#define MBX_READ_LA64 0x95
1343#define MBX_REG_VPI 0x96 1374#define MBX_REG_VPI 0x96
1344#define MBX_UNREG_VPI 0x97 1375#define MBX_UNREG_VPI 0x97
1345#define MBX_REG_VNPID 0x96
1346#define MBX_UNREG_VNPID 0x97
1347 1376
1348#define MBX_WRITE_WWN 0x98 1377#define MBX_WRITE_WWN 0x98
1349#define MBX_SET_DEBUG 0x99 1378#define MBX_SET_DEBUG 0x99
1350#define MBX_LOAD_EXP_ROM 0x9C 1379#define MBX_LOAD_EXP_ROM 0x9C
1351 1380#define MBX_SLI4_CONFIG 0x9B
1352#define MBX_MAX_CMDS 0x9D 1381#define MBX_SLI4_REQ_FTRS 0x9D
1382#define MBX_MAX_CMDS 0x9E
1383#define MBX_RESUME_RPI 0x9E
1353#define MBX_SLI2_CMD_MASK 0x80 1384#define MBX_SLI2_CMD_MASK 0x80
1385#define MBX_REG_VFI 0x9F
1386#define MBX_REG_FCFI 0xA0
1387#define MBX_UNREG_VFI 0xA1
1388#define MBX_UNREG_FCFI 0xA2
1389#define MBX_INIT_VFI 0xA3
1390#define MBX_INIT_VPI 0xA4
1354 1391
1355/* IOCB Commands */ 1392/* IOCB Commands */
1356 1393
@@ -1440,6 +1477,16 @@ typedef struct { /* FireFly BIU registers */
1440#define CMD_IOCB_LOGENTRY_CN 0x94 1477#define CMD_IOCB_LOGENTRY_CN 0x94
1441#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 1478#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96
1442 1479
1480/* Unhandled Data Security SLI Commands */
1481#define DSSCMD_IWRITE64_CR 0xD8
1482#define DSSCMD_IWRITE64_CX 0xD9
1483#define DSSCMD_IREAD64_CR 0xDA
1484#define DSSCMD_IREAD64_CX 0xDB
1485#define DSSCMD_INVALIDATE_DEK 0xDC
1486#define DSSCMD_SET_KEK 0xDD
1487#define DSSCMD_GET_KEK_ID 0xDE
1488#define DSSCMD_GEN_XFER 0xDF
1489
 #define CMD_MAX_IOCB_CMD 0xE6
 #define CMD_IOCB_MASK 0xff

@@ -1466,6 +1513,7 @@ typedef struct { /* FireFly BIU registers */
 #define MBXERR_BAD_RCV_LENGTH 14
 #define MBXERR_DMA_ERROR 15
 #define MBXERR_ERROR 16
+#define MBXERR_LINK_DOWN 0x33
 #define MBX_NOT_FINISHED 255

 #define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */
@@ -1504,32 +1552,6 @@ struct ulp_bde {
 #endif
 };

-struct ulp_bde64 { /* SLI-2 */
- union ULP_BDE_TUS {
- uint32_t w;
- struct {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
- VALUE !! */
- uint32_t bdeSize:24; /* Size of buffer (in bytes) */
-#else /* __LITTLE_ENDIAN_BITFIELD */
- uint32_t bdeSize:24; /* Size of buffer (in bytes) */
- uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
- VALUE !! */
-#endif
-#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */
-#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */
-#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */
-#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */
-#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */
-#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */
-#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */
- } f;
- } tus;
- uint32_t addrLow;
- uint32_t addrHigh;
-};
-
 typedef struct ULP_BDL { /* SLI-2 */
 #ifdef __BIG_ENDIAN_BITFIELD
 uint32_t bdeFlags:8; /* BDL Flags */
@@ -2287,7 +2309,7 @@ typedef struct {
 uint32_t rsvd3;
 uint32_t rsvd4;
 uint32_t rsvd5;
- uint16_t rsvd6;
+ uint16_t vfi;
 uint16_t vpi;
 #else /* __LITTLE_ENDIAN */
 uint32_t rsvd1;
@@ -2297,7 +2319,7 @@ typedef struct {
 uint32_t rsvd4;
 uint32_t rsvd5;
 uint16_t vpi;
- uint16_t rsvd6;
+ uint16_t vfi;
 #endif
 } REG_VPI_VAR;

@@ -2457,7 +2479,7 @@ typedef struct {
 uint32_t entry_index:16;
 #endif

- uint32_t rsvd1;
+ uint32_t sli4_length;
 uint32_t word_cnt;
 uint32_t resp_offset;
 } DUMP_VAR;
@@ -2470,9 +2492,32 @@ typedef struct {
 #define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
 #define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */

+#define DMP_REGION_VPORT 0x16 /* VPort info region */
+#define DMP_VPORT_REGION_SIZE 0x200
+#define DMP_MBOX_OFFSET_WORD 0x5
+
+#define DMP_REGION_FCOEPARAM 0x17 /* fcoe param region */
+#define DMP_FCOEPARAM_RGN_SIZE 0x400
+
 #define WAKE_UP_PARMS_REGION_ID 4
 #define WAKE_UP_PARMS_WORD_SIZE 15

+struct vport_rec {
+ uint8_t wwpn[8];
+ uint8_t wwnn[8];
+};
+
+#define VPORT_INFO_SIG 0x32324752
+#define VPORT_INFO_REV_MASK 0xff
+#define VPORT_INFO_REV 0x1
+#define MAX_STATIC_VPORT_COUNT 16
+struct static_vport_info {
+ uint32_t signature;
+ uint32_t rev;
+ struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT];
+ uint32_t resvd[66];
+};
+
 /* Option rom version structure */
 struct prog_id {
 #ifdef __BIG_ENDIAN_BITFIELD
@@ -2697,7 +2742,9 @@ typedef struct {
 #endif

 #ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd1 : 23; /* Reserved */
+ uint32_t rsvd1 : 19; /* Reserved */
+ uint32_t cdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd2 : 3; /* Reserved */
 uint32_t cbg : 1; /* Configure BlockGuard */
 uint32_t cmv : 1; /* Configure Max VPIs */
 uint32_t ccrp : 1; /* Config Command Ring Polling */
@@ -2717,10 +2764,14 @@ typedef struct {
 uint32_t ccrp : 1; /* Config Command Ring Polling */
 uint32_t cmv : 1; /* Configure Max VPIs */
 uint32_t cbg : 1; /* Configure BlockGuard */
- uint32_t rsvd1 : 23; /* Reserved */
+ uint32_t rsvd2 : 3; /* Reserved */
+ uint32_t cdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd1 : 19; /* Reserved */
 #endif
 #ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd2 : 23; /* Reserved */
+ uint32_t rsvd3 : 19; /* Reserved */
+ uint32_t gdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd4 : 3; /* Reserved */
 uint32_t gbg : 1; /* Grant BlockGuard */
 uint32_t gmv : 1; /* Grant Max VPIs */
 uint32_t gcrp : 1; /* Grant Command Ring Polling */
@@ -2740,7 +2791,9 @@ typedef struct {
 uint32_t gcrp : 1; /* Grant Command Ring Polling */
 uint32_t gmv : 1; /* Grant Max VPIs */
 uint32_t gbg : 1; /* Grant BlockGuard */
- uint32_t rsvd2 : 23; /* Reserved */
+ uint32_t rsvd4 : 3; /* Reserved */
+ uint32_t gdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd3 : 19; /* Reserved */
 #endif

 #ifdef __BIG_ENDIAN_BITFIELD
@@ -2753,20 +2806,20 @@ typedef struct {

 #ifdef __BIG_ENDIAN_BITFIELD
 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */
- uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */
+ uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */
 #else /* __LITTLE_ENDIAN */
- uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */
+ uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */
 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */
 #endif

- uint32_t rsvd4; /* Reserved */
+ uint32_t rsvd6; /* Reserved */

 #ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd5 : 16; /* Reserved */
+ uint32_t rsvd7 : 16; /* Reserved */
 uint32_t max_vpi : 16; /* Max number of virt N-Ports */
 #else /* __LITTLE_ENDIAN */
 uint32_t max_vpi : 16; /* Max number of virt N-Ports */
- uint32_t rsvd5 : 16; /* Reserved */
+ uint32_t rsvd7 : 16; /* Reserved */
 #endif

 } CONFIG_PORT_VAR;
@@ -3666,3 +3719,5 @@ lpfc_error_lost_link(IOCB_t *iocbp)
 #define MENLO_TIMEOUT 30
 #define SETVAR_MLOMNT 0x103107
 #define SETVAR_MLORST 0x103007
+
+#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */
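For orientation, a minimal sketch (not part of the patch) of how the static vport record region added above might be parsed. The function name, the zero-WWPN test for unused slots, the error convention, and the memcmp include are assumptions; the structure layout and the VPORT_INFO_* constants come from the hunk above.

/* Illustrative only; assumes <linux/string.h> for memcmp and the
 * static_vport_info/vport_rec definitions added by this patch. */
static int example_count_static_vports(const struct static_vport_info *info)
{
	static const uint8_t zero_wwn[8];	/* all-zero WWPN == unused slot (assumption) */
	int i, count = 0;

	/* Reject regions without the expected signature/revision. */
	if (info->signature != VPORT_INFO_SIG ||
	    (info->rev & VPORT_INFO_REV_MASK) != VPORT_INFO_REV)
		return -1;

	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++)
		if (memcmp(info->vport_list[i].wwpn, zero_wwn, 8) != 0)
			count++;

	return count;
}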
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
new file mode 100644
index 000000000000..2995d128f07f
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -0,0 +1,2141 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20
21/* Macros to deal with bit fields. Each bit field must have 3 #defines
22 * associated with it (_SHIFT, _MASK, and _WORD).
23 * EG. For a bit field that is in the 7th bit of the "field4" field of a
24 * structure and is 2 bits in size the following #defines must exist:
25 * struct temp {
26 * uint32_t field1;
27 * uint32_t field2;
28 * uint32_t field3;
29 * uint32_t field4;
30 * #define example_bit_field_SHIFT 7
31 * #define example_bit_field_MASK 0x03
32 * #define example_bit_field_WORD field4
33 * uint32_t field5;
34 * };
35 * Then the macros below may be used to get or set the value of that field.
36 * EG. To get the value of the bit field from the above example:
37 * struct temp t1;
38 * value = bf_get(example_bit_field, &t1);
39 * And then to set that bit field:
40 * bf_set(example_bit_field, &t1, 2);
41 * Or clear that bit field:
42 * bf_set(example_bit_field, &t1, 0);
43 */
44#define bf_get(name, ptr) \
45 (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
46#define bf_set(name, ptr, value) \
47 ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
48 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
49
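As a quick illustration (not part of the file), here is the "struct temp"/example_bit_field layout from the comment above worked through the two macros; the helper function is hypothetical, everything else follows the comment verbatim.

/* Illustrative only: bf_set() read-modify-writes just the named field,
 * bf_get() extracts it. */
struct temp {
	uint32_t field1;
	uint32_t field2;
	uint32_t field3;
	uint32_t field4;
#define example_bit_field_SHIFT 7
#define example_bit_field_MASK  0x03
#define example_bit_field_WORD  field4
	uint32_t field5;
};

static void example_bf_usage(void)
{
	struct temp t1 = { 0 };
	uint32_t value;

	bf_set(example_bit_field, &t1, 2);	/* field4 becomes 2 << 7 == 0x100 */
	value = bf_get(example_bit_field, &t1);	/* value == 2 */
	(void)value;
}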
50struct dma_address {
51 uint32_t addr_lo;
52 uint32_t addr_hi;
53};
54
55#define LPFC_SLI4_BAR0 1
56#define LPFC_SLI4_BAR1 2
57#define LPFC_SLI4_BAR2 4
58
59#define LPFC_SLI4_MBX_EMBED true
60#define LPFC_SLI4_MBX_NEMBED false
61
62#define LPFC_SLI4_MB_WORD_COUNT 64
63#define LPFC_MAX_MQ_PAGE 8
64#define LPFC_MAX_WQ_PAGE 8
65#define LPFC_MAX_CQ_PAGE 4
66#define LPFC_MAX_EQ_PAGE 8
67
68#define LPFC_VIR_FUNC_MAX 32 /* Maximum number of virtual functions */
69#define LPFC_PCI_FUNC_MAX 5 /* Maximum number of PCI functions */
70#define LPFC_VFR_PAGE_SIZE 0x1000 /* 4KB BAR2 per-VF register page size */
71
72/* Define SLI4 Alignment requirements. */
73#define LPFC_ALIGN_16_BYTE 16
74#define LPFC_ALIGN_64_BYTE 64
75
76/* Define SLI4 specific definitions. */
77#define LPFC_MQ_CQE_BYTE_OFFSET 256
78#define LPFC_MBX_CMD_HDR_LENGTH 16
79#define LPFC_MBX_ERROR_RANGE 0x4000
80#define LPFC_BMBX_BIT1_ADDR_HI 0x2
81#define LPFC_BMBX_BIT1_ADDR_LO 0
82#define LPFC_RPI_HDR_COUNT 64
83#define LPFC_HDR_TEMPLATE_SIZE 4096
84#define LPFC_RPI_ALLOC_ERROR 0xFFFF
85#define LPFC_FCF_RECORD_WD_CNT 132
86#define LPFC_ENTIRE_FCF_DATABASE 0
87#define LPFC_DFLT_FCF_INDEX 0
88
89/* Virtual function numbers */
90#define LPFC_VF0 0
91#define LPFC_VF1 1
92#define LPFC_VF2 2
93#define LPFC_VF3 3
94#define LPFC_VF4 4
95#define LPFC_VF5 5
96#define LPFC_VF6 6
97#define LPFC_VF7 7
98#define LPFC_VF8 8
99#define LPFC_VF9 9
100#define LPFC_VF10 10
101#define LPFC_VF11 11
102#define LPFC_VF12 12
103#define LPFC_VF13 13
104#define LPFC_VF14 14
105#define LPFC_VF15 15
106#define LPFC_VF16 16
107#define LPFC_VF17 17
108#define LPFC_VF18 18
109#define LPFC_VF19 19
110#define LPFC_VF20 20
111#define LPFC_VF21 21
112#define LPFC_VF22 22
113#define LPFC_VF23 23
114#define LPFC_VF24 24
115#define LPFC_VF25 25
116#define LPFC_VF26 26
117#define LPFC_VF27 27
118#define LPFC_VF28 28
119#define LPFC_VF29 29
120#define LPFC_VF30 30
121#define LPFC_VF31 31
122
123/* PCI function numbers */
124#define LPFC_PCI_FUNC0 0
125#define LPFC_PCI_FUNC1 1
126#define LPFC_PCI_FUNC2 2
127#define LPFC_PCI_FUNC3 3
128#define LPFC_PCI_FUNC4 4
129
130/* Active interrupt test count */
131#define LPFC_ACT_INTR_CNT 4
132
133/* Delay Multiplier constant */
134#define LPFC_DMULT_CONST 651042
135#define LPFC_MIM_IMAX 636
136#define LPFC_FP_DEF_IMAX 10000
137#define LPFC_SP_DEF_IMAX 10000
138
139struct ulp_bde64 {
140 union ULP_BDE_TUS {
141 uint32_t w;
142 struct {
143#ifdef __BIG_ENDIAN_BITFIELD
144 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
145 VALUE !! */
146 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
147#else /* __LITTLE_ENDIAN_BITFIELD */
148 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
149 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
150 VALUE !! */
151#endif
152#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */
153#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */
154#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */
155#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */
156#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */
157#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */
158#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */
159 } f;
160 } tus;
161 uint32_t addrLow;
162 uint32_t addrHigh;
163};
164
165struct lpfc_sli4_flags {
166 uint32_t word0;
167#define lpfc_fip_flag_SHIFT 0
168#define lpfc_fip_flag_MASK 0x00000001
169#define lpfc_fip_flag_WORD word0
170};
171
172/* event queue entry structure */
173struct lpfc_eqe {
174 uint32_t word0;
175#define lpfc_eqe_resource_id_SHIFT 16
176#define lpfc_eqe_resource_id_MASK 0x000000FF
177#define lpfc_eqe_resource_id_WORD word0
178#define lpfc_eqe_minor_code_SHIFT 4
179#define lpfc_eqe_minor_code_MASK 0x00000FFF
180#define lpfc_eqe_minor_code_WORD word0
181#define lpfc_eqe_major_code_SHIFT 1
182#define lpfc_eqe_major_code_MASK 0x00000007
183#define lpfc_eqe_major_code_WORD word0
184#define lpfc_eqe_valid_SHIFT 0
185#define lpfc_eqe_valid_MASK 0x00000001
186#define lpfc_eqe_valid_WORD word0
187};
188
189/* completion queue entry structure (common fields for all cqe types) */
190struct lpfc_cqe {
191 uint32_t reserved0;
192 uint32_t reserved1;
193 uint32_t reserved2;
194 uint32_t word3;
195#define lpfc_cqe_valid_SHIFT 31
196#define lpfc_cqe_valid_MASK 0x00000001
197#define lpfc_cqe_valid_WORD word3
198#define lpfc_cqe_code_SHIFT 16
199#define lpfc_cqe_code_MASK 0x000000FF
200#define lpfc_cqe_code_WORD word3
201};
202
203/* Completion Queue Entry Status Codes */
204#define CQE_STATUS_SUCCESS 0x0
205#define CQE_STATUS_FCP_RSP_FAILURE 0x1
206#define CQE_STATUS_REMOTE_STOP 0x2
207#define CQE_STATUS_LOCAL_REJECT 0x3
208#define CQE_STATUS_NPORT_RJT 0x4
209#define CQE_STATUS_FABRIC_RJT 0x5
210#define CQE_STATUS_NPORT_BSY 0x6
211#define CQE_STATUS_FABRIC_BSY 0x7
212#define CQE_STATUS_INTERMED_RSP 0x8
213#define CQE_STATUS_LS_RJT 0x9
214#define CQE_STATUS_CMD_REJECT 0xb
215#define CQE_STATUS_FCP_TGT_LENCHECK 0xc
216#define CQE_STATUS_NEED_BUFF_ENTRY 0xf
217
218/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */
219#define CQE_HW_STATUS_NO_ERR 0x0
220#define CQE_HW_STATUS_UNDERRUN 0x1
221#define CQE_HW_STATUS_OVERRUN 0x2
222
223/* Completion Queue Entry Codes */
224#define CQE_CODE_COMPL_WQE 0x1
225#define CQE_CODE_RELEASE_WQE 0x2
226#define CQE_CODE_RECEIVE 0x4
227#define CQE_CODE_XRI_ABORTED 0x5
228
229/* completion queue entry for wqe completions */
230struct lpfc_wcqe_complete {
231 uint32_t word0;
232#define lpfc_wcqe_c_request_tag_SHIFT 16
233#define lpfc_wcqe_c_request_tag_MASK 0x0000FFFF
234#define lpfc_wcqe_c_request_tag_WORD word0
235#define lpfc_wcqe_c_status_SHIFT 8
236#define lpfc_wcqe_c_status_MASK 0x000000FF
237#define lpfc_wcqe_c_status_WORD word0
238#define lpfc_wcqe_c_hw_status_SHIFT 0
239#define lpfc_wcqe_c_hw_status_MASK 0x000000FF
240#define lpfc_wcqe_c_hw_status_WORD word0
241 uint32_t total_data_placed;
242 uint32_t parameter;
243 uint32_t word3;
244#define lpfc_wcqe_c_valid_SHIFT lpfc_cqe_valid_SHIFT
245#define lpfc_wcqe_c_valid_MASK lpfc_cqe_valid_MASK
246#define lpfc_wcqe_c_valid_WORD lpfc_cqe_valid_WORD
247#define lpfc_wcqe_c_xb_SHIFT 28
248#define lpfc_wcqe_c_xb_MASK 0x00000001
249#define lpfc_wcqe_c_xb_WORD word3
250#define lpfc_wcqe_c_pv_SHIFT 27
251#define lpfc_wcqe_c_pv_MASK 0x00000001
252#define lpfc_wcqe_c_pv_WORD word3
253#define lpfc_wcqe_c_priority_SHIFT 24
254#define lpfc_wcqe_c_priority_MASK 0x00000007
255#define lpfc_wcqe_c_priority_WORD word3
256#define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT
257#define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK
258#define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD
259};
260
261/* completion queue entry for wqe release */
262struct lpfc_wcqe_release {
263 uint32_t reserved0;
264 uint32_t reserved1;
265 uint32_t word2;
266#define lpfc_wcqe_r_wq_id_SHIFT 16
267#define lpfc_wcqe_r_wq_id_MASK 0x0000FFFF
268#define lpfc_wcqe_r_wq_id_WORD word2
269#define lpfc_wcqe_r_wqe_index_SHIFT 0
270#define lpfc_wcqe_r_wqe_index_MASK 0x0000FFFF
271#define lpfc_wcqe_r_wqe_index_WORD word2
272 uint32_t word3;
273#define lpfc_wcqe_r_valid_SHIFT lpfc_cqe_valid_SHIFT
274#define lpfc_wcqe_r_valid_MASK lpfc_cqe_valid_MASK
275#define lpfc_wcqe_r_valid_WORD lpfc_cqe_valid_WORD
276#define lpfc_wcqe_r_code_SHIFT lpfc_cqe_code_SHIFT
277#define lpfc_wcqe_r_code_MASK lpfc_cqe_code_MASK
278#define lpfc_wcqe_r_code_WORD lpfc_cqe_code_WORD
279};
280
281struct sli4_wcqe_xri_aborted {
282 uint32_t word0;
283#define lpfc_wcqe_xa_status_SHIFT 8
284#define lpfc_wcqe_xa_status_MASK 0x000000FF
285#define lpfc_wcqe_xa_status_WORD word0
286 uint32_t parameter;
287 uint32_t word2;
288#define lpfc_wcqe_xa_remote_xid_SHIFT 16
289#define lpfc_wcqe_xa_remote_xid_MASK 0x0000FFFF
290#define lpfc_wcqe_xa_remote_xid_WORD word2
291#define lpfc_wcqe_xa_xri_SHIFT 0
292#define lpfc_wcqe_xa_xri_MASK 0x0000FFFF
293#define lpfc_wcqe_xa_xri_WORD word2
294 uint32_t word3;
295#define lpfc_wcqe_xa_valid_SHIFT lpfc_cqe_valid_SHIFT
296#define lpfc_wcqe_xa_valid_MASK lpfc_cqe_valid_MASK
297#define lpfc_wcqe_xa_valid_WORD lpfc_cqe_valid_WORD
298#define lpfc_wcqe_xa_ia_SHIFT 30
299#define lpfc_wcqe_xa_ia_MASK 0x00000001
300#define lpfc_wcqe_xa_ia_WORD word3
301#define CQE_XRI_ABORTED_IA_REMOTE 0
302#define CQE_XRI_ABORTED_IA_LOCAL 1
303#define lpfc_wcqe_xa_br_SHIFT 29
304#define lpfc_wcqe_xa_br_MASK 0x00000001
305#define lpfc_wcqe_xa_br_WORD word3
306#define CQE_XRI_ABORTED_BR_BA_ACC 0
307#define CQE_XRI_ABORTED_BR_BA_RJT 1
308#define lpfc_wcqe_xa_eo_SHIFT 28
309#define lpfc_wcqe_xa_eo_MASK 0x00000001
310#define lpfc_wcqe_xa_eo_WORD word3
311#define CQE_XRI_ABORTED_EO_REMOTE 0
312#define CQE_XRI_ABORTED_EO_LOCAL 1
313#define lpfc_wcqe_xa_code_SHIFT lpfc_cqe_code_SHIFT
314#define lpfc_wcqe_xa_code_MASK lpfc_cqe_code_MASK
315#define lpfc_wcqe_xa_code_WORD lpfc_cqe_code_WORD
316};
317
318/* completion queue entry structure for rqe completion */
319struct lpfc_rcqe {
320 uint32_t word0;
321#define lpfc_rcqe_bindex_SHIFT 16
322#define lpfc_rcqe_bindex_MASK 0x0000FFF
323#define lpfc_rcqe_bindex_WORD word0
324#define lpfc_rcqe_status_SHIFT 8
325#define lpfc_rcqe_status_MASK 0x000000FF
326#define lpfc_rcqe_status_WORD word0
327#define FC_STATUS_RQ_SUCCESS 0x10 /* Async receive successful */
328#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */
329#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */
330#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */
331 uint32_t reserved1;
332 uint32_t word2;
333#define lpfc_rcqe_length_SHIFT 16
334#define lpfc_rcqe_length_MASK 0x0000FFFF
335#define lpfc_rcqe_length_WORD word2
336#define lpfc_rcqe_rq_id_SHIFT 6
337#define lpfc_rcqe_rq_id_MASK 0x000003FF
338#define lpfc_rcqe_rq_id_WORD word2
339#define lpfc_rcqe_fcf_id_SHIFT 0
340#define lpfc_rcqe_fcf_id_MASK 0x0000003F
341#define lpfc_rcqe_fcf_id_WORD word2
342 uint32_t word3;
343#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT
344#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK
345#define lpfc_rcqe_valid_WORD lpfc_cqe_valid_WORD
346#define lpfc_rcqe_port_SHIFT 30
347#define lpfc_rcqe_port_MASK 0x00000001
348#define lpfc_rcqe_port_WORD word3
349#define lpfc_rcqe_hdr_length_SHIFT 24
350#define lpfc_rcqe_hdr_length_MASK 0x0000001F
351#define lpfc_rcqe_hdr_length_WORD word3
352#define lpfc_rcqe_code_SHIFT lpfc_cqe_code_SHIFT
353#define lpfc_rcqe_code_MASK lpfc_cqe_code_MASK
354#define lpfc_rcqe_code_WORD lpfc_cqe_code_WORD
355#define lpfc_rcqe_eof_SHIFT 8
356#define lpfc_rcqe_eof_MASK 0x000000FF
357#define lpfc_rcqe_eof_WORD word3
358#define FCOE_EOFn 0x41
359#define FCOE_EOFt 0x42
360#define FCOE_EOFni 0x49
361#define FCOE_EOFa 0x50
362#define lpfc_rcqe_sof_SHIFT 0
363#define lpfc_rcqe_sof_MASK 0x000000FF
364#define lpfc_rcqe_sof_WORD word3
365#define FCOE_SOFi2 0x2d
366#define FCOE_SOFi3 0x2e
367#define FCOE_SOFn2 0x35
368#define FCOE_SOFn3 0x36
369};
370
371struct lpfc_wqe_generic{
372 struct ulp_bde64 bde;
373 uint32_t word3;
374 uint32_t word4;
375 uint32_t word5;
376 uint32_t word6;
377#define lpfc_wqe_gen_context_SHIFT 16
378#define lpfc_wqe_gen_context_MASK 0x0000FFFF
379#define lpfc_wqe_gen_context_WORD word6
380#define lpfc_wqe_gen_xri_SHIFT 0
381#define lpfc_wqe_gen_xri_MASK 0x0000FFFF
382#define lpfc_wqe_gen_xri_WORD word6
383 uint32_t word7;
384#define lpfc_wqe_gen_lnk_SHIFT 23
385#define lpfc_wqe_gen_lnk_MASK 0x00000001
386#define lpfc_wqe_gen_lnk_WORD word7
387#define lpfc_wqe_gen_erp_SHIFT 22
388#define lpfc_wqe_gen_erp_MASK 0x00000001
389#define lpfc_wqe_gen_erp_WORD word7
390#define lpfc_wqe_gen_pu_SHIFT 20
391#define lpfc_wqe_gen_pu_MASK 0x00000003
392#define lpfc_wqe_gen_pu_WORD word7
393#define lpfc_wqe_gen_class_SHIFT 16
394#define lpfc_wqe_gen_class_MASK 0x00000007
395#define lpfc_wqe_gen_class_WORD word7
396#define lpfc_wqe_gen_command_SHIFT 8
397#define lpfc_wqe_gen_command_MASK 0x000000FF
398#define lpfc_wqe_gen_command_WORD word7
399#define lpfc_wqe_gen_status_SHIFT 4
400#define lpfc_wqe_gen_status_MASK 0x0000000F
401#define lpfc_wqe_gen_status_WORD word7
402#define lpfc_wqe_gen_ct_SHIFT 2
403#define lpfc_wqe_gen_ct_MASK 0x00000007
404#define lpfc_wqe_gen_ct_WORD word7
405 uint32_t abort_tag;
406 uint32_t word9;
407#define lpfc_wqe_gen_request_tag_SHIFT 0
408#define lpfc_wqe_gen_request_tag_MASK 0x0000FFFF
409#define lpfc_wqe_gen_request_tag_WORD word9
410 uint32_t word10;
411#define lpfc_wqe_gen_ccp_SHIFT 24
412#define lpfc_wqe_gen_ccp_MASK 0x000000FF
413#define lpfc_wqe_gen_ccp_WORD word10
414#define lpfc_wqe_gen_ccpe_SHIFT 23
415#define lpfc_wqe_gen_ccpe_MASK 0x00000001
416#define lpfc_wqe_gen_ccpe_WORD word10
417#define lpfc_wqe_gen_pv_SHIFT 19
418#define lpfc_wqe_gen_pv_MASK 0x00000001
419#define lpfc_wqe_gen_pv_WORD word10
420#define lpfc_wqe_gen_pri_SHIFT 16
421#define lpfc_wqe_gen_pri_MASK 0x00000007
422#define lpfc_wqe_gen_pri_WORD word10
423 uint32_t word11;
424#define lpfc_wqe_gen_cq_id_SHIFT 16
425#define lpfc_wqe_gen_cq_id_MASK 0x0000FFFF
426#define lpfc_wqe_gen_cq_id_WORD word11
427#define LPFC_WQE_CQ_ID_DEFAULT 0xffff
428#define lpfc_wqe_gen_wqec_SHIFT 7
429#define lpfc_wqe_gen_wqec_MASK 0x00000001
430#define lpfc_wqe_gen_wqec_WORD word11
431#define lpfc_wqe_gen_cmd_type_SHIFT 0
432#define lpfc_wqe_gen_cmd_type_MASK 0x0000000F
433#define lpfc_wqe_gen_cmd_type_WORD word11
434 uint32_t payload[4];
435};
436
437struct lpfc_rqe {
438 uint32_t address_hi;
439 uint32_t address_lo;
440};
441
442/* buffer descriptors */
443struct lpfc_bde4 {
444 uint32_t addr_hi;
445 uint32_t addr_lo;
446 uint32_t word2;
447#define lpfc_bde4_last_SHIFT 31
448#define lpfc_bde4_last_MASK 0x00000001
449#define lpfc_bde4_last_WORD word2
450#define lpfc_bde4_sge_offset_SHIFT 0
451#define lpfc_bde4_sge_offset_MASK 0x000003FF
452#define lpfc_bde4_sge_offset_WORD word2
453 uint32_t word3;
454#define lpfc_bde4_length_SHIFT 0
455#define lpfc_bde4_length_MASK 0x000000FF
456#define lpfc_bde4_length_WORD word3
457};
458
459struct lpfc_register {
460 uint32_t word0;
461};
462
463#define LPFC_UERR_STATUS_HI 0x00A4
464#define LPFC_UERR_STATUS_LO 0x00A0
465#define LPFC_ONLINE0 0x00B0
466#define LPFC_ONLINE1 0x00B4
467#define LPFC_SCRATCHPAD 0x0058
468
469/* BAR0 Registers */
470#define LPFC_HST_STATE 0x00AC
471#define lpfc_hst_state_perr_SHIFT 31
472#define lpfc_hst_state_perr_MASK 0x1
473#define lpfc_hst_state_perr_WORD word0
474#define lpfc_hst_state_sfi_SHIFT 30
475#define lpfc_hst_state_sfi_MASK 0x1
476#define lpfc_hst_state_sfi_WORD word0
477#define lpfc_hst_state_nip_SHIFT 29
478#define lpfc_hst_state_nip_MASK 0x1
479#define lpfc_hst_state_nip_WORD word0
480#define lpfc_hst_state_ipc_SHIFT 28
481#define lpfc_hst_state_ipc_MASK 0x1
482#define lpfc_hst_state_ipc_WORD word0
483#define lpfc_hst_state_xrom_SHIFT 27
484#define lpfc_hst_state_xrom_MASK 0x1
485#define lpfc_hst_state_xrom_WORD word0
486#define lpfc_hst_state_dl_SHIFT 26
487#define lpfc_hst_state_dl_MASK 0x1
488#define lpfc_hst_state_dl_WORD word0
489#define lpfc_hst_state_port_status_SHIFT 0
490#define lpfc_hst_state_port_status_MASK 0xFFFF
491#define lpfc_hst_state_port_status_WORD word0
492
493#define LPFC_POST_STAGE_POWER_ON_RESET 0x0000
494#define LPFC_POST_STAGE_AWAITING_HOST_RDY 0x0001
495#define LPFC_POST_STAGE_HOST_RDY 0x0002
496#define LPFC_POST_STAGE_BE_RESET 0x0003
497#define LPFC_POST_STAGE_SEEPROM_CS_START 0x0100
498#define LPFC_POST_STAGE_SEEPROM_CS_DONE 0x0101
499#define LPFC_POST_STAGE_DDR_CONFIG_START 0x0200
500#define LPFC_POST_STAGE_DDR_CONFIG_DONE 0x0201
501#define LPFC_POST_STAGE_DDR_CALIBRATE_START 0x0300
502#define LPFC_POST_STAGE_DDR_CALIBRATE_DONE 0x0301
503#define LPFC_POST_STAGE_DDR_TEST_START 0x0400
504#define LPFC_POST_STAGE_DDR_TEST_DONE 0x0401
505#define LPFC_POST_STAGE_REDBOOT_INIT_START 0x0600
506#define LPFC_POST_STAGE_REDBOOT_INIT_DONE 0x0601
507#define LPFC_POST_STAGE_FW_IMAGE_LOAD_START 0x0700
508#define LPFC_POST_STAGE_FW_IMAGE_LOAD_DONE 0x0701
509#define LPFC_POST_STAGE_ARMFW_START 0x0800
510#define LPFC_POST_STAGE_DHCP_QUERY_START 0x0900
511#define LPFC_POST_STAGE_DHCP_QUERY_DONE 0x0901
512#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_START 0x0A00
513#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_DONE 0x0A01
514#define LPFC_POST_STAGE_RC_OPTION_SET 0x0B00
515#define LPFC_POST_STAGE_SWITCH_LINK 0x0B01
516#define LPFC_POST_STAGE_SEND_ICDS_MESSAGE 0x0B02
517#define LPFC_POST_STAGE_PERFROM_TFTP 0x0B03
518#define LPFC_POST_STAGE_PARSE_XML 0x0B04
519#define LPFC_POST_STAGE_DOWNLOAD_IMAGE 0x0B05
520#define LPFC_POST_STAGE_FLASH_IMAGE 0x0B06
521#define LPFC_POST_STAGE_RC_DONE 0x0B07
522#define LPFC_POST_STAGE_REBOOT_SYSTEM 0x0B08
523#define LPFC_POST_STAGE_MAC_ADDRESS 0x0C00
524#define LPFC_POST_STAGE_ARMFW_READY 0xC000
525#define LPFC_POST_STAGE_ARMFW_UE 0xF000
526
527#define lpfc_scratchpad_slirev_SHIFT 4
528#define lpfc_scratchpad_slirev_MASK 0xF
529#define lpfc_scratchpad_slirev_WORD word0
530#define lpfc_scratchpad_chiptype_SHIFT 8
531#define lpfc_scratchpad_chiptype_MASK 0xFF
532#define lpfc_scratchpad_chiptype_WORD word0
533#define lpfc_scratchpad_featurelevel1_SHIFT 16
534#define lpfc_scratchpad_featurelevel1_MASK 0xFF
535#define lpfc_scratchpad_featurelevel1_WORD word0
536#define lpfc_scratchpad_featurelevel2_SHIFT 24
537#define lpfc_scratchpad_featurelevel2_MASK 0xFF
538#define lpfc_scratchpad_featurelevel2_WORD word0
539
540/* BAR1 Registers */
541#define LPFC_IMR_MASK_ALL 0xFFFFFFFF
542#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF
543
544#define LPFC_HST_ISR0 0x0C18
545#define LPFC_HST_ISR1 0x0C1C
546#define LPFC_HST_ISR2 0x0C20
547#define LPFC_HST_ISR3 0x0C24
548#define LPFC_HST_ISR4 0x0C28
549
550#define LPFC_HST_IMR0 0x0C48
551#define LPFC_HST_IMR1 0x0C4C
552#define LPFC_HST_IMR2 0x0C50
553#define LPFC_HST_IMR3 0x0C54
554#define LPFC_HST_IMR4 0x0C58
555
556#define LPFC_HST_ISCR0 0x0C78
557#define LPFC_HST_ISCR1 0x0C7C
558#define LPFC_HST_ISCR2 0x0C80
559#define LPFC_HST_ISCR3 0x0C84
560#define LPFC_HST_ISCR4 0x0C88
561
562#define LPFC_SLI4_INTR0 BIT0
563#define LPFC_SLI4_INTR1 BIT1
564#define LPFC_SLI4_INTR2 BIT2
565#define LPFC_SLI4_INTR3 BIT3
566#define LPFC_SLI4_INTR4 BIT4
567#define LPFC_SLI4_INTR5 BIT5
568#define LPFC_SLI4_INTR6 BIT6
569#define LPFC_SLI4_INTR7 BIT7
570#define LPFC_SLI4_INTR8 BIT8
571#define LPFC_SLI4_INTR9 BIT9
572#define LPFC_SLI4_INTR10 BIT10
573#define LPFC_SLI4_INTR11 BIT11
574#define LPFC_SLI4_INTR12 BIT12
575#define LPFC_SLI4_INTR13 BIT13
576#define LPFC_SLI4_INTR14 BIT14
577#define LPFC_SLI4_INTR15 BIT15
578#define LPFC_SLI4_INTR16 BIT16
579#define LPFC_SLI4_INTR17 BIT17
580#define LPFC_SLI4_INTR18 BIT18
581#define LPFC_SLI4_INTR19 BIT19
582#define LPFC_SLI4_INTR20 BIT20
583#define LPFC_SLI4_INTR21 BIT21
584#define LPFC_SLI4_INTR22 BIT22
585#define LPFC_SLI4_INTR23 BIT23
586#define LPFC_SLI4_INTR24 BIT24
587#define LPFC_SLI4_INTR25 BIT25
588#define LPFC_SLI4_INTR26 BIT26
589#define LPFC_SLI4_INTR27 BIT27
590#define LPFC_SLI4_INTR28 BIT28
591#define LPFC_SLI4_INTR29 BIT29
592#define LPFC_SLI4_INTR30 BIT30
593#define LPFC_SLI4_INTR31 BIT31
594
595/* BAR2 Registers */
596#define LPFC_RQ_DOORBELL 0x00A0
597#define lpfc_rq_doorbell_num_posted_SHIFT 16
598#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF
599#define lpfc_rq_doorbell_num_posted_WORD word0
600#define LPFC_RQ_POST_BATCH 8 /* RQEs to post at one time */
601#define lpfc_rq_doorbell_id_SHIFT 0
602#define lpfc_rq_doorbell_id_MASK 0x03FF
603#define lpfc_rq_doorbell_id_WORD word0
604
605#define LPFC_WQ_DOORBELL 0x0040
606#define lpfc_wq_doorbell_num_posted_SHIFT 24
607#define lpfc_wq_doorbell_num_posted_MASK 0x00FF
608#define lpfc_wq_doorbell_num_posted_WORD word0
609#define lpfc_wq_doorbell_index_SHIFT 16
610#define lpfc_wq_doorbell_index_MASK 0x00FF
611#define lpfc_wq_doorbell_index_WORD word0
612#define lpfc_wq_doorbell_id_SHIFT 0
613#define lpfc_wq_doorbell_id_MASK 0xFFFF
614#define lpfc_wq_doorbell_id_WORD word0
615
616#define LPFC_EQCQ_DOORBELL 0x0120
617#define lpfc_eqcq_doorbell_arm_SHIFT 29
618#define lpfc_eqcq_doorbell_arm_MASK 0x0001
619#define lpfc_eqcq_doorbell_arm_WORD word0
620#define lpfc_eqcq_doorbell_num_released_SHIFT 16
621#define lpfc_eqcq_doorbell_num_released_MASK 0x1FFF
622#define lpfc_eqcq_doorbell_num_released_WORD word0
623#define lpfc_eqcq_doorbell_qt_SHIFT 10
624#define lpfc_eqcq_doorbell_qt_MASK 0x0001
625#define lpfc_eqcq_doorbell_qt_WORD word0
626#define LPFC_QUEUE_TYPE_COMPLETION 0
627#define LPFC_QUEUE_TYPE_EVENT 1
628#define lpfc_eqcq_doorbell_eqci_SHIFT 9
629#define lpfc_eqcq_doorbell_eqci_MASK 0x0001
630#define lpfc_eqcq_doorbell_eqci_WORD word0
631#define lpfc_eqcq_doorbell_cqid_SHIFT 0
632#define lpfc_eqcq_doorbell_cqid_MASK 0x03FF
633#define lpfc_eqcq_doorbell_cqid_WORD word0
634#define lpfc_eqcq_doorbell_eqid_SHIFT 0
635#define lpfc_eqcq_doorbell_eqid_MASK 0x01FF
636#define lpfc_eqcq_doorbell_eqid_WORD word0
637
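A brief sketch (not part of the file) of how the EQ/CQ doorbell layout above is typically exercised to re-arm a completion queue; "db_regaddr" (an ioremapped BAR2 base), the released/cqid parameters, and the <linux/io.h> include are assumptions, while the field accessors and LPFC_EQCQ_DOORBELL come from the definitions above.

/* Illustrative only; assumes <linux/io.h> for writel(). */
static void example_arm_cq(void __iomem *db_regaddr, uint16_t cqid,
			   uint32_t released)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, cqid);
	bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	/* One 32-bit write releases the consumed CQEs and re-arms the CQ. */
	writel(doorbell.word0, db_regaddr + LPFC_EQCQ_DOORBELL);
}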
638#define LPFC_BMBX 0x0160
639#define lpfc_bmbx_addr_SHIFT 2
640#define lpfc_bmbx_addr_MASK 0x3FFFFFFF
641#define lpfc_bmbx_addr_WORD word0
642#define lpfc_bmbx_hi_SHIFT 1
643#define lpfc_bmbx_hi_MASK 0x0001
644#define lpfc_bmbx_hi_WORD word0
645#define lpfc_bmbx_rdy_SHIFT 0
646#define lpfc_bmbx_rdy_MASK 0x0001
647#define lpfc_bmbx_rdy_WORD word0
648
649#define LPFC_MQ_DOORBELL 0x0140
650#define lpfc_mq_doorbell_num_posted_SHIFT 16
651#define lpfc_mq_doorbell_num_posted_MASK 0x3FFF
652#define lpfc_mq_doorbell_num_posted_WORD word0
653#define lpfc_mq_doorbell_id_SHIFT 0
654#define lpfc_mq_doorbell_id_MASK 0x03FF
655#define lpfc_mq_doorbell_id_WORD word0
656
657struct lpfc_sli4_cfg_mhdr {
658 uint32_t word1;
659#define lpfc_mbox_hdr_emb_SHIFT 0
660#define lpfc_mbox_hdr_emb_MASK 0x00000001
661#define lpfc_mbox_hdr_emb_WORD word1
662#define lpfc_mbox_hdr_sge_cnt_SHIFT 3
663#define lpfc_mbox_hdr_sge_cnt_MASK 0x0000001F
664#define lpfc_mbox_hdr_sge_cnt_WORD word1
665 uint32_t payload_length;
666 uint32_t tag_lo;
667 uint32_t tag_hi;
668 uint32_t reserved5;
669};
670
671union lpfc_sli4_cfg_shdr {
672 struct {
673 uint32_t word6;
674#define lpfc_mbox_hdr_opcode_SHIFT 0
675#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
676#define lpfc_mbox_hdr_opcode_WORD word6
677#define lpfc_mbox_hdr_subsystem_SHIFT 8
678#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
679#define lpfc_mbox_hdr_subsystem_WORD word6
680#define lpfc_mbox_hdr_port_number_SHIFT 16
681#define lpfc_mbox_hdr_port_number_MASK 0x000000FF
682#define lpfc_mbox_hdr_port_number_WORD word6
683#define lpfc_mbox_hdr_domain_SHIFT 24
684#define lpfc_mbox_hdr_domain_MASK 0x000000FF
685#define lpfc_mbox_hdr_domain_WORD word6
686 uint32_t timeout;
687 uint32_t request_length;
688 uint32_t reserved9;
689 } request;
690 struct {
691 uint32_t word6;
692#define lpfc_mbox_hdr_opcode_SHIFT 0
693#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
694#define lpfc_mbox_hdr_opcode_WORD word6
695#define lpfc_mbox_hdr_subsystem_SHIFT 8
696#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
697#define lpfc_mbox_hdr_subsystem_WORD word6
698#define lpfc_mbox_hdr_domain_SHIFT 24
699#define lpfc_mbox_hdr_domain_MASK 0x000000FF
700#define lpfc_mbox_hdr_domain_WORD word6
701 uint32_t word7;
702#define lpfc_mbox_hdr_status_SHIFT 0
703#define lpfc_mbox_hdr_status_MASK 0x000000FF
704#define lpfc_mbox_hdr_status_WORD word7
705#define lpfc_mbox_hdr_add_status_SHIFT 8
706#define lpfc_mbox_hdr_add_status_MASK 0x000000FF
707#define lpfc_mbox_hdr_add_status_WORD word7
708 uint32_t response_length;
709 uint32_t actual_response_length;
710 } response;
711};
712
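A small sketch (not part of the file) of reading completion status back through the response half of the shared header union above; the helper name and the success convention are assumptions, the accessors come from the union definition.

/* Illustrative only: returns nonzero when the command completed cleanly. */
static int example_shdr_status_ok(union lpfc_sli4_cfg_shdr *shdr)
{
	uint32_t status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	uint32_t add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	/* Zero status and zero additional status indicate success. */
	return (status == 0 && add_status == 0);
}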
713/* Mailbox structures */
714struct mbox_header {
715 struct lpfc_sli4_cfg_mhdr cfg_mhdr;
716 union lpfc_sli4_cfg_shdr cfg_shdr;
717};
718
719/* Subsystem Definitions */
720#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1
721#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC
722
723/* Device Specific Definitions */
724
725/* The HOST ENDIAN defines are in Big Endian format. */
726#define HOST_ENDIAN_LOW_WORD0 0xFF3412FF
727#define HOST_ENDIAN_HIGH_WORD1 0xFF7856FF
728
729/* Common Opcodes */
730#define LPFC_MBOX_OPCODE_CQ_CREATE 0x0C
731#define LPFC_MBOX_OPCODE_EQ_CREATE 0x0D
732#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15
733#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20
734#define LPFC_MBOX_OPCODE_NOP 0x21
735#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35
736#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36
737#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
738#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
739
740/* FCoE Opcodes */
741#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01
742#define LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY 0x02
743#define LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES 0x03
744#define LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES 0x04
745#define LPFC_MBOX_OPCODE_FCOE_RQ_CREATE 0x05
746#define LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY 0x06
747#define LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE 0x08
748#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09
749#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
750#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
751
752/* Mailbox command structures */
753struct eq_context {
754 uint32_t word0;
755#define lpfc_eq_context_size_SHIFT 31
756#define lpfc_eq_context_size_MASK 0x00000001
757#define lpfc_eq_context_size_WORD word0
758#define LPFC_EQE_SIZE_4 0x0
759#define LPFC_EQE_SIZE_16 0x1
760#define lpfc_eq_context_valid_SHIFT 29
761#define lpfc_eq_context_valid_MASK 0x00000001
762#define lpfc_eq_context_valid_WORD word0
763 uint32_t word1;
764#define lpfc_eq_context_count_SHIFT 26
765#define lpfc_eq_context_count_MASK 0x00000003
766#define lpfc_eq_context_count_WORD word1
767#define LPFC_EQ_CNT_256 0x0
768#define LPFC_EQ_CNT_512 0x1
769#define LPFC_EQ_CNT_1024 0x2
770#define LPFC_EQ_CNT_2048 0x3
771#define LPFC_EQ_CNT_4096 0x4
772 uint32_t word2;
773#define lpfc_eq_context_delay_multi_SHIFT 13
774#define lpfc_eq_context_delay_multi_MASK 0x000003FF
775#define lpfc_eq_context_delay_multi_WORD word2
776 uint32_t reserved3;
777};
778
779struct sgl_page_pairs {
780 uint32_t sgl_pg0_addr_lo;
781 uint32_t sgl_pg0_addr_hi;
782 uint32_t sgl_pg1_addr_lo;
783 uint32_t sgl_pg1_addr_hi;
784};
785
786struct lpfc_mbx_post_sgl_pages {
787 struct mbox_header header;
788 uint32_t word0;
789#define lpfc_post_sgl_pages_xri_SHIFT 0
790#define lpfc_post_sgl_pages_xri_MASK 0x0000FFFF
791#define lpfc_post_sgl_pages_xri_WORD word0
792#define lpfc_post_sgl_pages_xricnt_SHIFT 16
793#define lpfc_post_sgl_pages_xricnt_MASK 0x0000FFFF
794#define lpfc_post_sgl_pages_xricnt_WORD word0
795 struct sgl_page_pairs sgl_pg_pairs[1];
796};
797
798/* word0 of page-1 struct shares the same SHIFT/MASK/WORD defines as above */
799struct lpfc_mbx_post_uembed_sgl_page1 {
800 union lpfc_sli4_cfg_shdr cfg_shdr;
801 uint32_t word0;
802 struct sgl_page_pairs sgl_pg_pairs;
803};
804
805struct lpfc_mbx_sge {
806 uint32_t pa_lo;
807 uint32_t pa_hi;
808 uint32_t length;
809};
810
811struct lpfc_mbx_nembed_cmd {
812 struct lpfc_sli4_cfg_mhdr cfg_mhdr;
813#define LPFC_SLI4_MBX_SGE_MAX_PAGES 19
814 struct lpfc_mbx_sge sge[LPFC_SLI4_MBX_SGE_MAX_PAGES];
815};
816
817struct lpfc_mbx_nembed_sge_virt {
818 void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES];
819};
820
821struct lpfc_mbx_eq_create {
822 struct mbox_header header;
823 union {
824 struct {
825 uint32_t word0;
826#define lpfc_mbx_eq_create_num_pages_SHIFT 0
827#define lpfc_mbx_eq_create_num_pages_MASK 0x0000FFFF
828#define lpfc_mbx_eq_create_num_pages_WORD word0
829 struct eq_context context;
830 struct dma_address page[LPFC_MAX_EQ_PAGE];
831 } request;
832 struct {
833 uint32_t word0;
834#define lpfc_mbx_eq_create_q_id_SHIFT 0
835#define lpfc_mbx_eq_create_q_id_MASK 0x0000FFFF
836#define lpfc_mbx_eq_create_q_id_WORD word0
837 } response;
838 } u;
839};
840
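For orientation, a sketch (not part of the file) of the embedded request/response union pattern used by EQ_CREATE and the other queue-create commands above: the host fills the request side before issuing the mailbox command and reads the response side afterwards. The helper name is an assumption; filling of the context and page[] entries is omitted.

/* Illustrative only: request is filled before issue, response read after
 * completion. */
static uint16_t example_eq_create_fill(struct lpfc_mbx_eq_create *eq_create,
				       uint16_t num_pages)
{
	/* Request side: number of pages backing the queue (<= LPFC_MAX_EQ_PAGE). */
	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, num_pages);
	/* ... eq context and page[] DMA addresses would be filled here ... */

	/* Response side: queue id assigned by the port. */
	return bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
}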
841struct lpfc_mbx_eq_destroy {
842 struct mbox_header header;
843 union {
844 struct {
845 uint32_t word0;
846#define lpfc_mbx_eq_destroy_q_id_SHIFT 0
847#define lpfc_mbx_eq_destroy_q_id_MASK 0x0000FFFF
848#define lpfc_mbx_eq_destroy_q_id_WORD word0
849 } request;
850 struct {
851 uint32_t word0;
852 } response;
853 } u;
854};
855
856struct lpfc_mbx_nop {
857 struct mbox_header header;
858 uint32_t context[2];
859};
860
861struct cq_context {
862 uint32_t word0;
863#define lpfc_cq_context_event_SHIFT 31
864#define lpfc_cq_context_event_MASK 0x00000001
865#define lpfc_cq_context_event_WORD word0
866#define lpfc_cq_context_valid_SHIFT 29
867#define lpfc_cq_context_valid_MASK 0x00000001
868#define lpfc_cq_context_valid_WORD word0
869#define lpfc_cq_context_count_SHIFT 27
870#define lpfc_cq_context_count_MASK 0x00000003
871#define lpfc_cq_context_count_WORD word0
872#define LPFC_CQ_CNT_256 0x0
873#define LPFC_CQ_CNT_512 0x1
874#define LPFC_CQ_CNT_1024 0x2
875 uint32_t word1;
876#define lpfc_cq_eq_id_SHIFT 22
877#define lpfc_cq_eq_id_MASK 0x000000FF
878#define lpfc_cq_eq_id_WORD word1
879 uint32_t reserved0;
880 uint32_t reserved1;
881};
882
883struct lpfc_mbx_cq_create {
884 struct mbox_header header;
885 union {
886 struct {
887 uint32_t word0;
888#define lpfc_mbx_cq_create_num_pages_SHIFT 0
889#define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF
890#define lpfc_mbx_cq_create_num_pages_WORD word0
891 struct cq_context context;
892 struct dma_address page[LPFC_MAX_CQ_PAGE];
893 } request;
894 struct {
895 uint32_t word0;
896#define lpfc_mbx_cq_create_q_id_SHIFT 0
897#define lpfc_mbx_cq_create_q_id_MASK 0x0000FFFF
898#define lpfc_mbx_cq_create_q_id_WORD word0
899 } response;
900 } u;
901};
902
903struct lpfc_mbx_cq_destroy {
904 struct mbox_header header;
905 union {
906 struct {
907 uint32_t word0;
908#define lpfc_mbx_cq_destroy_q_id_SHIFT 0
909#define lpfc_mbx_cq_destroy_q_id_MASK 0x0000FFFF
910#define lpfc_mbx_cq_destroy_q_id_WORD word0
911 } request;
912 struct {
913 uint32_t word0;
914 } response;
915 } u;
916};
917
918struct wq_context {
919 uint32_t reserved0;
920 uint32_t reserved1;
921 uint32_t reserved2;
922 uint32_t reserved3;
923};
924
925struct lpfc_mbx_wq_create {
926 struct mbox_header header;
927 union {
928 struct {
929 uint32_t word0;
930#define lpfc_mbx_wq_create_num_pages_SHIFT 0
931#define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF
932#define lpfc_mbx_wq_create_num_pages_WORD word0
933#define lpfc_mbx_wq_create_cq_id_SHIFT 16
934#define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF
935#define lpfc_mbx_wq_create_cq_id_WORD word0
936 struct dma_address page[LPFC_MAX_WQ_PAGE];
937 } request;
938 struct {
939 uint32_t word0;
940#define lpfc_mbx_wq_create_q_id_SHIFT 0
941#define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF
942#define lpfc_mbx_wq_create_q_id_WORD word0
943 } response;
944 } u;
945};
946
947struct lpfc_mbx_wq_destroy {
948 struct mbox_header header;
949 union {
950 struct {
951 uint32_t word0;
952#define lpfc_mbx_wq_destroy_q_id_SHIFT 0
953#define lpfc_mbx_wq_destroy_q_id_MASK 0x0000FFFF
954#define lpfc_mbx_wq_destroy_q_id_WORD word0
955 } request;
956 struct {
957 uint32_t word0;
958 } response;
959 } u;
960};
961
962#define LPFC_HDR_BUF_SIZE 128
963#define LPFC_DATA_BUF_SIZE 4096
964struct rq_context {
965 uint32_t word0;
966#define lpfc_rq_context_rq_size_SHIFT 16
967#define lpfc_rq_context_rq_size_MASK 0x0000000F
968#define lpfc_rq_context_rq_size_WORD word0
969#define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */
970#define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */
971#define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */
972#define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */
973 uint32_t reserved1;
974 uint32_t word2;
975#define lpfc_rq_context_cq_id_SHIFT 16
976#define lpfc_rq_context_cq_id_MASK 0x000003FF
977#define lpfc_rq_context_cq_id_WORD word2
978#define lpfc_rq_context_buf_size_SHIFT 0
979#define lpfc_rq_context_buf_size_MASK 0x0000FFFF
980#define lpfc_rq_context_buf_size_WORD word2
981 uint32_t reserved3;
982};
983
984struct lpfc_mbx_rq_create {
985 struct mbox_header header;
986 union {
987 struct {
988 uint32_t word0;
989#define lpfc_mbx_rq_create_num_pages_SHIFT 0
990#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF
991#define lpfc_mbx_rq_create_num_pages_WORD word0
992 struct rq_context context;
993 struct dma_address page[LPFC_MAX_WQ_PAGE];
994 } request;
995 struct {
996 uint32_t word0;
997#define lpfc_mbx_rq_create_q_id_SHIFT 0
998#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF
999#define lpfc_mbx_rq_create_q_id_WORD word0
1000 } response;
1001 } u;
1002};
1003
1004struct lpfc_mbx_rq_destroy {
1005 struct mbox_header header;
1006 union {
1007 struct {
1008 uint32_t word0;
1009#define lpfc_mbx_rq_destroy_q_id_SHIFT 0
1010#define lpfc_mbx_rq_destroy_q_id_MASK 0x0000FFFF
1011#define lpfc_mbx_rq_destroy_q_id_WORD word0
1012 } request;
1013 struct {
1014 uint32_t word0;
1015 } response;
1016 } u;
1017};
1018
1019struct mq_context {
1020 uint32_t word0;
1021#define lpfc_mq_context_cq_id_SHIFT 22
1022#define lpfc_mq_context_cq_id_MASK 0x000003FF
1023#define lpfc_mq_context_cq_id_WORD word0
1024#define lpfc_mq_context_count_SHIFT 16
1025#define lpfc_mq_context_count_MASK 0x0000000F
1026#define lpfc_mq_context_count_WORD word0
1027#define LPFC_MQ_CNT_16 0x5
1028#define LPFC_MQ_CNT_32 0x6
1029#define LPFC_MQ_CNT_64 0x7
1030#define LPFC_MQ_CNT_128 0x8
1031 uint32_t word1;
1032#define lpfc_mq_context_valid_SHIFT 31
1033#define lpfc_mq_context_valid_MASK 0x00000001
1034#define lpfc_mq_context_valid_WORD word1
1035 uint32_t reserved2;
1036 uint32_t reserved3;
1037};
1038
1039struct lpfc_mbx_mq_create {
1040 struct mbox_header header;
1041 union {
1042 struct {
1043 uint32_t word0;
1044#define lpfc_mbx_mq_create_num_pages_SHIFT 0
1045#define lpfc_mbx_mq_create_num_pages_MASK 0x0000FFFF
1046#define lpfc_mbx_mq_create_num_pages_WORD word0
1047 struct mq_context context;
1048 struct dma_address page[LPFC_MAX_MQ_PAGE];
1049 } request;
1050 struct {
1051 uint32_t word0;
1052#define lpfc_mbx_mq_create_q_id_SHIFT 0
1053#define lpfc_mbx_mq_create_q_id_MASK 0x0000FFFF
1054#define lpfc_mbx_mq_create_q_id_WORD word0
1055 } response;
1056 } u;
1057};
1058
1059struct lpfc_mbx_mq_destroy {
1060 struct mbox_header header;
1061 union {
1062 struct {
1063 uint32_t word0;
1064#define lpfc_mbx_mq_destroy_q_id_SHIFT 0
1065#define lpfc_mbx_mq_destroy_q_id_MASK 0x0000FFFF
1066#define lpfc_mbx_mq_destroy_q_id_WORD word0
1067 } request;
1068 struct {
1069 uint32_t word0;
1070 } response;
1071 } u;
1072};
1073
1074struct lpfc_mbx_post_hdr_tmpl {
1075 struct mbox_header header;
1076 uint32_t word10;
1077#define lpfc_mbx_post_hdr_tmpl_rpi_offset_SHIFT 0
1078#define lpfc_mbx_post_hdr_tmpl_rpi_offset_MASK 0x0000FFFF
1079#define lpfc_mbx_post_hdr_tmpl_rpi_offset_WORD word10
1080#define lpfc_mbx_post_hdr_tmpl_page_cnt_SHIFT 16
1081#define lpfc_mbx_post_hdr_tmpl_page_cnt_MASK 0x0000FFFF
1082#define lpfc_mbx_post_hdr_tmpl_page_cnt_WORD word10
1083 uint32_t rpi_paddr_lo;
1084 uint32_t rpi_paddr_hi;
1085};
1086
1087struct sli4_sge { /* SLI-4 */
1088 uint32_t addr_hi;
1089 uint32_t addr_lo;
1090
1091 uint32_t word2;
1092#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used*/
1093#define lpfc_sli4_sge_offset_MASK 0x00FFFFFF
1094#define lpfc_sli4_sge_offset_WORD word2
1095#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets
1096 this flag !! */
1097#define lpfc_sli4_sge_last_MASK 0x00000001
1098#define lpfc_sli4_sge_last_WORD word2
1099 uint32_t word3;
1100#define lpfc_sli4_sge_len_SHIFT 0
1101#define lpfc_sli4_sge_len_MASK 0x0001FFFF
1102#define lpfc_sli4_sge_len_WORD word3
1103};
1104
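A minimal sketch (not part of the file) of populating one sli4_sge for a data buffer; putPaddrHigh/putPaddrLow are the usual lpfc address helpers and are assumed available, and endian conversion of the SGE words is deliberately omitted here. The field accessors come from the structure above.

/* Illustrative only. */
static void example_fill_sge(struct sli4_sge *sge, dma_addr_t paddr,
			     uint32_t len, int is_last)
{
	sge->addr_hi = putPaddrHigh(paddr);
	sge->addr_lo = putPaddrLow(paddr);
	sge->word2 = 0;
	bf_set(lpfc_sli4_sge_last, sge, is_last ? 1 : 0);	/* last SGE in the SGL */
	sge->word3 = 0;
	bf_set(lpfc_sli4_sge_len, sge, len);			/* buffer length in bytes */
}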
1105struct fcf_record {
1106 uint32_t max_rcv_size;
1107 uint32_t fka_adv_period;
1108 uint32_t fip_priority;
1109 uint32_t word3;
1110#define lpfc_fcf_record_mac_0_SHIFT 0
1111#define lpfc_fcf_record_mac_0_MASK 0x000000FF
1112#define lpfc_fcf_record_mac_0_WORD word3
1113#define lpfc_fcf_record_mac_1_SHIFT 8
1114#define lpfc_fcf_record_mac_1_MASK 0x000000FF
1115#define lpfc_fcf_record_mac_1_WORD word3
1116#define lpfc_fcf_record_mac_2_SHIFT 16
1117#define lpfc_fcf_record_mac_2_MASK 0x000000FF
1118#define lpfc_fcf_record_mac_2_WORD word3
1119#define lpfc_fcf_record_mac_3_SHIFT 24
1120#define lpfc_fcf_record_mac_3_MASK 0x000000FF
1121#define lpfc_fcf_record_mac_3_WORD word3
1122 uint32_t word4;
1123#define lpfc_fcf_record_mac_4_SHIFT 0
1124#define lpfc_fcf_record_mac_4_MASK 0x000000FF
1125#define lpfc_fcf_record_mac_4_WORD word4
1126#define lpfc_fcf_record_mac_5_SHIFT 8
1127#define lpfc_fcf_record_mac_5_MASK 0x000000FF
1128#define lpfc_fcf_record_mac_5_WORD word4
1129#define lpfc_fcf_record_fcf_avail_SHIFT 16
1130#define lpfc_fcf_record_fcf_avail_MASK 0x000000FF
1131#define lpfc_fcf_record_fcf_avail_WORD word4
1132#define lpfc_fcf_record_mac_addr_prov_SHIFT 24
1133#define lpfc_fcf_record_mac_addr_prov_MASK 0x000000FF
1134#define lpfc_fcf_record_mac_addr_prov_WORD word4
1135#define LPFC_FCF_FPMA 1 /* Fabric Provided MAC Address */
1136#define LPFC_FCF_SPMA 2 /* Server Provided MAC Address */
1137 uint32_t word5;
1138#define lpfc_fcf_record_fab_name_0_SHIFT 0
1139#define lpfc_fcf_record_fab_name_0_MASK 0x000000FF
1140#define lpfc_fcf_record_fab_name_0_WORD word5
1141#define lpfc_fcf_record_fab_name_1_SHIFT 8
1142#define lpfc_fcf_record_fab_name_1_MASK 0x000000FF
1143#define lpfc_fcf_record_fab_name_1_WORD word5
1144#define lpfc_fcf_record_fab_name_2_SHIFT 16
1145#define lpfc_fcf_record_fab_name_2_MASK 0x000000FF
1146#define lpfc_fcf_record_fab_name_2_WORD word5
1147#define lpfc_fcf_record_fab_name_3_SHIFT 24
1148#define lpfc_fcf_record_fab_name_3_MASK 0x000000FF
1149#define lpfc_fcf_record_fab_name_3_WORD word5
1150 uint32_t word6;
1151#define lpfc_fcf_record_fab_name_4_SHIFT 0
1152#define lpfc_fcf_record_fab_name_4_MASK 0x000000FF
1153#define lpfc_fcf_record_fab_name_4_WORD word6
1154#define lpfc_fcf_record_fab_name_5_SHIFT 8
1155#define lpfc_fcf_record_fab_name_5_MASK 0x000000FF
1156#define lpfc_fcf_record_fab_name_5_WORD word6
1157#define lpfc_fcf_record_fab_name_6_SHIFT 16
1158#define lpfc_fcf_record_fab_name_6_MASK 0x000000FF
1159#define lpfc_fcf_record_fab_name_6_WORD word6
1160#define lpfc_fcf_record_fab_name_7_SHIFT 24
1161#define lpfc_fcf_record_fab_name_7_MASK 0x000000FF
1162#define lpfc_fcf_record_fab_name_7_WORD word6
1163 uint32_t word7;
1164#define lpfc_fcf_record_fc_map_0_SHIFT 0
1165#define lpfc_fcf_record_fc_map_0_MASK 0x000000FF
1166#define lpfc_fcf_record_fc_map_0_WORD word7
1167#define lpfc_fcf_record_fc_map_1_SHIFT 8
1168#define lpfc_fcf_record_fc_map_1_MASK 0x000000FF
1169#define lpfc_fcf_record_fc_map_1_WORD word7
1170#define lpfc_fcf_record_fc_map_2_SHIFT 16
1171#define lpfc_fcf_record_fc_map_2_MASK 0x000000FF
1172#define lpfc_fcf_record_fc_map_2_WORD word7
1173#define lpfc_fcf_record_fcf_valid_SHIFT 24
1174#define lpfc_fcf_record_fcf_valid_MASK 0x000000FF
1175#define lpfc_fcf_record_fcf_valid_WORD word7
1176 uint32_t word8;
1177#define lpfc_fcf_record_fcf_index_SHIFT 0
1178#define lpfc_fcf_record_fcf_index_MASK 0x0000FFFF
1179#define lpfc_fcf_record_fcf_index_WORD word8
1180#define lpfc_fcf_record_fcf_state_SHIFT 16
1181#define lpfc_fcf_record_fcf_state_MASK 0x0000FFFF
1182#define lpfc_fcf_record_fcf_state_WORD word8
1183 uint8_t vlan_bitmap[512];
1184};
1185
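A short sketch (not part of the file) of testing whether a VLAN is enabled in an FCF record's vlan_bitmap above; the byte/bit ordering shown (bit N%8 of byte N/8) is an assumption for illustration, the 512-byte (4096-bit) bitmap itself comes from the structure.

/* Illustrative only. */
static int example_fcf_vlan_enabled(const struct fcf_record *fcf,
				    uint16_t vlan_id)
{
	if (vlan_id >= 4096)
		return 0;
	return (fcf->vlan_bitmap[vlan_id / 8] >> (vlan_id % 8)) & 1;
}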
1186struct lpfc_mbx_read_fcf_tbl {
1187 union lpfc_sli4_cfg_shdr cfg_shdr;
1188 union {
1189 struct {
1190 uint32_t word10;
1191#define lpfc_mbx_read_fcf_tbl_indx_SHIFT 0
1192#define lpfc_mbx_read_fcf_tbl_indx_MASK 0x0000FFFF
1193#define lpfc_mbx_read_fcf_tbl_indx_WORD word10
1194 } request;
1195 struct {
1196 uint32_t eventag;
1197 } response;
1198 } u;
1199 uint32_t word11;
1200#define lpfc_mbx_read_fcf_tbl_nxt_vindx_SHIFT 0
1201#define lpfc_mbx_read_fcf_tbl_nxt_vindx_MASK 0x0000FFFF
1202#define lpfc_mbx_read_fcf_tbl_nxt_vindx_WORD word11
1203};
1204
1205struct lpfc_mbx_add_fcf_tbl_entry {
1206 union lpfc_sli4_cfg_shdr cfg_shdr;
1207 uint32_t word10;
1208#define lpfc_mbx_add_fcf_tbl_fcfi_SHIFT 0
1209#define lpfc_mbx_add_fcf_tbl_fcfi_MASK 0x0000FFFF
1210#define lpfc_mbx_add_fcf_tbl_fcfi_WORD word10
1211 struct lpfc_mbx_sge fcf_sge;
1212};
1213
1214struct lpfc_mbx_del_fcf_tbl_entry {
1215 struct mbox_header header;
1216 uint32_t word10;
1217#define lpfc_mbx_del_fcf_tbl_count_SHIFT 0
1218#define lpfc_mbx_del_fcf_tbl_count_MASK 0x0000FFFF
1219#define lpfc_mbx_del_fcf_tbl_count_WORD word10
1220#define lpfc_mbx_del_fcf_tbl_index_SHIFT 16
1221#define lpfc_mbx_del_fcf_tbl_index_MASK 0x0000FFFF
1222#define lpfc_mbx_del_fcf_tbl_index_WORD word10
1223};
1224
1225/* Status field for embedded SLI_CONFIG mailbox command */
1226#define STATUS_SUCCESS 0x0
1227#define STATUS_FAILED 0x1
1228#define STATUS_ILLEGAL_REQUEST 0x2
1229#define STATUS_ILLEGAL_FIELD 0x3
1230#define STATUS_INSUFFICIENT_BUFFER 0x4
1231#define STATUS_UNAUTHORIZED_REQUEST 0x5
1232#define STATUS_FLASHROM_SAVE_FAILED 0x17
1233#define STATUS_FLASHROM_RESTORE_FAILED 0x18
1234#define STATUS_ICCBINDEX_ALLOC_FAILED 0x1a
1235#define STATUS_IOCTLHANDLE_ALLOC_FAILED 0x1b
1236#define STATUS_INVALID_PHY_ADDR_FROM_OSM 0x1c
1237#define STATUS_INVALID_PHY_ADDR_LEN_FROM_OSM 0x1d
1238#define STATUS_ASSERT_FAILED 0x1e
1239#define STATUS_INVALID_SESSION 0x1f
1240#define STATUS_INVALID_CONNECTION 0x20
1241#define STATUS_BTL_PATH_EXCEEDS_OSM_LIMIT 0x21
1242#define STATUS_BTL_NO_FREE_SLOT_PATH 0x24
1243#define STATUS_BTL_NO_FREE_SLOT_TGTID 0x25
1244#define STATUS_OSM_DEVSLOT_NOT_FOUND 0x26
1245#define STATUS_FLASHROM_READ_FAILED 0x27
1246#define STATUS_POLL_IOCTL_TIMEOUT 0x28
1247#define STATUS_ERROR_ACITMAIN 0x2a
1248#define STATUS_REBOOT_REQUIRED 0x2c
1249#define STATUS_FCF_IN_USE 0x3a
1250
1251struct lpfc_mbx_sli4_config {
1252 struct mbox_header header;
1253};
1254
1255struct lpfc_mbx_init_vfi {
1256 uint32_t word1;
1257#define lpfc_init_vfi_vr_SHIFT 31
1258#define lpfc_init_vfi_vr_MASK 0x00000001
1259#define lpfc_init_vfi_vr_WORD word1
1260#define lpfc_init_vfi_vt_SHIFT 30
1261#define lpfc_init_vfi_vt_MASK 0x00000001
1262#define lpfc_init_vfi_vt_WORD word1
1263#define lpfc_init_vfi_vf_SHIFT 29
1264#define lpfc_init_vfi_vf_MASK 0x00000001
1265#define lpfc_init_vfi_vf_WORD word1
1266#define lpfc_init_vfi_vfi_SHIFT 0
1267#define lpfc_init_vfi_vfi_MASK 0x0000FFFF
1268#define lpfc_init_vfi_vfi_WORD word1
1269 uint32_t word2;
1270#define lpfc_init_vfi_fcfi_SHIFT 0
1271#define lpfc_init_vfi_fcfi_MASK 0x0000FFFF
1272#define lpfc_init_vfi_fcfi_WORD word2
1273 uint32_t word3;
1274#define lpfc_init_vfi_pri_SHIFT 13
1275#define lpfc_init_vfi_pri_MASK 0x00000007
1276#define lpfc_init_vfi_pri_WORD word3
1277#define lpfc_init_vfi_vf_id_SHIFT 1
1278#define lpfc_init_vfi_vf_id_MASK 0x00000FFF
1279#define lpfc_init_vfi_vf_id_WORD word3
1280 uint32_t word4;
1281#define lpfc_init_vfi_hop_count_SHIFT 24
1282#define lpfc_init_vfi_hop_count_MASK 0x000000FF
1283#define lpfc_init_vfi_hop_count_WORD word4
1284};
1285
1286struct lpfc_mbx_reg_vfi {
1287 uint32_t word1;
1288#define lpfc_reg_vfi_vp_SHIFT 28
1289#define lpfc_reg_vfi_vp_MASK 0x00000001
1290#define lpfc_reg_vfi_vp_WORD word1
1291#define lpfc_reg_vfi_vfi_SHIFT 0
1292#define lpfc_reg_vfi_vfi_MASK 0x0000FFFF
1293#define lpfc_reg_vfi_vfi_WORD word1
1294 uint32_t word2;
1295#define lpfc_reg_vfi_vpi_SHIFT 16
1296#define lpfc_reg_vfi_vpi_MASK 0x0000FFFF
1297#define lpfc_reg_vfi_vpi_WORD word2
1298#define lpfc_reg_vfi_fcfi_SHIFT 0
1299#define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF
1300#define lpfc_reg_vfi_fcfi_WORD word2
1301 uint32_t word3_rsvd;
1302 uint32_t word4_rsvd;
1303 struct ulp_bde64 bde;
1304 uint32_t word8_rsvd;
1305 uint32_t word9_rsvd;
1306 uint32_t word10;
1307#define lpfc_reg_vfi_nport_id_SHIFT 0
1308#define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF
1309#define lpfc_reg_vfi_nport_id_WORD word10
1310};
1311
1312struct lpfc_mbx_init_vpi {
1313 uint32_t word1;
1314#define lpfc_init_vpi_vfi_SHIFT 16
1315#define lpfc_init_vpi_vfi_MASK 0x0000FFFF
1316#define lpfc_init_vpi_vfi_WORD word1
1317#define lpfc_init_vpi_vpi_SHIFT 0
1318#define lpfc_init_vpi_vpi_MASK 0x0000FFFF
1319#define lpfc_init_vpi_vpi_WORD word1
1320};
1321
1322struct lpfc_mbx_read_vpi {
1323 uint32_t word1_rsvd;
1324 uint32_t word2;
1325#define lpfc_mbx_read_vpi_vnportid_SHIFT 0
1326#define lpfc_mbx_read_vpi_vnportid_MASK 0x00FFFFFF
1327#define lpfc_mbx_read_vpi_vnportid_WORD word2
1328 uint32_t word3_rsvd;
1329 uint32_t word4;
1330#define lpfc_mbx_read_vpi_acq_alpa_SHIFT 0
1331#define lpfc_mbx_read_vpi_acq_alpa_MASK 0x000000FF
1332#define lpfc_mbx_read_vpi_acq_alpa_WORD word4
1333#define lpfc_mbx_read_vpi_pb_SHIFT 15
1334#define lpfc_mbx_read_vpi_pb_MASK 0x00000001
1335#define lpfc_mbx_read_vpi_pb_WORD word4
1336#define lpfc_mbx_read_vpi_spec_alpa_SHIFT 16
1337#define lpfc_mbx_read_vpi_spec_alpa_MASK 0x000000FF
1338#define lpfc_mbx_read_vpi_spec_alpa_WORD word4
1339#define lpfc_mbx_read_vpi_ns_SHIFT 30
1340#define lpfc_mbx_read_vpi_ns_MASK 0x00000001
1341#define lpfc_mbx_read_vpi_ns_WORD word4
1342#define lpfc_mbx_read_vpi_hl_SHIFT 31
1343#define lpfc_mbx_read_vpi_hl_MASK 0x00000001
1344#define lpfc_mbx_read_vpi_hl_WORD word4
1345 uint32_t word5_rsvd;
1346 uint32_t word6;
1347#define lpfc_mbx_read_vpi_vpi_SHIFT 0
1348#define lpfc_mbx_read_vpi_vpi_MASK 0x0000FFFF
1349#define lpfc_mbx_read_vpi_vpi_WORD word6
1350 uint32_t word7;
1351#define lpfc_mbx_read_vpi_mac_0_SHIFT 0
1352#define lpfc_mbx_read_vpi_mac_0_MASK 0x000000FF
1353#define lpfc_mbx_read_vpi_mac_0_WORD word7
1354#define lpfc_mbx_read_vpi_mac_1_SHIFT 8
1355#define lpfc_mbx_read_vpi_mac_1_MASK 0x000000FF
1356#define lpfc_mbx_read_vpi_mac_1_WORD word7
1357#define lpfc_mbx_read_vpi_mac_2_SHIFT 16
1358#define lpfc_mbx_read_vpi_mac_2_MASK 0x000000FF
1359#define lpfc_mbx_read_vpi_mac_2_WORD word7
1360#define lpfc_mbx_read_vpi_mac_3_SHIFT 24
1361#define lpfc_mbx_read_vpi_mac_3_MASK 0x000000FF
1362#define lpfc_mbx_read_vpi_mac_3_WORD word7
1363 uint32_t word8;
1364#define lpfc_mbx_read_vpi_mac_4_SHIFT 0
1365#define lpfc_mbx_read_vpi_mac_4_MASK 0x000000FF
1366#define lpfc_mbx_read_vpi_mac_4_WORD word8
1367#define lpfc_mbx_read_vpi_mac_5_SHIFT 8
1368#define lpfc_mbx_read_vpi_mac_5_MASK 0x000000FF
1369#define lpfc_mbx_read_vpi_mac_5_WORD word8
1370#define lpfc_mbx_read_vpi_vlan_tag_SHIFT 16
1371#define lpfc_mbx_read_vpi_vlan_tag_MASK 0x00000FFF
1372#define lpfc_mbx_read_vpi_vlan_tag_WORD word8
1373#define lpfc_mbx_read_vpi_vv_SHIFT 28
1374#define lpfc_mbx_read_vpi_vv_MASK 0x0000001
1375#define lpfc_mbx_read_vpi_vv_WORD word8
1376};
1377
1378struct lpfc_mbx_unreg_vfi {
1379 uint32_t word1_rsvd;
1380 uint32_t word2;
1381#define lpfc_unreg_vfi_vfi_SHIFT 0
1382#define lpfc_unreg_vfi_vfi_MASK 0x0000FFFF
1383#define lpfc_unreg_vfi_vfi_WORD word2
1384};
1385
1386struct lpfc_mbx_resume_rpi {
1387 uint32_t word1;
1388#define lpfc_resume_rpi_rpi_SHIFT 0
1389#define lpfc_resume_rpi_rpi_MASK 0x0000FFFF
1390#define lpfc_resume_rpi_rpi_WORD word1
1391 uint32_t event_tag;
1392 uint32_t word3_rsvd;
1393 uint32_t word4_rsvd;
1394 uint32_t word5_rsvd;
1395 uint32_t word6;
1396#define lpfc_resume_rpi_vpi_SHIFT 0
1397#define lpfc_resume_rpi_vpi_MASK 0x0000FFFF
1398#define lpfc_resume_rpi_vpi_WORD word6
1399#define lpfc_resume_rpi_vfi_SHIFT 16
1400#define lpfc_resume_rpi_vfi_MASK 0x0000FFFF
1401#define lpfc_resume_rpi_vfi_WORD word6
1402};
1403
1404#define REG_FCF_INVALID_QID 0xFFFF
1405struct lpfc_mbx_reg_fcfi {
1406 uint32_t word1;
1407#define lpfc_reg_fcfi_info_index_SHIFT 0
1408#define lpfc_reg_fcfi_info_index_MASK 0x0000FFFF
1409#define lpfc_reg_fcfi_info_index_WORD word1
1410#define lpfc_reg_fcfi_fcfi_SHIFT 16
1411#define lpfc_reg_fcfi_fcfi_MASK 0x0000FFFF
1412#define lpfc_reg_fcfi_fcfi_WORD word1
1413 uint32_t word2;
1414#define lpfc_reg_fcfi_rq_id1_SHIFT 0
1415#define lpfc_reg_fcfi_rq_id1_MASK 0x0000FFFF
1416#define lpfc_reg_fcfi_rq_id1_WORD word2
1417#define lpfc_reg_fcfi_rq_id0_SHIFT 16
1418#define lpfc_reg_fcfi_rq_id0_MASK 0x0000FFFF
1419#define lpfc_reg_fcfi_rq_id0_WORD word2
1420 uint32_t word3;
1421#define lpfc_reg_fcfi_rq_id3_SHIFT 0
1422#define lpfc_reg_fcfi_rq_id3_MASK 0x0000FFFF
1423#define lpfc_reg_fcfi_rq_id3_WORD word3
1424#define lpfc_reg_fcfi_rq_id2_SHIFT 16
1425#define lpfc_reg_fcfi_rq_id2_MASK 0x0000FFFF
1426#define lpfc_reg_fcfi_rq_id2_WORD word3
1427 uint32_t word4;
1428#define lpfc_reg_fcfi_type_match0_SHIFT 24
1429#define lpfc_reg_fcfi_type_match0_MASK 0x000000FF
1430#define lpfc_reg_fcfi_type_match0_WORD word4
1431#define lpfc_reg_fcfi_type_mask0_SHIFT 16
1432#define lpfc_reg_fcfi_type_mask0_MASK 0x000000FF
1433#define lpfc_reg_fcfi_type_mask0_WORD word4
1434#define lpfc_reg_fcfi_rctl_match0_SHIFT 8
1435#define lpfc_reg_fcfi_rctl_match0_MASK 0x000000FF
1436#define lpfc_reg_fcfi_rctl_match0_WORD word4
1437#define lpfc_reg_fcfi_rctl_mask0_SHIFT 0
1438#define lpfc_reg_fcfi_rctl_mask0_MASK 0x000000FF
1439#define lpfc_reg_fcfi_rctl_mask0_WORD word4
1440 uint32_t word5;
1441#define lpfc_reg_fcfi_type_match1_SHIFT 24
1442#define lpfc_reg_fcfi_type_match1_MASK 0x000000FF
1443#define lpfc_reg_fcfi_type_match1_WORD word5
1444#define lpfc_reg_fcfi_type_mask1_SHIFT 16
1445#define lpfc_reg_fcfi_type_mask1_MASK 0x000000FF
1446#define lpfc_reg_fcfi_type_mask1_WORD word5
1447#define lpfc_reg_fcfi_rctl_match1_SHIFT 8
1448#define lpfc_reg_fcfi_rctl_match1_MASK 0x000000FF
1449#define lpfc_reg_fcfi_rctl_match1_WORD word5
1450#define lpfc_reg_fcfi_rctl_mask1_SHIFT 0
1451#define lpfc_reg_fcfi_rctl_mask1_MASK 0x000000FF
1452#define lpfc_reg_fcfi_rctl_mask1_WORD word5
1453 uint32_t word6;
1454#define lpfc_reg_fcfi_type_match2_SHIFT 24
1455#define lpfc_reg_fcfi_type_match2_MASK 0x000000FF
1456#define lpfc_reg_fcfi_type_match2_WORD word6
1457#define lpfc_reg_fcfi_type_mask2_SHIFT 16
1458#define lpfc_reg_fcfi_type_mask2_MASK 0x000000FF
1459#define lpfc_reg_fcfi_type_mask2_WORD word6
1460#define lpfc_reg_fcfi_rctl_match2_SHIFT 8
1461#define lpfc_reg_fcfi_rctl_match2_MASK 0x000000FF
1462#define lpfc_reg_fcfi_rctl_match2_WORD word6
1463#define lpfc_reg_fcfi_rctl_mask2_SHIFT 0
1464#define lpfc_reg_fcfi_rctl_mask2_MASK 0x000000FF
1465#define lpfc_reg_fcfi_rctl_mask2_WORD word6
1466 uint32_t word7;
1467#define lpfc_reg_fcfi_type_match3_SHIFT 24
1468#define lpfc_reg_fcfi_type_match3_MASK 0x000000FF
1469#define lpfc_reg_fcfi_type_match3_WORD word7
1470#define lpfc_reg_fcfi_type_mask3_SHIFT 16
1471#define lpfc_reg_fcfi_type_mask3_MASK 0x000000FF
1472#define lpfc_reg_fcfi_type_mask3_WORD word7
1473#define lpfc_reg_fcfi_rctl_match3_SHIFT 8
1474#define lpfc_reg_fcfi_rctl_match3_MASK 0x000000FF
1475#define lpfc_reg_fcfi_rctl_match3_WORD word7
1476#define lpfc_reg_fcfi_rctl_mask3_SHIFT 0
1477#define lpfc_reg_fcfi_rctl_mask3_MASK 0x000000FF
1478#define lpfc_reg_fcfi_rctl_mask3_WORD word7
1479 uint32_t word8;
1480#define lpfc_reg_fcfi_mam_SHIFT 13
1481#define lpfc_reg_fcfi_mam_MASK 0x00000003
1482#define lpfc_reg_fcfi_mam_WORD word8
1483#define LPFC_MAM_BOTH 0 /* Both SPMA and FPMA */
1484#define LPFC_MAM_SPMA 1 /* Server Provided MAC Address */
1485#define LPFC_MAM_FPMA 2 /* Fabric Provided MAC Address */
1486#define lpfc_reg_fcfi_vv_SHIFT 12
1487#define lpfc_reg_fcfi_vv_MASK 0x00000001
1488#define lpfc_reg_fcfi_vv_WORD word8
1489#define lpfc_reg_fcfi_vlan_tag_SHIFT 0
1490#define lpfc_reg_fcfi_vlan_tag_MASK 0x00000FFF
1491#define lpfc_reg_fcfi_vlan_tag_WORD word8
1492};
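Every field in the mailbox structures above follows the same name_SHIFT / name_MASK / name_WORD triplet, so fields are read and written through generic token-pasting accessors rather than C bitfields. A minimal sketch of that accessor pattern follows, assuming bf_get()/bf_set() helpers equivalent to the ones the driver uses with these structures; the REG_FCFI fill routine is purely hypothetical usage, not driver code.

/* Sketch only: the accessor pattern implied by the _SHIFT/_MASK/_WORD
 * defines above. Assumes helpers equivalent to the driver's bf_get()
 * and bf_set(); not a verbatim copy of driver code.
 */
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

/* Hypothetical usage: point FCF entry 0 at receive queue 0x10, leave
 * the second RQ unused, and let the port use either MAC addressing
 * mode. All field values are examples.
 */
static void example_fill_reg_fcfi(struct lpfc_mbx_reg_fcfi *reg)
{
	bf_set(lpfc_reg_fcfi_info_index, reg, 0);
	bf_set(lpfc_reg_fcfi_rq_id0, reg, 0x10);
	bf_set(lpfc_reg_fcfi_rq_id1, reg, REG_FCF_INVALID_QID);
	bf_set(lpfc_reg_fcfi_mam, reg, LPFC_MAM_BOTH);
}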
1493
1494struct lpfc_mbx_unreg_fcfi {
1495 uint32_t word1_rsv;
1496 uint32_t word2;
1497#define lpfc_unreg_fcfi_SHIFT 0
1498#define lpfc_unreg_fcfi_MASK 0x0000FFFF
1499#define lpfc_unreg_fcfi_WORD word2
1500};
1501
1502struct lpfc_mbx_read_rev {
1503 uint32_t word1;
1504#define lpfc_mbx_rd_rev_sli_lvl_SHIFT 16
1505#define lpfc_mbx_rd_rev_sli_lvl_MASK 0x0000000F
1506#define lpfc_mbx_rd_rev_sli_lvl_WORD word1
1507#define lpfc_mbx_rd_rev_fcoe_SHIFT 20
1508#define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001
1509#define lpfc_mbx_rd_rev_fcoe_WORD word1
1510#define lpfc_mbx_rd_rev_vpd_SHIFT 29
1511#define lpfc_mbx_rd_rev_vpd_MASK 0x00000001
1512#define lpfc_mbx_rd_rev_vpd_WORD word1
1513 uint32_t first_hw_rev;
1514 uint32_t second_hw_rev;
1515 uint32_t word4_rsvd;
1516 uint32_t third_hw_rev;
1517 uint32_t word6;
1518#define lpfc_mbx_rd_rev_fcph_low_SHIFT 0
1519#define lpfc_mbx_rd_rev_fcph_low_MASK 0x000000FF
1520#define lpfc_mbx_rd_rev_fcph_low_WORD word6
1521#define lpfc_mbx_rd_rev_fcph_high_SHIFT 8
1522#define lpfc_mbx_rd_rev_fcph_high_MASK 0x000000FF
1523#define lpfc_mbx_rd_rev_fcph_high_WORD word6
1524#define lpfc_mbx_rd_rev_ftr_lvl_low_SHIFT 16
1525#define lpfc_mbx_rd_rev_ftr_lvl_low_MASK 0x000000FF
1526#define lpfc_mbx_rd_rev_ftr_lvl_low_WORD word6
1527#define lpfc_mbx_rd_rev_ftr_lvl_high_SHIFT 24
1528#define lpfc_mbx_rd_rev_ftr_lvl_high_MASK 0x000000FF
1529#define lpfc_mbx_rd_rev_ftr_lvl_high_WORD word6
1530 uint32_t word7_rsvd;
1531 uint32_t fw_id_rev;
1532 uint8_t fw_name[16];
1533 uint32_t ulp_fw_id_rev;
1534 uint8_t ulp_fw_name[16];
1535 uint32_t word18_47_rsvd[30];
1536 uint32_t word48;
1537#define lpfc_mbx_rd_rev_avail_len_SHIFT 0
1538#define lpfc_mbx_rd_rev_avail_len_MASK 0x00FFFFFF
1539#define lpfc_mbx_rd_rev_avail_len_WORD word48
1540 uint32_t vpd_paddr_low;
1541 uint32_t vpd_paddr_high;
1542 uint32_t avail_vpd_len;
1543 uint32_t rsvd_52_63[12];
1544};
1545
1546struct lpfc_mbx_read_config {
1547 uint32_t word1;
1548#define lpfc_mbx_rd_conf_max_bbc_SHIFT 0
1549#define lpfc_mbx_rd_conf_max_bbc_MASK 0x000000FF
1550#define lpfc_mbx_rd_conf_max_bbc_WORD word1
1551#define lpfc_mbx_rd_conf_init_bbc_SHIFT 8
1552#define lpfc_mbx_rd_conf_init_bbc_MASK 0x000000FF
1553#define lpfc_mbx_rd_conf_init_bbc_WORD word1
1554 uint32_t word2;
1555#define lpfc_mbx_rd_conf_nport_did_SHIFT 0
1556#define lpfc_mbx_rd_conf_nport_did_MASK 0x00FFFFFF
1557#define lpfc_mbx_rd_conf_nport_did_WORD word2
1558#define lpfc_mbx_rd_conf_topology_SHIFT 24
1559#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
1560#define lpfc_mbx_rd_conf_topology_WORD word2
1561 uint32_t word3;
1562#define lpfc_mbx_rd_conf_ao_SHIFT 0
1563#define lpfc_mbx_rd_conf_ao_MASK 0x00000001
1564#define lpfc_mbx_rd_conf_ao_WORD word3
1565#define lpfc_mbx_rd_conf_bb_scn_SHIFT 8
1566#define lpfc_mbx_rd_conf_bb_scn_MASK 0x0000000F
1567#define lpfc_mbx_rd_conf_bb_scn_WORD word3
1568#define lpfc_mbx_rd_conf_cbb_scn_SHIFT 12
1569#define lpfc_mbx_rd_conf_cbb_scn_MASK 0x0000000F
1570#define lpfc_mbx_rd_conf_cbb_scn_WORD word3
1571#define lpfc_mbx_rd_conf_mc_SHIFT 29
1572#define lpfc_mbx_rd_conf_mc_MASK 0x00000001
1573#define lpfc_mbx_rd_conf_mc_WORD word3
1574 uint32_t word4;
1575#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0
1576#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF
1577#define lpfc_mbx_rd_conf_e_d_tov_WORD word4
1578 uint32_t word5;
1579#define lpfc_mbx_rd_conf_lp_tov_SHIFT 0
1580#define lpfc_mbx_rd_conf_lp_tov_MASK 0x0000FFFF
1581#define lpfc_mbx_rd_conf_lp_tov_WORD word5
1582 uint32_t word6;
1583#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0
1584#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF
1585#define lpfc_mbx_rd_conf_r_a_tov_WORD word6
1586 uint32_t word7;
1587#define lpfc_mbx_rd_conf_r_t_tov_SHIFT 0
1588#define lpfc_mbx_rd_conf_r_t_tov_MASK 0x000000FF
1589#define lpfc_mbx_rd_conf_r_t_tov_WORD word7
1590 uint32_t word8;
1591#define lpfc_mbx_rd_conf_al_tov_SHIFT 0
1592#define lpfc_mbx_rd_conf_al_tov_MASK 0x0000000F
1593#define lpfc_mbx_rd_conf_al_tov_WORD word8
1594 uint32_t word9;
1595#define lpfc_mbx_rd_conf_lmt_SHIFT 0
1596#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF
1597#define lpfc_mbx_rd_conf_lmt_WORD word9
1598 uint32_t word10;
1599#define lpfc_mbx_rd_conf_max_alpa_SHIFT 0
1600#define lpfc_mbx_rd_conf_max_alpa_MASK 0x000000FF
1601#define lpfc_mbx_rd_conf_max_alpa_WORD word10
1602 uint32_t word11_rsvd;
1603 uint32_t word12;
1604#define lpfc_mbx_rd_conf_xri_base_SHIFT 0
1605#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF
1606#define lpfc_mbx_rd_conf_xri_base_WORD word12
1607#define lpfc_mbx_rd_conf_xri_count_SHIFT 16
1608#define lpfc_mbx_rd_conf_xri_count_MASK 0x0000FFFF
1609#define lpfc_mbx_rd_conf_xri_count_WORD word12
1610 uint32_t word13;
1611#define lpfc_mbx_rd_conf_rpi_base_SHIFT 0
1612#define lpfc_mbx_rd_conf_rpi_base_MASK 0x0000FFFF
1613#define lpfc_mbx_rd_conf_rpi_base_WORD word13
1614#define lpfc_mbx_rd_conf_rpi_count_SHIFT 16
1615#define lpfc_mbx_rd_conf_rpi_count_MASK 0x0000FFFF
1616#define lpfc_mbx_rd_conf_rpi_count_WORD word13
1617 uint32_t word14;
1618#define lpfc_mbx_rd_conf_vpi_base_SHIFT 0
1619#define lpfc_mbx_rd_conf_vpi_base_MASK 0x0000FFFF
1620#define lpfc_mbx_rd_conf_vpi_base_WORD word14
1621#define lpfc_mbx_rd_conf_vpi_count_SHIFT 16
1622#define lpfc_mbx_rd_conf_vpi_count_MASK 0x0000FFFF
1623#define lpfc_mbx_rd_conf_vpi_count_WORD word14
1624 uint32_t word15;
1625#define lpfc_mbx_rd_conf_vfi_base_SHIFT 0
1626#define lpfc_mbx_rd_conf_vfi_base_MASK 0x0000FFFF
1627#define lpfc_mbx_rd_conf_vfi_base_WORD word15
1628#define lpfc_mbx_rd_conf_vfi_count_SHIFT 16
1629#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF
1630#define lpfc_mbx_rd_conf_vfi_count_WORD word15
1631 uint32_t word16;
1632#define lpfc_mbx_rd_conf_fcfi_base_SHIFT 0
1633#define lpfc_mbx_rd_conf_fcfi_base_MASK 0x0000FFFF
1634#define lpfc_mbx_rd_conf_fcfi_base_WORD word16
1635#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16
1636#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF
1637#define lpfc_mbx_rd_conf_fcfi_count_WORD word16
1638 uint32_t word17;
1639#define lpfc_mbx_rd_conf_rq_count_SHIFT 0
1640#define lpfc_mbx_rd_conf_rq_count_MASK 0x0000FFFF
1641#define lpfc_mbx_rd_conf_rq_count_WORD word17
1642#define lpfc_mbx_rd_conf_eq_count_SHIFT 16
1643#define lpfc_mbx_rd_conf_eq_count_MASK 0x0000FFFF
1644#define lpfc_mbx_rd_conf_eq_count_WORD word17
1645 uint32_t word18;
1646#define lpfc_mbx_rd_conf_wq_count_SHIFT 0
1647#define lpfc_mbx_rd_conf_wq_count_MASK 0x0000FFFF
1648#define lpfc_mbx_rd_conf_wq_count_WORD word18
1649#define lpfc_mbx_rd_conf_cq_count_SHIFT 16
1650#define lpfc_mbx_rd_conf_cq_count_MASK 0x0000FFFF
1651#define lpfc_mbx_rd_conf_cq_count_WORD word18
1652};
1653
1654struct lpfc_mbx_request_features {
1655 uint32_t word1;
1656#define lpfc_mbx_rq_ftr_qry_SHIFT 0
1657#define lpfc_mbx_rq_ftr_qry_MASK 0x00000001
1658#define lpfc_mbx_rq_ftr_qry_WORD word1
1659 uint32_t word2;
1660#define lpfc_mbx_rq_ftr_rq_iaab_SHIFT 0
1661#define lpfc_mbx_rq_ftr_rq_iaab_MASK 0x00000001
1662#define lpfc_mbx_rq_ftr_rq_iaab_WORD word2
1663#define lpfc_mbx_rq_ftr_rq_npiv_SHIFT 1
1664#define lpfc_mbx_rq_ftr_rq_npiv_MASK 0x00000001
1665#define lpfc_mbx_rq_ftr_rq_npiv_WORD word2
1666#define lpfc_mbx_rq_ftr_rq_dif_SHIFT 2
1667#define lpfc_mbx_rq_ftr_rq_dif_MASK 0x00000001
1668#define lpfc_mbx_rq_ftr_rq_dif_WORD word2
1669#define lpfc_mbx_rq_ftr_rq_vf_SHIFT 3
1670#define lpfc_mbx_rq_ftr_rq_vf_MASK 0x00000001
1671#define lpfc_mbx_rq_ftr_rq_vf_WORD word2
1672#define lpfc_mbx_rq_ftr_rq_fcpi_SHIFT 4
1673#define lpfc_mbx_rq_ftr_rq_fcpi_MASK 0x00000001
1674#define lpfc_mbx_rq_ftr_rq_fcpi_WORD word2
1675#define lpfc_mbx_rq_ftr_rq_fcpt_SHIFT 5
1676#define lpfc_mbx_rq_ftr_rq_fcpt_MASK 0x00000001
1677#define lpfc_mbx_rq_ftr_rq_fcpt_WORD word2
1678#define lpfc_mbx_rq_ftr_rq_fcpc_SHIFT 6
1679#define lpfc_mbx_rq_ftr_rq_fcpc_MASK 0x00000001
1680#define lpfc_mbx_rq_ftr_rq_fcpc_WORD word2
1681#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7
1682#define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001
1683#define lpfc_mbx_rq_ftr_rq_ifip_WORD word2
1684 uint32_t word3;
1685#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0
1686#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001
1687#define lpfc_mbx_rq_ftr_rsp_iaab_WORD word3
1688#define lpfc_mbx_rq_ftr_rsp_npiv_SHIFT 1
1689#define lpfc_mbx_rq_ftr_rsp_npiv_MASK 0x00000001
1690#define lpfc_mbx_rq_ftr_rsp_npiv_WORD word3
1691#define lpfc_mbx_rq_ftr_rsp_dif_SHIFT 2
1692#define lpfc_mbx_rq_ftr_rsp_dif_MASK 0x00000001
1693#define lpfc_mbx_rq_ftr_rsp_dif_WORD word3
1694#define lpfc_mbx_rq_ftr_rsp_vf_SHIFT 3
1695#define lpfc_mbx_rq_ftr_rsp_vf_MASK 0x00000001
1696#define lpfc_mbx_rq_ftr_rsp_vf_WORD word3
1697#define lpfc_mbx_rq_ftr_rsp_fcpi_SHIFT 4
1698#define lpfc_mbx_rq_ftr_rsp_fcpi_MASK 0x00000001
1699#define lpfc_mbx_rq_ftr_rsp_fcpi_WORD word3
1700#define lpfc_mbx_rq_ftr_rsp_fcpt_SHIFT 5
1701#define lpfc_mbx_rq_ftr_rsp_fcpt_MASK 0x00000001
1702#define lpfc_mbx_rq_ftr_rsp_fcpt_WORD word3
1703#define lpfc_mbx_rq_ftr_rsp_fcpc_SHIFT 6
1704#define lpfc_mbx_rq_ftr_rsp_fcpc_MASK 0x00000001
1705#define lpfc_mbx_rq_ftr_rsp_fcpc_WORD word3
1706#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7
1707#define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001
1708#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3
1709};
1710
1711/* Mailbox Completion Queue Error Messages */
1712#define MB_CQE_STATUS_SUCCESS 0x0
1713#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
1714#define MB_CQE_STATUS_INVALID_PARAMETER 0x2
1715#define MB_CQE_STATUS_INSUFFICIENT_RESOURCES 0x3
1716#define MB_CQE_STATUS_QUEUE_FLUSHING 0x4
1717#define MB_CQE_STATUS_DMA_FAILED 0x5
1718
1719/* mailbox queue entry structure */
1720struct lpfc_mqe {
1721 uint32_t word0;
1722#define lpfc_mqe_status_SHIFT 16
1723#define lpfc_mqe_status_MASK 0x0000FFFF
1724#define lpfc_mqe_status_WORD word0
1725#define lpfc_mqe_command_SHIFT 8
1726#define lpfc_mqe_command_MASK 0x000000FF
1727#define lpfc_mqe_command_WORD word0
1728 union {
1729 uint32_t mb_words[LPFC_SLI4_MB_WORD_COUNT - 1];
1730 /* sli4 mailbox commands */
1731 struct lpfc_mbx_sli4_config sli4_config;
1732 struct lpfc_mbx_init_vfi init_vfi;
1733 struct lpfc_mbx_reg_vfi reg_vfi;
1734 struct lpfc_mbx_reg_vfi unreg_vfi;
1735 struct lpfc_mbx_init_vpi init_vpi;
1736 struct lpfc_mbx_resume_rpi resume_rpi;
1737 struct lpfc_mbx_read_fcf_tbl read_fcf_tbl;
1738 struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry;
1739 struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry;
1740 struct lpfc_mbx_reg_fcfi reg_fcfi;
1741 struct lpfc_mbx_unreg_fcfi unreg_fcfi;
1742 struct lpfc_mbx_mq_create mq_create;
1743 struct lpfc_mbx_eq_create eq_create;
1744 struct lpfc_mbx_cq_create cq_create;
1745 struct lpfc_mbx_wq_create wq_create;
1746 struct lpfc_mbx_rq_create rq_create;
1747 struct lpfc_mbx_mq_destroy mq_destroy;
1748 struct lpfc_mbx_eq_destroy eq_destroy;
1749 struct lpfc_mbx_cq_destroy cq_destroy;
1750 struct lpfc_mbx_wq_destroy wq_destroy;
1751 struct lpfc_mbx_rq_destroy rq_destroy;
1752 struct lpfc_mbx_post_sgl_pages post_sgl_pages;
1753 struct lpfc_mbx_nembed_cmd nembed_cmd;
1754 struct lpfc_mbx_read_rev read_rev;
1755 struct lpfc_mbx_read_vpi read_vpi;
1756 struct lpfc_mbx_read_config rd_config;
1757 struct lpfc_mbx_request_features req_ftrs;
1758 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
1759 struct lpfc_mbx_nop nop;
1760 } un;
1761};
1762
1763struct lpfc_mcqe {
1764 uint32_t word0;
1765#define lpfc_mcqe_status_SHIFT 0
1766#define lpfc_mcqe_status_MASK 0x0000FFFF
1767#define lpfc_mcqe_status_WORD word0
1768#define lpfc_mcqe_ext_status_SHIFT 16
1769#define lpfc_mcqe_ext_status_MASK 0x0000FFFF
1770#define lpfc_mcqe_ext_status_WORD word0
1771 uint32_t mcqe_tag0;
1772 uint32_t mcqe_tag1;
1773 uint32_t trailer;
1774#define lpfc_trailer_valid_SHIFT 31
1775#define lpfc_trailer_valid_MASK 0x00000001
1776#define lpfc_trailer_valid_WORD trailer
1777#define lpfc_trailer_async_SHIFT 30
1778#define lpfc_trailer_async_MASK 0x00000001
1779#define lpfc_trailer_async_WORD trailer
1780#define lpfc_trailer_hpi_SHIFT 29
1781#define lpfc_trailer_hpi_MASK 0x00000001
1782#define lpfc_trailer_hpi_WORD trailer
1783#define lpfc_trailer_completed_SHIFT 28
1784#define lpfc_trailer_completed_MASK 0x00000001
1785#define lpfc_trailer_completed_WORD trailer
1786#define lpfc_trailer_consumed_SHIFT 27
1787#define lpfc_trailer_consumed_MASK 0x00000001
1788#define lpfc_trailer_consumed_WORD trailer
1789#define lpfc_trailer_type_SHIFT 16
1790#define lpfc_trailer_type_MASK 0x000000FF
1791#define lpfc_trailer_type_WORD trailer
1792#define lpfc_trailer_code_SHIFT 8
1793#define lpfc_trailer_code_MASK 0x000000FF
1794#define lpfc_trailer_code_WORD trailer
1795#define LPFC_TRAILER_CODE_LINK 0x1
1796#define LPFC_TRAILER_CODE_FCOE 0x2
1797#define LPFC_TRAILER_CODE_DCBX 0x3
1798};
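Completion entries are consumed through the same pattern: the trailer word carries the valid, async, and event-code bits defined above. A hedged sketch of decoding it, reusing the bf_get() helper from the earlier sketch; the function name is hypothetical.

/* Illustrative only: returns nonzero when an MCQE is a valid
 * asynchronous event carrying the FCoE trailer code. Assumes the
 * bf_get() helper from the sketch above.
 */
static int example_mcqe_is_fcoe_async(struct lpfc_mcqe *mcqe)
{
	if (!bf_get(lpfc_trailer_valid, mcqe))
		return 0;
	return bf_get(lpfc_trailer_async, mcqe) &&
	       bf_get(lpfc_trailer_code, mcqe) == LPFC_TRAILER_CODE_FCOE;
}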
1799
1800struct lpfc_acqe_link {
1801 uint32_t word0;
1802#define lpfc_acqe_link_speed_SHIFT 24
1803#define lpfc_acqe_link_speed_MASK 0x000000FF
1804#define lpfc_acqe_link_speed_WORD word0
1805#define LPFC_ASYNC_LINK_SPEED_ZERO 0x0
1806#define LPFC_ASYNC_LINK_SPEED_10MBPS 0x1
1807#define LPFC_ASYNC_LINK_SPEED_100MBPS 0x2
1808#define LPFC_ASYNC_LINK_SPEED_1GBPS 0x3
1809#define LPFC_ASYNC_LINK_SPEED_10GBPS 0x4
1810#define lpfc_acqe_link_duplex_SHIFT 16
1811#define lpfc_acqe_link_duplex_MASK 0x000000FF
1812#define lpfc_acqe_link_duplex_WORD word0
1813#define LPFC_ASYNC_LINK_DUPLEX_NONE 0x0
1814#define LPFC_ASYNC_LINK_DUPLEX_HALF 0x1
1815#define LPFC_ASYNC_LINK_DUPLEX_FULL 0x2
1816#define lpfc_acqe_link_status_SHIFT 8
1817#define lpfc_acqe_link_status_MASK 0x000000FF
1818#define lpfc_acqe_link_status_WORD word0
1819#define LPFC_ASYNC_LINK_STATUS_DOWN 0x0
1820#define LPFC_ASYNC_LINK_STATUS_UP 0x1
1821#define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN 0x2
1822#define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP 0x3
1823#define lpfc_acqe_link_physical_SHIFT 0
1824#define lpfc_acqe_link_physical_MASK 0x000000FF
1825#define lpfc_acqe_link_physical_WORD word0
1826#define LPFC_ASYNC_LINK_PORT_A 0x0
1827#define LPFC_ASYNC_LINK_PORT_B 0x1
1828 uint32_t word1;
1829#define lpfc_acqe_link_fault_SHIFT 0
1830#define lpfc_acqe_link_fault_MASK 0x000000FF
1831#define lpfc_acqe_link_fault_WORD word1
1832#define LPFC_ASYNC_LINK_FAULT_NONE 0x0
1833#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1
1834#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2
1835 uint32_t event_tag;
1836 uint32_t trailer;
1837};
1838
1839struct lpfc_acqe_fcoe {
1840 uint32_t fcf_index;
1841 uint32_t word1;
1842#define lpfc_acqe_fcoe_fcf_count_SHIFT 0
1843#define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF
1844#define lpfc_acqe_fcoe_fcf_count_WORD word1
1845#define lpfc_acqe_fcoe_event_type_SHIFT 16
1846#define lpfc_acqe_fcoe_event_type_MASK 0x0000FFFF
1847#define lpfc_acqe_fcoe_event_type_WORD word1
1848#define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1
1849#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2
1850#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3
1851 uint32_t event_tag;
1852 uint32_t trailer;
1853};
1854
1855struct lpfc_acqe_dcbx {
1856 uint32_t tlv_ttl;
1857 uint32_t reserved;
1858 uint32_t event_tag;
1859 uint32_t trailer;
1860};
1861
1862/*
1863 * Define the bootstrap mailbox (bmbx) region used to communicate
1864 * mailbox command between the host and port. The mailbox consists
1865 * of a payload area of 256 bytes and a completion queue of length
1866 * 16 bytes.
1867 */
1868struct lpfc_bmbx_create {
1869 struct lpfc_mqe mqe;
1870 struct lpfc_mcqe mcqe;
1871};
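The 256-byte payload and 16-byte completion entry described in the comment above fall straight out of the layouts: lpfc_mqe is one command word plus LPFC_SLI4_MB_WORD_COUNT - 1 payload words, and lpfc_mcqe is four words. A sketch of compile-time checks that would pin this down, assuming LPFC_SLI4_MB_WORD_COUNT is 64 and that no member of the mqe union exceeds the mb_words[] array:

/* Sketch only: compile-time layout checks for the bootstrap mailbox
 * region (assumptions: LPFC_SLI4_MB_WORD_COUNT == 64 and no command
 * struct in the un union is larger than mb_words[]).
 */
#include <linux/kernel.h>	/* BUILD_BUG_ON */

static inline void example_bmbx_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct lpfc_mqe) != 256);
	BUILD_BUG_ON(sizeof(struct lpfc_mcqe) != 16);
	BUILD_BUG_ON(sizeof(struct lpfc_bmbx_create) !=
		     sizeof(struct lpfc_mqe) + sizeof(struct lpfc_mcqe));
}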
1872
1873#define SGL_ALIGN_SZ 64
1874#define SGL_PAGE_SIZE 4096
1875/* align SGL addr on a size boundary - adjust address up */
1876#define NO_XRI ((uint16_t)-1)
1877struct wqe_common {
1878 uint32_t word6;
1879#define wqe_xri_SHIFT 0
1880#define wqe_xri_MASK 0x0000FFFF
1881#define wqe_xri_WORD word6
1882#define wqe_ctxt_tag_SHIFT 16
1883#define wqe_ctxt_tag_MASK 0x0000FFFF
1884#define wqe_ctxt_tag_WORD word6
1885 uint32_t word7;
1886#define wqe_ct_SHIFT 2
1887#define wqe_ct_MASK 0x00000003
1888#define wqe_ct_WORD word7
1889#define wqe_status_SHIFT 4
1890#define wqe_status_MASK 0x0000000f
1891#define wqe_status_WORD word7
1892#define wqe_cmnd_SHIFT 8
1893#define wqe_cmnd_MASK 0x000000ff
1894#define wqe_cmnd_WORD word7
1895#define wqe_class_SHIFT 16
1896#define wqe_class_MASK 0x00000007
1897#define wqe_class_WORD word7
1898#define wqe_pu_SHIFT 20
1899#define wqe_pu_MASK 0x00000003
1900#define wqe_pu_WORD word7
1901#define wqe_erp_SHIFT 22
1902#define wqe_erp_MASK 0x00000001
1903#define wqe_erp_WORD word7
1904#define wqe_lnk_SHIFT 23
1905#define wqe_lnk_MASK 0x00000001
1906#define wqe_lnk_WORD word7
1907#define wqe_tmo_SHIFT 24
1908#define wqe_tmo_MASK 0x000000ff
1909#define wqe_tmo_WORD word7
1910 uint32_t abort_tag; /* word 8 in WQE */
1911 uint32_t word9;
1912#define wqe_reqtag_SHIFT 0
1913#define wqe_reqtag_MASK 0x0000FFFF
1914#define wqe_reqtag_WORD word9
1915#define wqe_rcvoxid_SHIFT 16
1916#define wqe_rcvoxid_MASK 0x0000FFFF
1917#define wqe_rcvoxid_WORD word9
1918 uint32_t word10;
1919#define wqe_pri_SHIFT 16
1920#define wqe_pri_MASK 0x00000007
1921#define wqe_pri_WORD word10
1922#define wqe_pv_SHIFT 19
1923#define wqe_pv_MASK 0x00000001
1924#define wqe_pv_WORD word10
1925#define wqe_xc_SHIFT 21
1926#define wqe_xc_MASK 0x00000001
1927#define wqe_xc_WORD word10
1928#define wqe_ccpe_SHIFT 23
1929#define wqe_ccpe_MASK 0x00000001
1930#define wqe_ccpe_WORD word10
1931#define wqe_ccp_SHIFT 24
1932#define wqe_ccp_MASK 0x000000ff
1933#define wqe_ccp_WORD word10
1934 uint32_t word11;
1935#define wqe_cmd_type_SHIFT 0
1936#define wqe_cmd_type_MASK 0x0000000f
1937#define wqe_cmd_type_WORD word11
1938#define wqe_wqec_SHIFT 7
1939#define wqe_wqec_MASK 0x00000001
1940#define wqe_wqec_WORD word11
1941#define wqe_cqid_SHIFT 16
1942#define wqe_cqid_MASK 0x000003ff
1943#define wqe_cqid_WORD word11
1944};
1945
1946struct wqe_did {
1947 uint32_t word5;
1948#define wqe_els_did_SHIFT 0
1949#define wqe_els_did_MASK 0x00FFFFFF
1950#define wqe_els_did_WORD word5
1951#define wqe_xmit_bls_ar_SHIFT 30
1952#define wqe_xmit_bls_ar_MASK 0x00000001
1953#define wqe_xmit_bls_ar_WORD word5
1954#define wqe_xmit_bls_xo_SHIFT 31
1955#define wqe_xmit_bls_xo_MASK 0x00000001
1956#define wqe_xmit_bls_xo_WORD word5
1957};
1958
1959struct els_request64_wqe {
1960 struct ulp_bde64 bde;
1961 uint32_t payload_len;
1962 uint32_t word4;
1963#define els_req64_sid_SHIFT 0
1964#define els_req64_sid_MASK 0x00FFFFFF
1965#define els_req64_sid_WORD word4
1966#define els_req64_sp_SHIFT 24
1967#define els_req64_sp_MASK 0x00000001
1968#define els_req64_sp_WORD word4
1969#define els_req64_vf_SHIFT 25
1970#define els_req64_vf_MASK 0x00000001
1971#define els_req64_vf_WORD word4
1972 struct wqe_did wqe_dest;
1973 struct wqe_common wqe_com; /* words 6-11 */
1974 uint32_t word12;
1975#define els_req64_vfid_SHIFT 1
1976#define els_req64_vfid_MASK 0x00000FFF
1977#define els_req64_vfid_WORD word12
1978#define els_req64_pri_SHIFT 13
1979#define els_req64_pri_MASK 0x00000007
1980#define els_req64_pri_WORD word12
1981 uint32_t word13;
1982#define els_req64_hopcnt_SHIFT 24
1983#define els_req64_hopcnt_MASK 0x000000ff
1984#define els_req64_hopcnt_WORD word13
1985 uint32_t reserved[2];
1986};
1987
1988struct xmit_els_rsp64_wqe {
1989 struct ulp_bde64 bde;
1990 uint32_t rsvd3;
1991 uint32_t rsvd4;
1992 struct wqe_did wqe_dest;
1993 struct wqe_common wqe_com; /* words 6-11 */
1994 uint32_t rsvd_12_15[4];
1995};
1996
1997struct xmit_bls_rsp64_wqe {
1998 uint32_t payload0;
1999 uint32_t word1;
2000#define xmit_bls_rsp64_rxid_SHIFT 0
2001#define xmit_bls_rsp64_rxid_MASK 0x0000ffff
2002#define xmit_bls_rsp64_rxid_WORD word1
2003#define xmit_bls_rsp64_oxid_SHIFT 16
2004#define xmit_bls_rsp64_oxid_MASK 0x0000ffff
2005#define xmit_bls_rsp64_oxid_WORD word1
2006 uint32_t word2;
2007#define xmit_bls_rsp64_seqcntlo_SHIFT 0
2008#define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff
2009#define xmit_bls_rsp64_seqcntlo_WORD word2
2010#define xmit_bls_rsp64_seqcnthi_SHIFT 16
2011#define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff
2012#define xmit_bls_rsp64_seqcnthi_WORD word2
2013 uint32_t rsrvd3;
2014 uint32_t rsrvd4;
2015 struct wqe_did wqe_dest;
2016 struct wqe_common wqe_com; /* words 6-11 */
2017 uint32_t rsvd_12_15[4];
2018};
2019struct wqe_rctl_dfctl {
2020 uint32_t word5;
2021#define wqe_si_SHIFT 2
2022#define wqe_si_MASK 0x00000001
2023#define wqe_si_WORD word5
2024#define wqe_la_SHIFT 3
2025#define wqe_la_MASK 0x00000001
2026#define wqe_la_WORD word5
2027#define wqe_ls_SHIFT 7
2028#define wqe_ls_MASK 0x00000001
2029#define wqe_ls_WORD word5
2030#define wqe_dfctl_SHIFT 8
2031#define wqe_dfctl_MASK 0x000000ff
2032#define wqe_dfctl_WORD word5
2033#define wqe_type_SHIFT 16
2034#define wqe_type_MASK 0x000000ff
2035#define wqe_type_WORD word5
2036#define wqe_rctl_SHIFT 24
2037#define wqe_rctl_MASK 0x000000ff
2038#define wqe_rctl_WORD word5
2039};
2040
2041struct xmit_seq64_wqe {
2042 struct ulp_bde64 bde;
2043 uint32_t payload_offset;
2044 uint32_t relative_offset;
2045 struct wqe_rctl_dfctl wge_ctl;
2046 struct wqe_common wqe_com; /* words 6-11 */
2047 /* Note: word10 different REVISIT */
2048 uint32_t xmit_len;
2049 uint32_t rsvd_12_15[3];
2050};
2051struct xmit_bcast64_wqe {
2052 struct ulp_bde64 bde;
2053 uint32_t payload_len;
2054 uint32_t rsvd4;
2055 struct wqe_rctl_dfctl wge_ctl; /* word 5 */
2056 struct wqe_common wqe_com; /* words 6-11 */
2057 uint32_t rsvd_12_15[4];
2058};
2059
2060struct gen_req64_wqe {
2061 struct ulp_bde64 bde;
2062 uint32_t command_len;
2063 uint32_t payload_len;
2064 struct wqe_rctl_dfctl wge_ctl; /* word 5 */
2065 struct wqe_common wqe_com; /* words 6-11 */
2066 uint32_t rsvd_12_15[4];
2067};
2068
2069struct create_xri_wqe {
2070 uint32_t rsrvd[5]; /* words 0-4 */
2071 struct wqe_did wqe_dest; /* word 5 */
2072 struct wqe_common wqe_com; /* words 6-11 */
2073 uint32_t rsvd_12_15[4]; /* word 12-15 */
2074};
2075
2076#define T_REQUEST_TAG 3
2077#define T_XRI_TAG 1
2078
2079struct abort_cmd_wqe {
2080 uint32_t rsrvd[3];
2081 uint32_t word3;
2082#define abort_cmd_ia_SHIFT 0
2083#define abort_cmd_ia_MASK 0x00000001
2084#define abort_cmd_ia_WORD word3
2085#define abort_cmd_criteria_SHIFT 8
2086#define abort_cmd_criteria_MASK 0x000000ff
2087#define abort_cmd_criteria_WORD word3
2088 uint32_t rsrvd4;
2089 uint32_t rsrvd5;
2090 struct wqe_common wqe_com; /* words 6-11 */
2091 uint32_t rsvd_12_15[4]; /* word 12-15 */
2092};
2093
2094struct fcp_iwrite64_wqe {
2095 struct ulp_bde64 bde;
2096 uint32_t payload_len;
2097 uint32_t total_xfer_len;
2098 uint32_t initial_xfer_len;
2099 struct wqe_common wqe_com; /* words 6-11 */
2100 uint32_t rsvd_12_15[4]; /* word 12-15 */
2101};
2102
2103struct fcp_iread64_wqe {
2104 struct ulp_bde64 bde;
2105 uint32_t payload_len; /* word 3 */
2106 uint32_t total_xfer_len; /* word 4 */
2107 uint32_t rsrvd5; /* word 5 */
2108 struct wqe_common wqe_com; /* words 6-11 */
2109 uint32_t rsvd_12_15[4]; /* word 12-15 */
2110};
2111
2112struct fcp_icmnd64_wqe {
2113 struct ulp_bde64 bde; /* words 0-2 */
2114 uint32_t rsrvd[3]; /* words 3-5 */
2115 struct wqe_common wqe_com; /* words 6-11 */
2116 uint32_t rsvd_12_15[4]; /* word 12-15 */
2117};
2118
2119
2120union lpfc_wqe {
2121 uint32_t words[16];
2122 struct lpfc_wqe_generic generic;
2123 struct fcp_icmnd64_wqe fcp_icmd;
2124 struct fcp_iread64_wqe fcp_iread;
2125 struct fcp_iwrite64_wqe fcp_iwrite;
2126 struct abort_cmd_wqe abort_cmd;
2127 struct create_xri_wqe create_xri;
2128 struct xmit_bcast64_wqe xmit_bcast64;
2129 struct xmit_seq64_wqe xmit_sequence;
2130 struct xmit_bls_rsp64_wqe xmit_bls_rsp;
2131 struct xmit_els_rsp64_wqe xmit_els_rsp;
2132 struct els_request64_wqe els_req;
2133 struct gen_req64_wqe gen_req;
2134};
2135
2136#define FCP_COMMAND 0x0
2137#define FCP_COMMAND_DATA_OUT 0x1
2138#define ELS_COMMAND_NON_FIP 0xC
2139#define ELS_COMMAND_FIP 0xD
2140#define OTHER_COMMAND 0x8
2141
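For context before the lpfc_init.c changes below: the WQE layouts and command-type codes above are what the SLI4 submission path fills in before posting to a work queue. A purely hypothetical sketch of how they compose for an abort request, using the bf_set() pattern from the earlier sketch; CMD_ABORT_XRI_CX is assumed from lpfc_hw.h and the helper name is invented.

/* Hypothetical illustration, not driver code: build an abort WQE that
 * targets an exchange by its XRI tag. Assumes bf_set() as sketched
 * earlier and CMD_ABORT_XRI_CX from lpfc_hw.h.
 */
#include <linux/string.h>	/* memset */

static void example_build_abort_wqe(union lpfc_wqe *wqe, uint16_t xritag,
				    uint16_t reqtag)
{
	memset(wqe, 0, sizeof(union lpfc_wqe));
	/* Abort by XRI tag rather than by request tag */
	bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
	bf_set(wqe_xri, &wqe->abort_cmd.wqe_com, xritag);
	bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, reqtag);
	bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
	/* Not an ELS or FCP command, so classify it as OTHER_COMMAND */
	bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
}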
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 86d1bdcbf2d8..fc67cc65c63b 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -34,8 +34,10 @@
34#include <scsi/scsi_host.h> 34#include <scsi/scsi_host.h>
35#include <scsi/scsi_transport_fc.h> 35#include <scsi/scsi_transport_fc.h>
36 36
37#include "lpfc_hw4.h"
37#include "lpfc_hw.h" 38#include "lpfc_hw.h"
38#include "lpfc_sli.h" 39#include "lpfc_sli.h"
40#include "lpfc_sli4.h"
39#include "lpfc_nl.h" 41#include "lpfc_nl.h"
40#include "lpfc_disc.h" 42#include "lpfc_disc.h"
41#include "lpfc_scsi.h" 43#include "lpfc_scsi.h"
@@ -51,9 +53,23 @@ char *_dump_buf_dif;
51unsigned long _dump_buf_dif_order; 53unsigned long _dump_buf_dif_order;
52spinlock_t _dump_buf_lock; 54spinlock_t _dump_buf_lock;
53 55
54static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
55static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 56static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
56static int lpfc_post_rcv_buf(struct lpfc_hba *); 57static int lpfc_post_rcv_buf(struct lpfc_hba *);
58static int lpfc_sli4_queue_create(struct lpfc_hba *);
59static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
60static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
61static int lpfc_setup_endian_order(struct lpfc_hba *);
62static int lpfc_sli4_read_config(struct lpfc_hba *);
63static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
64static void lpfc_free_sgl_list(struct lpfc_hba *);
65static int lpfc_init_sgl_list(struct lpfc_hba *);
66static int lpfc_init_active_sgl_array(struct lpfc_hba *);
67static void lpfc_free_active_sgl(struct lpfc_hba *);
68static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
69static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
70static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
71static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
72static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
57 73
58static struct scsi_transport_template *lpfc_transport_template = NULL; 74static struct scsi_transport_template *lpfc_transport_template = NULL;
59static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 75static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -92,7 +108,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
92 return -ENOMEM; 108 return -ENOMEM;
93 } 109 }
94 110
95 mb = &pmb->mb; 111 mb = &pmb->u.mb;
96 phba->link_state = LPFC_INIT_MBX_CMDS; 112 phba->link_state = LPFC_INIT_MBX_CMDS;
97 113
98 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 114 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
@@ -205,6 +221,11 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
205 mb->mbxCommand, mb->mbxStatus); 221 mb->mbxCommand, mb->mbxStatus);
206 mb->un.varDmp.word_cnt = 0; 222 mb->un.varDmp.word_cnt = 0;
207 } 223 }
224 /* dump mem may return a zero when finished or we got a
225 * mailbox error, either way we are done.
226 */
227 if (mb->un.varDmp.word_cnt == 0)
228 break;
208 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 229 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
209 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 230 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
210 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 231 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
@@ -233,7 +254,7 @@ out_free_mbox:
233static void 254static void
234lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 255lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
235{ 256{
236 if (pmboxq->mb.mbxStatus == MBX_SUCCESS) 257 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
237 phba->temp_sensor_support = 1; 258 phba->temp_sensor_support = 1;
238 else 259 else
239 phba->temp_sensor_support = 0; 260 phba->temp_sensor_support = 0;
@@ -260,7 +281,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
260 /* character array used for decoding dist type. */ 281 /* character array used for decoding dist type. */
261 char dist_char[] = "nabx"; 282 char dist_char[] = "nabx";
262 283
263 if (pmboxq->mb.mbxStatus != MBX_SUCCESS) { 284 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
264 mempool_free(pmboxq, phba->mbox_mem_pool); 285 mempool_free(pmboxq, phba->mbox_mem_pool);
265 return; 286 return;
266 } 287 }
@@ -268,7 +289,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
268 prg = (struct prog_id *) &prog_id_word; 289 prg = (struct prog_id *) &prog_id_word;
269 290
270 /* word 7 contain option rom version */ 291 /* word 7 contain option rom version */
271 prog_id_word = pmboxq->mb.un.varWords[7]; 292 prog_id_word = pmboxq->u.mb.un.varWords[7];
272 293
273 /* Decode the Option rom version word to a readable string */ 294 /* Decode the Option rom version word to a readable string */
274 if (prg->dist < 4) 295 if (prg->dist < 4)
@@ -325,7 +346,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
325 phba->link_state = LPFC_HBA_ERROR; 346 phba->link_state = LPFC_HBA_ERROR;
326 return -ENOMEM; 347 return -ENOMEM;
327 } 348 }
328 mb = &pmb->mb; 349 mb = &pmb->u.mb;
329 350
330 /* Get login parameters for NID. */ 351 /* Get login parameters for NID. */
331 lpfc_read_sparam(phba, pmb, 0); 352 lpfc_read_sparam(phba, pmb, 0);
@@ -364,6 +385,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
364 /* Update the fc_host data structures with new wwn. */ 385 /* Update the fc_host data structures with new wwn. */
365 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 386 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
366 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 387 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
388 fc_host_max_npiv_vports(shost) = phba->max_vpi;
367 389
368 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 390 /* If no serial number in VPD data, use low 6 bytes of WWNN */
369 /* This should be consolidated into parse_vpd ? - mr */ 391 /* This should be consolidated into parse_vpd ? - mr */
@@ -406,7 +428,8 @@ lpfc_config_port_post(struct lpfc_hba *phba)
406 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 428 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
407 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) 429 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
408 phba->cfg_hba_queue_depth = 430 phba->cfg_hba_queue_depth =
409 mb->un.varRdConfig.max_xri + 1; 431 (mb->un.varRdConfig.max_xri + 1) -
432 lpfc_sli4_get_els_iocb_cnt(phba);
410 433
411 phba->lmt = mb->un.varRdConfig.lmt; 434 phba->lmt = mb->un.varRdConfig.lmt;
412 435
@@ -460,17 +483,18 @@ lpfc_config_port_post(struct lpfc_hba *phba)
460 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 483 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
461 "0352 Config MSI mailbox command " 484 "0352 Config MSI mailbox command "
462 "failed, mbxCmd x%x, mbxStatus x%x\n", 485 "failed, mbxCmd x%x, mbxStatus x%x\n",
463 pmb->mb.mbxCommand, pmb->mb.mbxStatus); 486 pmb->u.mb.mbxCommand,
487 pmb->u.mb.mbxStatus);
464 mempool_free(pmb, phba->mbox_mem_pool); 488 mempool_free(pmb, phba->mbox_mem_pool);
465 return -EIO; 489 return -EIO;
466 } 490 }
467 } 491 }
468 492
493 spin_lock_irq(&phba->hbalock);
469 /* Initialize ERATT handling flag */ 494 /* Initialize ERATT handling flag */
470 phba->hba_flag &= ~HBA_ERATT_HANDLED; 495 phba->hba_flag &= ~HBA_ERATT_HANDLED;
471 496
472 /* Enable appropriate host interrupts */ 497 /* Enable appropriate host interrupts */
473 spin_lock_irq(&phba->hbalock);
474 status = readl(phba->HCregaddr); 498 status = readl(phba->HCregaddr);
475 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 499 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
476 if (psli->num_rings > 0) 500 if (psli->num_rings > 0)
@@ -571,16 +595,20 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
571{ 595{
572 struct lpfc_vport **vports; 596 struct lpfc_vport **vports;
573 int i; 597 int i;
574 /* Disable interrupts */ 598
575 writel(0, phba->HCregaddr); 599 if (phba->sli_rev <= LPFC_SLI_REV3) {
576 readl(phba->HCregaddr); /* flush */ 600 /* Disable interrupts */
601 writel(0, phba->HCregaddr);
602 readl(phba->HCregaddr); /* flush */
603 }
577 604
578 if (phba->pport->load_flag & FC_UNLOADING) 605 if (phba->pport->load_flag & FC_UNLOADING)
579 lpfc_cleanup_discovery_resources(phba->pport); 606 lpfc_cleanup_discovery_resources(phba->pport);
580 else { 607 else {
581 vports = lpfc_create_vport_work_array(phba); 608 vports = lpfc_create_vport_work_array(phba);
582 if (vports != NULL) 609 if (vports != NULL)
583 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 610 for (i = 0; i <= phba->max_vports &&
611 vports[i] != NULL; i++)
584 lpfc_cleanup_discovery_resources(vports[i]); 612 lpfc_cleanup_discovery_resources(vports[i]);
585 lpfc_destroy_vport_work_array(phba, vports); 613 lpfc_destroy_vport_work_array(phba, vports);
586 } 614 }
@@ -588,7 +616,7 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
588} 616}
589 617
590/** 618/**
591 * lpfc_hba_down_post - Perform lpfc uninitialization after HBA reset 619 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
592 * @phba: pointer to lpfc HBA data structure. 620 * @phba: pointer to lpfc HBA data structure.
593 * 621 *
594 * This routine will do uninitialization after the HBA is reset when bring 622 * This routine will do uninitialization after the HBA is reset when bring
@@ -598,8 +626,8 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
598 * 0 - success. 626 * 0 - success.
599 * Any other value - error. 627 * Any other value - error.
600 **/ 628 **/
601int 629static int
602lpfc_hba_down_post(struct lpfc_hba *phba) 630lpfc_hba_down_post_s3(struct lpfc_hba *phba)
603{ 631{
604 struct lpfc_sli *psli = &phba->sli; 632 struct lpfc_sli *psli = &phba->sli;
605 struct lpfc_sli_ring *pring; 633 struct lpfc_sli_ring *pring;
@@ -642,6 +670,77 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
642 670
643 return 0; 671 return 0;
644} 672}
673/**
674 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
675 * @phba: pointer to lpfc HBA data structure.
676 *
677 * This routine will do uninitialization after the HBA is reset when bring
678 * down the SLI Layer.
679 *
680 * Return codes
681 * 0 - success.
682 * Any other value - error.
683 **/
684static int
685lpfc_hba_down_post_s4(struct lpfc_hba *phba)
686{
687 struct lpfc_scsi_buf *psb, *psb_next;
688 LIST_HEAD(aborts);
689 int ret;
690 unsigned long iflag = 0;
691 ret = lpfc_hba_down_post_s3(phba);
692 if (ret)
693 return ret;
694 /* At this point in time the HBA is either reset or DOA. Either
695 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
696 * on the lpfc_sgl_list so that it can either be freed if the
697 * driver is unloading or reposted if the driver is restarting
698 * the port.
699 */
700 spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */
701 /* scsl_buf_list */
702 /* abts_sgl_list_lock required because worker thread uses this
703 * list.
704 */
705 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
706 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
707 &phba->sli4_hba.lpfc_sgl_list);
708 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
709 /* abts_scsi_buf_list_lock required because worker thread uses this
710 * list.
711 */
712 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
713 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
714 &aborts);
715 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
716 spin_unlock_irq(&phba->hbalock);
717
718 list_for_each_entry_safe(psb, psb_next, &aborts, list) {
719 psb->pCmd = NULL;
720 psb->status = IOSTAT_SUCCESS;
721 }
722 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
723 list_splice(&aborts, &phba->lpfc_scsi_buf_list);
724 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
725 return 0;
726}
727
728/**
729 * lpfc_hba_down_post - Wrapper func for hba down post routine
730 * @phba: pointer to lpfc HBA data structure.
731 *
732 * This routine wraps the actual SLI3 or SLI4 routine for performing
733 * uninitialization after the HBA is reset when bring down the SLI Layer.
734 *
735 * Return codes
736 * 0 - success.
737 * Any other value - error.
738 **/
739int
740lpfc_hba_down_post(struct lpfc_hba *phba)
741{
742 return (*phba->lpfc_hba_down_post)(phba);
743}
645 744
646/** 745/**
647 * lpfc_hb_timeout - The HBA-timer timeout handler 746 * lpfc_hb_timeout - The HBA-timer timeout handler
@@ -809,7 +908,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
809 "taking this port offline.\n"); 908 "taking this port offline.\n");
810 909
811 spin_lock_irq(&phba->hbalock); 910 spin_lock_irq(&phba->hbalock);
812 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 911 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
813 spin_unlock_irq(&phba->hbalock); 912 spin_unlock_irq(&phba->hbalock);
814 913
815 lpfc_offline_prep(phba); 914 lpfc_offline_prep(phba);
@@ -834,13 +933,15 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
834 struct lpfc_sli *psli = &phba->sli; 933 struct lpfc_sli *psli = &phba->sli;
835 934
836 spin_lock_irq(&phba->hbalock); 935 spin_lock_irq(&phba->hbalock);
837 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 936 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
838 spin_unlock_irq(&phba->hbalock); 937 spin_unlock_irq(&phba->hbalock);
839 lpfc_offline_prep(phba); 938 lpfc_offline_prep(phba);
840 939
841 lpfc_offline(phba); 940 lpfc_offline(phba);
842 lpfc_reset_barrier(phba); 941 lpfc_reset_barrier(phba);
942 spin_lock_irq(&phba->hbalock);
843 lpfc_sli_brdreset(phba); 943 lpfc_sli_brdreset(phba);
944 spin_unlock_irq(&phba->hbalock);
844 lpfc_hba_down_post(phba); 945 lpfc_hba_down_post(phba);
845 lpfc_sli_brdready(phba, HS_MBRDY); 946 lpfc_sli_brdready(phba, HS_MBRDY);
846 lpfc_unblock_mgmt_io(phba); 947 lpfc_unblock_mgmt_io(phba);
@@ -849,6 +950,25 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
849} 950}
850 951
851/** 952/**
953 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
954 * @phba: pointer to lpfc hba data structure.
955 *
956 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
957 * other than Port Error 6 has been detected.
958 **/
959static void
960lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
961{
962 lpfc_offline_prep(phba);
963 lpfc_offline(phba);
964 lpfc_sli4_brdreset(phba);
965 lpfc_hba_down_post(phba);
966 lpfc_sli4_post_status_check(phba);
967 lpfc_unblock_mgmt_io(phba);
968 phba->link_state = LPFC_HBA_ERROR;
969}
970
971/**
852 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 972 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
853 * @phba: pointer to lpfc hba data structure. 973 * @phba: pointer to lpfc hba data structure.
854 * 974 *
@@ -864,6 +984,16 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
864 struct lpfc_sli_ring *pring; 984 struct lpfc_sli_ring *pring;
865 struct lpfc_sli *psli = &phba->sli; 985 struct lpfc_sli *psli = &phba->sli;
866 986
987 /* If the pci channel is offline, ignore possible errors,
988 * since we cannot communicate with the pci card anyway.
989 */
990 if (pci_channel_offline(phba->pcidev)) {
991 spin_lock_irq(&phba->hbalock);
992 phba->hba_flag &= ~DEFER_ERATT;
993 spin_unlock_irq(&phba->hbalock);
994 return;
995 }
996
867 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 997 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
868 "0479 Deferred Adapter Hardware Error " 998 "0479 Deferred Adapter Hardware Error "
869 "Data: x%x x%x x%x\n", 999 "Data: x%x x%x x%x\n",
@@ -871,7 +1001,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
871 phba->work_status[0], phba->work_status[1]); 1001 phba->work_status[0], phba->work_status[1]);
872 1002
873 spin_lock_irq(&phba->hbalock); 1003 spin_lock_irq(&phba->hbalock);
874 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 1004 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
875 spin_unlock_irq(&phba->hbalock); 1005 spin_unlock_irq(&phba->hbalock);
876 1006
877 1007
@@ -909,13 +1039,30 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
909 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) 1039 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
910 phba->work_hs = old_host_status & ~HS_FFER1; 1040 phba->work_hs = old_host_status & ~HS_FFER1;
911 1041
1042 spin_lock_irq(&phba->hbalock);
912 phba->hba_flag &= ~DEFER_ERATT; 1043 phba->hba_flag &= ~DEFER_ERATT;
1044 spin_unlock_irq(&phba->hbalock);
913 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 1045 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
914 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 1046 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
915} 1047}
916 1048
1049static void
1050lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1051{
1052 struct lpfc_board_event_header board_event;
1053 struct Scsi_Host *shost;
1054
1055 board_event.event_type = FC_REG_BOARD_EVENT;
1056 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1057 shost = lpfc_shost_from_vport(phba->pport);
1058 fc_host_post_vendor_event(shost, fc_get_event_number(),
1059 sizeof(board_event),
1060 (char *) &board_event,
1061 LPFC_NL_VENDOR_ID);
1062}
1063
917/** 1064/**
918 * lpfc_handle_eratt - The HBA hardware error handler 1065 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
919 * @phba: pointer to lpfc hba data structure. 1066 * @phba: pointer to lpfc hba data structure.
920 * 1067 *
921 * This routine is invoked to handle the following HBA hardware error 1068 * This routine is invoked to handle the following HBA hardware error
@@ -924,8 +1071,8 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
924 * 2 - DMA ring index out of range 1071 * 2 - DMA ring index out of range
925 * 3 - Mailbox command came back as unknown 1072 * 3 - Mailbox command came back as unknown
926 **/ 1073 **/
927void 1074static void
928lpfc_handle_eratt(struct lpfc_hba *phba) 1075lpfc_handle_eratt_s3(struct lpfc_hba *phba)
929{ 1076{
930 struct lpfc_vport *vport = phba->pport; 1077 struct lpfc_vport *vport = phba->pport;
931 struct lpfc_sli *psli = &phba->sli; 1078 struct lpfc_sli *psli = &phba->sli;
@@ -934,24 +1081,23 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
934 unsigned long temperature; 1081 unsigned long temperature;
935 struct temp_event temp_event_data; 1082 struct temp_event temp_event_data;
936 struct Scsi_Host *shost; 1083 struct Scsi_Host *shost;
937 struct lpfc_board_event_header board_event;
938 1084
939 /* If the pci channel is offline, ignore possible errors, 1085 /* If the pci channel is offline, ignore possible errors,
940 * since we cannot communicate with the pci card anyway. */ 1086 * since we cannot communicate with the pci card anyway.
941 if (pci_channel_offline(phba->pcidev)) 1087 */
1088 if (pci_channel_offline(phba->pcidev)) {
1089 spin_lock_irq(&phba->hbalock);
1090 phba->hba_flag &= ~DEFER_ERATT;
1091 spin_unlock_irq(&phba->hbalock);
942 return; 1092 return;
1093 }
1094
943 /* If resets are disabled then leave the HBA alone and return */ 1095 /* If resets are disabled then leave the HBA alone and return */
944 if (!phba->cfg_enable_hba_reset) 1096 if (!phba->cfg_enable_hba_reset)
945 return; 1097 return;
946 1098
947 /* Send an internal error event to mgmt application */ 1099 /* Send an internal error event to mgmt application */
948 board_event.event_type = FC_REG_BOARD_EVENT; 1100 lpfc_board_errevt_to_mgmt(phba);
949 board_event.subcategory = LPFC_EVENT_PORTINTERR;
950 shost = lpfc_shost_from_vport(phba->pport);
951 fc_host_post_vendor_event(shost, fc_get_event_number(),
952 sizeof(board_event),
953 (char *) &board_event,
954 LPFC_NL_VENDOR_ID);
955 1101
956 if (phba->hba_flag & DEFER_ERATT) 1102 if (phba->hba_flag & DEFER_ERATT)
957 lpfc_handle_deferred_eratt(phba); 1103 lpfc_handle_deferred_eratt(phba);
@@ -965,7 +1111,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
965 phba->work_status[0], phba->work_status[1]); 1111 phba->work_status[0], phba->work_status[1]);
966 1112
967 spin_lock_irq(&phba->hbalock); 1113 spin_lock_irq(&phba->hbalock);
968 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 1114 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
969 spin_unlock_irq(&phba->hbalock); 1115 spin_unlock_irq(&phba->hbalock);
970 1116
971 /* 1117 /*
@@ -1037,6 +1183,65 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
1037} 1183}
1038 1184
1039/** 1185/**
1186 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1187 * @phba: pointer to lpfc hba data structure.
1188 *
1189 * This routine is invoked to handle the SLI4 HBA hardware error attention
1190 * conditions.
1191 **/
1192static void
1193lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1194{
1195 struct lpfc_vport *vport = phba->pport;
1196 uint32_t event_data;
1197 struct Scsi_Host *shost;
1198
1199 /* If the pci channel is offline, ignore possible errors, since
1200 * we cannot communicate with the pci card anyway.
1201 */
1202 if (pci_channel_offline(phba->pcidev))
1203 return;
1204 /* If resets are disabled then leave the HBA alone and return */
1205 if (!phba->cfg_enable_hba_reset)
1206 return;
1207
1208 /* Send an internal error event to mgmt application */
1209 lpfc_board_errevt_to_mgmt(phba);
1210
1211 /* For now, the actual action for SLI4 device handling is not
1212 * specified yet, so just treat it as an adapter hardware failure
1213 */
1214 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1215 "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
1216 phba->work_status[0], phba->work_status[1]);
1217
1218 event_data = FC_REG_DUMP_EVENT;
1219 shost = lpfc_shost_from_vport(vport);
1220 fc_host_post_vendor_event(shost, fc_get_event_number(),
1221 sizeof(event_data), (char *) &event_data,
1222 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1223
1224 lpfc_sli4_offline_eratt(phba);
1225}
1226
1227/**
1228 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1229 * @phba: pointer to lpfc HBA data structure.
1230 *
1231 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
1232 * routine from the API jump table function pointer from the lpfc_hba struct.
1233 *
1234 * Return codes
1235 * 0 - success.
1236 * Any other value - error.
1237 **/
1238void
1239lpfc_handle_eratt(struct lpfc_hba *phba)
1240{
1241 (*phba->lpfc_handle_eratt)(phba);
1242}
1243
1244/**
1040 * lpfc_handle_latt - The HBA link event handler 1245 * lpfc_handle_latt - The HBA link event handler
1041 * @phba: pointer to lpfc hba data structure. 1246 * @phba: pointer to lpfc hba data structure.
1042 * 1247 *
@@ -1137,7 +1342,7 @@ lpfc_handle_latt_err_exit:
1137 * 0 - pointer to the VPD passed in is NULL 1342 * 0 - pointer to the VPD passed in is NULL
1138 * 1 - success 1343 * 1 - success
1139 **/ 1344 **/
1140static int 1345int
1141lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 1346lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1142{ 1347{
1143 uint8_t lenlo, lenhi; 1348 uint8_t lenlo, lenhi;
@@ -1292,6 +1497,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1292 uint16_t dev_id = phba->pcidev->device; 1497 uint16_t dev_id = phba->pcidev->device;
1293 int max_speed; 1498 int max_speed;
1294 int GE = 0; 1499 int GE = 0;
1500 int oneConnect = 0; /* default is not a oneConnect */
1295 struct { 1501 struct {
1296 char * name; 1502 char * name;
1297 int max_speed; 1503 int max_speed;
@@ -1437,6 +1643,10 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1437 case PCI_DEVICE_ID_PROTEUS_S: 1643 case PCI_DEVICE_ID_PROTEUS_S:
1438 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"}; 1644 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
1439 break; 1645 break;
1646 case PCI_DEVICE_ID_TIGERSHARK:
1647 oneConnect = 1;
1648 m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
1649 break;
1440 default: 1650 default:
1441 m = (typeof(m)){ NULL }; 1651 m = (typeof(m)){ NULL };
1442 break; 1652 break;
@@ -1444,13 +1654,24 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1444 1654
1445 if (mdp && mdp[0] == '\0') 1655 if (mdp && mdp[0] == '\0')
1446 snprintf(mdp, 79,"%s", m.name); 1656 snprintf(mdp, 79,"%s", m.name);
1447 if (descp && descp[0] == '\0') 1657 /* oneConnect hba requires special processing, they are all initiators
1448 snprintf(descp, 255, 1658 * and we put the port number on the end
1449 "Emulex %s %d%s %s %s", 1659 */
1450 m.name, m.max_speed, 1660 if (descp && descp[0] == '\0') {
1451 (GE) ? "GE" : "Gb", 1661 if (oneConnect)
1452 m.bus, 1662 snprintf(descp, 255,
1453 (GE) ? "FCoE Adapter" : "Fibre Channel Adapter"); 1663 "Emulex OneConnect %s, FCoE Initiator, Port %s",
1664 m.name,
1665 phba->Port);
1666 else
1667 snprintf(descp, 255,
1668 "Emulex %s %d%s %s %s",
1669 m.name, m.max_speed,
1670 (GE) ? "GE" : "Gb",
1671 m.bus,
1672 (GE) ? "FCoE Adapter" :
1673 "Fibre Channel Adapter");
1674 }
1454} 1675}
1455 1676
1456/** 1677/**
@@ -1533,7 +1754,8 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1533 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 1754 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1534 icmd->ulpLe = 1; 1755 icmd->ulpLe = 1;
1535 1756
1536 if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) { 1757 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
1758 IOCB_ERROR) {
1537 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1759 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1538 kfree(mp1); 1760 kfree(mp1);
1539 cnt++; 1761 cnt++;
@@ -1761,7 +1983,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
1761 * Lets wait for this to happen, if needed. 1983 * Lets wait for this to happen, if needed.
1762 */ 1984 */
1763 while (!list_empty(&vport->fc_nodes)) { 1985 while (!list_empty(&vport->fc_nodes)) {
1764
1765 if (i++ > 3000) { 1986 if (i++ > 3000) {
1766 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1987 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1767 "0233 Nodelist not empty\n"); 1988 "0233 Nodelist not empty\n");
@@ -1782,7 +2003,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
1782 /* Wait for any activity on ndlps to settle */ 2003 /* Wait for any activity on ndlps to settle */
1783 msleep(10); 2004 msleep(10);
1784 } 2005 }
1785 return;
1786} 2006}
1787 2007
1788/** 2008/**
@@ -1803,22 +2023,36 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
1803} 2023}
1804 2024
1805/** 2025/**
1806 * lpfc_stop_phba_timers - Stop all the timers associated with an HBA 2026 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
1807 * @phba: pointer to lpfc hba data structure. 2027 * @phba: pointer to lpfc hba data structure.
1808 * 2028 *
1809 * This routine stops all the timers associated with a HBA. This function is 2029 * This routine stops all the timers associated with a HBA. This function is
1810 * invoked before either putting a HBA offline or unloading the driver. 2030 * invoked before either putting a HBA offline or unloading the driver.
1811 **/ 2031 **/
1812static void 2032void
1813lpfc_stop_phba_timers(struct lpfc_hba *phba) 2033lpfc_stop_hba_timers(struct lpfc_hba *phba)
1814{ 2034{
1815 del_timer_sync(&phba->fcp_poll_timer);
1816 lpfc_stop_vport_timers(phba->pport); 2035 lpfc_stop_vport_timers(phba->pport);
1817 del_timer_sync(&phba->sli.mbox_tmo); 2036 del_timer_sync(&phba->sli.mbox_tmo);
1818 del_timer_sync(&phba->fabric_block_timer); 2037 del_timer_sync(&phba->fabric_block_timer);
1819 phba->hb_outstanding = 0;
1820 del_timer_sync(&phba->hb_tmofunc);
1821 del_timer_sync(&phba->eratt_poll); 2038 del_timer_sync(&phba->eratt_poll);
2039 del_timer_sync(&phba->hb_tmofunc);
2040 phba->hb_outstanding = 0;
2041
2042 switch (phba->pci_dev_grp) {
2043 case LPFC_PCI_DEV_LP:
2044 /* Stop any LightPulse device specific driver timers */
2045 del_timer_sync(&phba->fcp_poll_timer);
2046 break;
2047 case LPFC_PCI_DEV_OC:
2048 /* Stop any OneConnect device specific driver timers */
2049 break;
2050 default:
2051 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2052 "0297 Invalid device group (x%x)\n",
2053 phba->pci_dev_grp);
2054 break;
2055 }
1822 return; 2056 return;
1823} 2057}
1824 2058
@@ -1878,14 +2112,21 @@ lpfc_online(struct lpfc_hba *phba)
1878 return 1; 2112 return 1;
1879 } 2113 }
1880 2114
1881 if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */ 2115 if (phba->sli_rev == LPFC_SLI_REV4) {
1882 lpfc_unblock_mgmt_io(phba); 2116 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
1883 return 1; 2117 lpfc_unblock_mgmt_io(phba);
2118 return 1;
2119 }
2120 } else {
2121 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2122 lpfc_unblock_mgmt_io(phba);
2123 return 1;
2124 }
1884 } 2125 }
1885 2126
1886 vports = lpfc_create_vport_work_array(phba); 2127 vports = lpfc_create_vport_work_array(phba);
1887 if (vports != NULL) 2128 if (vports != NULL)
1888 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2129 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1889 struct Scsi_Host *shost; 2130 struct Scsi_Host *shost;
1890 shost = lpfc_shost_from_vport(vports[i]); 2131 shost = lpfc_shost_from_vport(vports[i]);
1891 spin_lock_irq(shost->host_lock); 2132 spin_lock_irq(shost->host_lock);
@@ -1947,11 +2188,12 @@ lpfc_offline_prep(struct lpfc_hba * phba)
1947 /* Issue an unreg_login to all nodes on all vports */ 2188 /* Issue an unreg_login to all nodes on all vports */
1948 vports = lpfc_create_vport_work_array(phba); 2189 vports = lpfc_create_vport_work_array(phba);
1949 if (vports != NULL) { 2190 if (vports != NULL) {
1950 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2191 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1951 struct Scsi_Host *shost; 2192 struct Scsi_Host *shost;
1952 2193
1953 if (vports[i]->load_flag & FC_UNLOADING) 2194 if (vports[i]->load_flag & FC_UNLOADING)
1954 continue; 2195 continue;
2196 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
1955 shost = lpfc_shost_from_vport(vports[i]); 2197 shost = lpfc_shost_from_vport(vports[i]);
1956 list_for_each_entry_safe(ndlp, next_ndlp, 2198 list_for_each_entry_safe(ndlp, next_ndlp,
1957 &vports[i]->fc_nodes, 2199 &vports[i]->fc_nodes,
@@ -1975,7 +2217,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
1975 } 2217 }
1976 lpfc_destroy_vport_work_array(phba, vports); 2218 lpfc_destroy_vport_work_array(phba, vports);
1977 2219
1978 lpfc_sli_flush_mbox_queue(phba); 2220 lpfc_sli_mbox_sys_shutdown(phba);
1979} 2221}
1980 2222
1981/** 2223/**
@@ -1996,11 +2238,11 @@ lpfc_offline(struct lpfc_hba *phba)
1996 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2238 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
1997 return; 2239 return;
1998 2240
1999 /* stop all timers associated with this hba */ 2241 /* stop port and all timers associated with this hba */
2000 lpfc_stop_phba_timers(phba); 2242 lpfc_stop_port(phba);
2001 vports = lpfc_create_vport_work_array(phba); 2243 vports = lpfc_create_vport_work_array(phba);
2002 if (vports != NULL) 2244 if (vports != NULL)
2003 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 2245 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2004 lpfc_stop_vport_timers(vports[i]); 2246 lpfc_stop_vport_timers(vports[i]);
2005 lpfc_destroy_vport_work_array(phba, vports); 2247 lpfc_destroy_vport_work_array(phba, vports);
2006 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2248 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -2013,7 +2255,7 @@ lpfc_offline(struct lpfc_hba *phba)
2013 spin_unlock_irq(&phba->hbalock); 2255 spin_unlock_irq(&phba->hbalock);
2014 vports = lpfc_create_vport_work_array(phba); 2256 vports = lpfc_create_vport_work_array(phba);
2015 if (vports != NULL) 2257 if (vports != NULL)
2016 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2258 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2017 shost = lpfc_shost_from_vport(vports[i]); 2259 shost = lpfc_shost_from_vport(vports[i]);
2018 spin_lock_irq(shost->host_lock); 2260 spin_lock_irq(shost->host_lock);
2019 vports[i]->work_port_events = 0; 2261 vports[i]->work_port_events = 0;
@@ -2106,6 +2348,10 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2106 shost->max_lun = vport->cfg_max_luns; 2348 shost->max_lun = vport->cfg_max_luns;
2107 shost->this_id = -1; 2349 shost->this_id = -1;
2108 shost->max_cmd_len = 16; 2350 shost->max_cmd_len = 16;
2351 if (phba->sli_rev == LPFC_SLI_REV4) {
2352 shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
2353 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2354 }
2109 2355
2110 /* 2356 /*
2111 * Set initial can_queue value since 0 is no longer supported and 2357 * Set initial can_queue value since 0 is no longer supported and
@@ -2123,6 +2369,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2123 2369
2124 /* Initialize all internally managed lists. */ 2370 /* Initialize all internally managed lists. */
2125 INIT_LIST_HEAD(&vport->fc_nodes); 2371 INIT_LIST_HEAD(&vport->fc_nodes);
2372 INIT_LIST_HEAD(&vport->rcv_buffer_list);
2126 spin_lock_init(&vport->work_port_lock); 2373 spin_lock_init(&vport->work_port_lock);
2127 2374
2128 init_timer(&vport->fc_disctmo); 2375 init_timer(&vport->fc_disctmo);
@@ -2314,15 +2561,3462 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
2314} 2561}
2315 2562
2316/** 2563/**
2317 * lpfc_enable_msix - Enable MSI-X interrupt mode 2564 * lpfc_stop_port_s3 - Stop SLI3 device port
2565 * @phba: pointer to lpfc hba data structure.
2566 *
2567 * This routine is invoked to stop an SLI3 device port, it stops the device
2568 * from generating interrupts and stops the device driver's timers for the
2569 * device.
2570 **/
2571static void
2572lpfc_stop_port_s3(struct lpfc_hba *phba)
2573{
2574 /* Clear all interrupt enable conditions */
2575 writel(0, phba->HCregaddr);
2576 readl(phba->HCregaddr); /* flush */
2577 /* Clear all pending interrupts */
2578 writel(0xffffffff, phba->HAregaddr);
2579 readl(phba->HAregaddr); /* flush */
2580
2581 /* Reset some HBA SLI setup states */
2582 lpfc_stop_hba_timers(phba);
2583 phba->pport->work_port_events = 0;
2584}
2585
2586/**
2587 * lpfc_stop_port_s4 - Stop SLI4 device port
2588 * @phba: pointer to lpfc hba data structure.
2589 *
2590 * This routine is invoked to stop an SLI4 device port; it stops the device
2591 * from generating interrupts and stops the device driver's timers for the
2592 * device.
2593 **/
2594static void
2595lpfc_stop_port_s4(struct lpfc_hba *phba)
2596{
2597 /* Reset some HBA SLI4 setup states */
2598 lpfc_stop_hba_timers(phba);
2599 phba->pport->work_port_events = 0;
2600 phba->sli4_hba.intr_enable = 0;
2601 /* Hard clear it for now, shall have more graceful way to wait later */
2602 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2603}
2604
2605/**
2606 * lpfc_stop_port - Wrapper function for stopping hba port
2607 * @phba: Pointer to HBA context object.
2608 *
2609 * This routine wraps the actual SLI3 or SLI4 hba stop port routine via
2610 * the API jump table function pointer in the lpfc_hba struct.
2611 **/
2612void
2613lpfc_stop_port(struct lpfc_hba *phba)
2614{
2615 phba->lpfc_stop_port(phba);
2616}
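
The wrapper above is one instance of the per-revision jump table used throughout this file: the SLI-3 or SLI-4 routine is bound once at setup time (see lpfc_init_api_table_setup() further down) and every caller dispatches through the function pointer in the lpfc_hba struct. The stand-alone sketch below illustrates only that pattern; demo_hba, demo_stop_s3, demo_stop_s4 and demo_api_setup are hypothetical stand-ins, not driver symbols.

#include <stdio.h>

/* Hypothetical stand-in for the lpfc_hba jump-table slot. */
struct demo_hba {
	int sli_rev;				/* 3 or 4, chosen at probe time */
	void (*stop_port)(struct demo_hba *);	/* per-revision routine */
};

static void demo_stop_s3(struct demo_hba *hba)
{
	printf("SLI-%d stop: mask interrupts, clear attention, stop timers\n",
	       hba->sli_rev);
}

static void demo_stop_s4(struct demo_hba *hba)
{
	printf("SLI-%d stop: stop timers, drop intr_enable, clear mbox-active\n",
	       hba->sli_rev);
}

/* Rough analogue of lpfc_api_table_setup(): bind the slot once per device group. */
static void demo_api_setup(struct demo_hba *hba)
{
	hba->stop_port = (hba->sli_rev == 4) ? demo_stop_s4 : demo_stop_s3;
}

int main(void)
{
	struct demo_hba hba = { .sli_rev = 4 };

	demo_api_setup(&hba);
	hba.stop_port(&hba);	/* what lpfc_stop_port() does for the real hba */
	return 0;
}
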
2617
2618/**
2619 * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port.
2620 * @phba: pointer to lpfc hba data structure.
2621 *
2622 * This routine is invoked to remove the driver default fcf record from
2623 * the port. This routine currently acts on FCF Index 0.
2624 *
2625 **/
2626void
2627lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2628{
2629 int rc = 0;
2630 LPFC_MBOXQ_t *mboxq;
2631 struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2632 uint32_t mbox_tmo, req_len;
2633 uint32_t shdr_status, shdr_add_status;
2634
2635 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2636 if (!mboxq) {
2637 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2638 "2020 Failed to allocate mbox for DEL_FCF cmd\n");
2639 return;
2640 }
2641
2642 req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2643 sizeof(struct lpfc_sli4_cfg_mhdr);
2644 rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2645 LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2646 req_len, LPFC_SLI4_MBX_EMBED);
2647 /*
2648 * In phase 1, there is a single FCF index, 0. In phase 2, the driver
2649 * supports multiple FCF indices.
2650 */
2651 del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2652 bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2653 bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2654 phba->fcf.fcf_indx);
2655
2656 if (!phba->sli4_hba.intr_enable)
2657 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2658 else {
2659 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2660 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2661 }
2662 /* The IOCTL status is embedded in the mailbox subheader. */
2663 shdr_status = bf_get(lpfc_mbox_hdr_status,
2664 &del_fcf_record->header.cfg_shdr.response);
2665 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2666 &del_fcf_record->header.cfg_shdr.response);
2667 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2668 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2669 "2516 DEL FCF of default FCF Index failed "
2670 "mbx status x%x, status x%x add_status x%x\n",
2671 rc, shdr_status, shdr_add_status);
2672 }
2673 if (rc != MBX_TIMEOUT)
2674 mempool_free(mboxq, phba->mbox_mem_pool);
2675}
2676
2677/**
2678 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
2679 * @phba: pointer to lpfc hba data structure.
2680 * @acqe_link: pointer to the async link completion queue entry.
2681 *
2682 * This routine is to parse the SLI4 link-attention link fault code and
2683 * translate it into the base driver's read link attention mailbox command
2684 * status.
2685 *
2686 * Return: Link-attention status in terms of base driver's coding.
2687 **/
2688static uint16_t
2689lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
2690 struct lpfc_acqe_link *acqe_link)
2691{
2692 uint16_t latt_fault;
2693
2694 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
2695 case LPFC_ASYNC_LINK_FAULT_NONE:
2696 case LPFC_ASYNC_LINK_FAULT_LOCAL:
2697 case LPFC_ASYNC_LINK_FAULT_REMOTE:
2698 latt_fault = 0;
2699 break;
2700 default:
2701 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2702 "0398 Invalid link fault code: x%x\n",
2703 bf_get(lpfc_acqe_link_fault, acqe_link));
2704 latt_fault = MBXERR_ERROR;
2705 break;
2706 }
2707 return latt_fault;
2708}
2709
2710/**
2711 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
2712 * @phba: pointer to lpfc hba data structure.
2713 * @acqe_link: pointer to the async link completion queue entry.
2714 *
2715 * This routine is to parse the SLI4 link attention type and translate it
2716 * into the base driver's link attention type coding.
2717 *
2718 * Return: Link attention type in terms of base driver's coding.
2719 **/
2720static uint8_t
2721lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
2722 struct lpfc_acqe_link *acqe_link)
2723{
2724 uint8_t att_type;
2725
2726 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
2727 case LPFC_ASYNC_LINK_STATUS_DOWN:
2728 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
2729 att_type = AT_LINK_DOWN;
2730 break;
2731 case LPFC_ASYNC_LINK_STATUS_UP:
2732 /* Ignore physical link up events - wait for logical link up */
2733 att_type = AT_RESERVED;
2734 break;
2735 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
2736 att_type = AT_LINK_UP;
2737 break;
2738 default:
2739 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2740 "0399 Invalid link attention type: x%x\n",
2741 bf_get(lpfc_acqe_link_status, acqe_link));
2742 att_type = AT_RESERVED;
2743 break;
2744 }
2745 return att_type;
2746}
2747
2748/**
2749 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
2750 * @phba: pointer to lpfc hba data structure.
2751 * @acqe_link: pointer to the async link completion queue entry.
2752 *
2753 * This routine is to parse the SLI4 link-attention link speed and translate
2754 * it into the base driver's link-attention link speed coding.
2755 *
2756 * Return: Link-attention link speed in terms of base driver's coding.
2757 **/
2758static uint8_t
2759lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
2760 struct lpfc_acqe_link *acqe_link)
2761{
2762 uint8_t link_speed;
2763
2764 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
2765 case LPFC_ASYNC_LINK_SPEED_ZERO:
2766 link_speed = LA_UNKNW_LINK;
2767 break;
2768 case LPFC_ASYNC_LINK_SPEED_10MBPS:
2769 link_speed = LA_UNKNW_LINK;
2770 break;
2771 case LPFC_ASYNC_LINK_SPEED_100MBPS:
2772 link_speed = LA_UNKNW_LINK;
2773 break;
2774 case LPFC_ASYNC_LINK_SPEED_1GBPS:
2775 link_speed = LA_1GHZ_LINK;
2776 break;
2777 case LPFC_ASYNC_LINK_SPEED_10GBPS:
2778 link_speed = LA_10GHZ_LINK;
2779 break;
2780 default:
2781 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2782 "0483 Invalid link-attention link speed: x%x\n",
2783 bf_get(lpfc_acqe_link_speed, acqe_link));
2784 link_speed = LA_UNKNW_LINK;
2785 break;
2786 }
2787 return link_speed;
2788}
2789
2790/**
2791 * lpfc_sli4_async_link_evt - Process the asynchronous link event
2792 * @phba: pointer to lpfc hba data structure.
2793 * @acqe_link: pointer to the async link completion queue entry.
2794 *
2795 * This routine is to handle the SLI4 asynchronous link event.
2796 **/
2797static void
2798lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
2799 struct lpfc_acqe_link *acqe_link)
2800{
2801 struct lpfc_dmabuf *mp;
2802 LPFC_MBOXQ_t *pmb;
2803 MAILBOX_t *mb;
2804 READ_LA_VAR *la;
2805 uint8_t att_type;
2806
2807 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
2808 if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
2809 return;
2810 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2811 if (!pmb) {
2812 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2813 "0395 The mboxq allocation failed\n");
2814 return;
2815 }
2816 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2817 if (!mp) {
2818 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2819 "0396 The lpfc_dmabuf allocation failed\n");
2820 goto out_free_pmb;
2821 }
2822 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2823 if (!mp->virt) {
2824 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2825 "0397 The mbuf allocation failed\n");
2826 goto out_free_dmabuf;
2827 }
2828
2829 /* Cleanup any outstanding ELS commands */
2830 lpfc_els_flush_all_cmd(phba);
2831
2832 /* Block ELS IOCBs until we have processed the link event */
2833 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2834
2835 /* Update link event statistics */
2836 phba->sli.slistat.link_event++;
2837
2838 /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
2839 lpfc_read_la(phba, pmb, mp);
2840 pmb->vport = phba->pport;
2841
2842 /* Parse and translate status field */
2843 mb = &pmb->u.mb;
2844 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
2845
2846 /* Parse and translate link attention fields */
2847 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
2848 la->eventTag = acqe_link->event_tag;
2849 la->attType = att_type;
2850 la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
2851
2852 /* Fake the following irrelevant fields */
2853 la->topology = TOPOLOGY_PT_PT;
2854 la->granted_AL_PA = 0;
2855 la->il = 0;
2856 la->pb = 0;
2857 la->fa = 0;
2858 la->mm = 0;
2859
2860 /* Keep the link status for extra SLI4 state machine reference */
2861 phba->sli4_hba.link_state.speed =
2862 bf_get(lpfc_acqe_link_speed, acqe_link);
2863 phba->sli4_hba.link_state.duplex =
2864 bf_get(lpfc_acqe_link_duplex, acqe_link);
2865 phba->sli4_hba.link_state.status =
2866 bf_get(lpfc_acqe_link_status, acqe_link);
2867 phba->sli4_hba.link_state.physical =
2868 bf_get(lpfc_acqe_link_physical, acqe_link);
2869 phba->sli4_hba.link_state.fault =
2870 bf_get(lpfc_acqe_link_fault, acqe_link);
2871
2872 /* Invoke the lpfc_handle_latt mailbox command callback function */
2873 lpfc_mbx_cmpl_read_la(phba, pmb);
2874
2875 return;
2876
2877out_free_dmabuf:
2878 kfree(mp);
2879out_free_pmb:
2880 mempool_free(pmb, phba->mbox_mem_pool);
2881}
2882
2883/**
2884 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
2885 * @phba: pointer to lpfc hba data structure.
2886 * @acqe_fcoe: pointer to the async fcoe completion queue entry.
2887 *
2888 * This routine is to handle the SLI4 asynchronous fcoe event.
2889 **/
2890static void
2891lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2892 struct lpfc_acqe_fcoe *acqe_fcoe)
2893{
2894 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
2895 int rc;
2896
2897 switch (event_type) {
2898 case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
2899 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2900 "2546 New FCF found index 0x%x tag 0x%x \n",
2901 acqe_fcoe->fcf_index,
2902 acqe_fcoe->event_tag);
2903 /*
2904 * If the current FCF is in discovered state,
2905 * do nothing.
2906 */
2907 spin_lock_irq(&phba->hbalock);
2908 if (phba->fcf.fcf_flag & FCF_DISCOVERED) {
2909 spin_unlock_irq(&phba->hbalock);
2910 break;
2911 }
2912 spin_unlock_irq(&phba->hbalock);
2913
2914 /* Read the FCF table and re-discover SAN. */
2915 rc = lpfc_sli4_read_fcf_record(phba,
2916 LPFC_FCOE_FCF_GET_FIRST);
2917 if (rc)
2918 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2919 "2547 Read FCF record failed 0x%x\n",
2920 rc);
2921 break;
2922
2923 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
2924 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2925 "2548 FCF Table full count 0x%x tag 0x%x \n",
2926 bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
2927 acqe_fcoe->event_tag);
2928 break;
2929
2930 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
2931 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2932 "2549 FCF disconnected from network index 0x%x"
2933 " tag 0x%x \n", acqe_fcoe->fcf_index,
2934 acqe_fcoe->event_tag);
2935 /* If the event is not for currently used fcf do nothing */
2936 if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
2937 break;
2938 /*
2939 * Currently, the driver supports only one FCF - so treat this as
2940 * a link down.
2941 */
2942 lpfc_linkdown(phba);
2943 /* Unregister FCF if no devices connected to it */
2944 lpfc_unregister_unused_fcf(phba);
2945 break;
2946
2947 default:
2948 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2949 "0288 Unknown FCoE event type 0x%x event tag "
2950 "0x%x\n", event_type, acqe_fcoe->event_tag);
2951 break;
2952 }
2953}
2954
2955/**
2956 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
2957 * @phba: pointer to lpfc hba data structure.
2958 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
2959 *
2960 * This routine is to handle the SLI4 asynchronous dcbx event.
2961 **/
2962static void
2963lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
2964 struct lpfc_acqe_dcbx *acqe_dcbx)
2965{
2966 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2967 "0290 The SLI4 DCBX asynchronous event is not "
2968 "handled yet\n");
2969}
2970
2971/**
2972 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
2973 * @phba: pointer to lpfc hba data structure.
2974 *
2975 * This routine is invoked by the worker thread to process all the pending
2976 * SLI4 asynchronous events.
2977 **/
2978void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
2979{
2980 struct lpfc_cq_event *cq_event;
2981
2982 /* First, declare the async event has been handled */
2983 spin_lock_irq(&phba->hbalock);
2984 phba->hba_flag &= ~ASYNC_EVENT;
2985 spin_unlock_irq(&phba->hbalock);
2986 /* Now, handle all the async events */
2987 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
2988 /* Get the first event from the head of the event queue */
2989 spin_lock_irq(&phba->hbalock);
2990 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
2991 cq_event, struct lpfc_cq_event, list);
2992 spin_unlock_irq(&phba->hbalock);
2993 /* Process the asynchronous event */
2994 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
2995 case LPFC_TRAILER_CODE_LINK:
2996 lpfc_sli4_async_link_evt(phba,
2997 &cq_event->cqe.acqe_link);
2998 break;
2999 case LPFC_TRAILER_CODE_FCOE:
3000 lpfc_sli4_async_fcoe_evt(phba,
3001 &cq_event->cqe.acqe_fcoe);
3002 break;
3003 case LPFC_TRAILER_CODE_DCBX:
3004 lpfc_sli4_async_dcbx_evt(phba,
3005 &cq_event->cqe.acqe_dcbx);
3006 break;
3007 default:
3008 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3009 "1804 Invalid asynchronous event code: "
3010 "x%x\n", bf_get(lpfc_trailer_code,
3011 &cq_event->cqe.mcqe_cmpl));
3012 break;
3013 }
3014 /* Free the completion event processed to the free pool */
3015 lpfc_sli4_cq_event_release(phba, cq_event);
3016 }
3017}
3018
3019/**
3020 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3021 * @phba: pointer to lpfc hba data structure.
3022 * @dev_grp: The HBA PCI-Device group number.
3023 *
3024 * This routine is invoked to set up the per HBA PCI-Device group function
3025 * API jump table entries.
3026 *
3027 * Return: 0 if success, otherwise -ENODEV
3028 **/
3029int
3030lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3031{
3032 int rc;
3033
3034 /* Set up lpfc PCI-device group */
3035 phba->pci_dev_grp = dev_grp;
3036
3037 /* The LPFC_PCI_DEV_OC uses SLI4 */
3038 if (dev_grp == LPFC_PCI_DEV_OC)
3039 phba->sli_rev = LPFC_SLI_REV4;
3040
3041 /* Set up device INIT API function jump table */
3042 rc = lpfc_init_api_table_setup(phba, dev_grp);
3043 if (rc)
3044 return -ENODEV;
3045 /* Set up SCSI API function jump table */
3046 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3047 if (rc)
3048 return -ENODEV;
3049 /* Set up SLI API function jump table */
3050 rc = lpfc_sli_api_table_setup(phba, dev_grp);
3051 if (rc)
3052 return -ENODEV;
3053 /* Set up MBOX API function jump table */
3054 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3055 if (rc)
3056 return -ENODEV;
3057
3058 return 0;
3059}
3060
3061/**
3062 * lpfc_log_intr_mode - Log the active interrupt mode
3063 * @phba: pointer to lpfc hba data structure.
3064 * @intr_mode: active interrupt mode adopted.
3065 *
3066 * This routine is invoked to log the currently used active interrupt mode
3067 * to the device.
3068 **/
3069static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3070{
3071 switch (intr_mode) {
3072 case 0:
3073 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3074 "0470 Enable INTx interrupt mode.\n");
3075 break;
3076 case 1:
3077 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3078 "0481 Enabled MSI interrupt mode.\n");
3079 break;
3080 case 2:
3081 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3082 "0480 Enabled MSI-X interrupt mode.\n");
3083 break;
3084 default:
3085 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3086 "0482 Illegal interrupt mode.\n");
3087 break;
3088 }
3089 return;
3090}
3091
3092/**
3093 * lpfc_enable_pci_dev - Enable a generic PCI device.
3094 * @phba: pointer to lpfc hba data structure.
3095 *
3096 * This routine is invoked to enable the PCI device that is common to all
3097 * PCI devices.
3098 *
3099 * Return codes
3100 * 0 - successful
3101 * other values - error
3102 **/
3103static int
3104lpfc_enable_pci_dev(struct lpfc_hba *phba)
3105{
3106 struct pci_dev *pdev;
3107 int bars;
3108
3109 /* Obtain PCI device reference */
3110 if (!phba->pcidev)
3111 goto out_error;
3112 else
3113 pdev = phba->pcidev;
3114 /* Select PCI BARs */
3115 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3116 /* Enable PCI device */
3117 if (pci_enable_device_mem(pdev))
3118 goto out_error;
3119 /* Request PCI resource for the device */
3120 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3121 goto out_disable_device;
3122 /* Set up device as PCI master and save state for EEH */
3123 pci_set_master(pdev);
3124 pci_try_set_mwi(pdev);
3125 pci_save_state(pdev);
3126
3127 return 0;
3128
3129out_disable_device:
3130 pci_disable_device(pdev);
3131out_error:
3132 return -ENODEV;
3133}
3134
3135/**
3136 * lpfc_disable_pci_dev - Disable a generic PCI device.
3137 * @phba: pointer to lpfc hba data structure.
3138 *
3139 * This routine is invoked to disable the PCI device that is common to all
3140 * PCI devices.
3141 **/
3142static void
3143lpfc_disable_pci_dev(struct lpfc_hba *phba)
3144{
3145 struct pci_dev *pdev;
3146 int bars;
3147
3148 /* Obtain PCI device reference */
3149 if (!phba->pcidev)
3150 return;
3151 else
3152 pdev = phba->pcidev;
3153 /* Select PCI BARs */
3154 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3155 /* Release PCI resource and disable PCI device */
3156 pci_release_selected_regions(pdev, bars);
3157 pci_disable_device(pdev);
3158 /* Null out PCI private reference to driver */
3159 pci_set_drvdata(pdev, NULL);
3160
3161 return;
3162}
3163
3164/**
3165 * lpfc_reset_hba - Reset a hba
3166 * @phba: pointer to lpfc hba data structure.
3167 *
3168 * This routine is invoked to reset a hba device. It brings the HBA
3169 * offline, performs a board restart, and then brings the board back
3170 * online. The lpfc_offline calls lpfc_sli_hba_down, which will clean up
3171 * the outstanding mailbox commands.
3172 **/
3173void
3174lpfc_reset_hba(struct lpfc_hba *phba)
3175{
3176 /* If resets are disabled then set error state and return. */
3177 if (!phba->cfg_enable_hba_reset) {
3178 phba->link_state = LPFC_HBA_ERROR;
3179 return;
3180 }
3181 lpfc_offline_prep(phba);
3182 lpfc_offline(phba);
3183 lpfc_sli_brdrestart(phba);
3184 lpfc_online(phba);
3185 lpfc_unblock_mgmt_io(phba);
3186}
3187
3188/**
3189 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3190 * @phba: pointer to lpfc hba data structure.
3191 *
3192 * This routine is invoked to set up the driver internal resources specific to
3193 * support the SLI-3 HBA device it attached to.
3194 *
3195 * Return codes
3196 * 0 - successful
3197 * other values - error
3198 **/
3199static int
3200lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3201{
3202 struct lpfc_sli *psli;
3203
3204 /*
3205 * Initialize timers used by driver
3206 */
3207
3208 /* Heartbeat timer */
3209 init_timer(&phba->hb_tmofunc);
3210 phba->hb_tmofunc.function = lpfc_hb_timeout;
3211 phba->hb_tmofunc.data = (unsigned long)phba;
3212
3213 psli = &phba->sli;
3214 /* MBOX heartbeat timer */
3215 init_timer(&psli->mbox_tmo);
3216 psli->mbox_tmo.function = lpfc_mbox_timeout;
3217 psli->mbox_tmo.data = (unsigned long) phba;
3218 /* FCP polling mode timer */
3219 init_timer(&phba->fcp_poll_timer);
3220 phba->fcp_poll_timer.function = lpfc_poll_timeout;
3221 phba->fcp_poll_timer.data = (unsigned long) phba;
3222 /* Fabric block timer */
3223 init_timer(&phba->fabric_block_timer);
3224 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3225 phba->fabric_block_timer.data = (unsigned long) phba;
3226 /* EA polling mode timer */
3227 init_timer(&phba->eratt_poll);
3228 phba->eratt_poll.function = lpfc_poll_eratt;
3229 phba->eratt_poll.data = (unsigned long) phba;
3230
3231 /* Host attention work mask setup */
3232 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3233 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
3234
3235 /* Get all the module params for configuring this host */
3236 lpfc_get_cfgparam(phba);
3237 /*
3238 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
3239 * used to create the sg_dma_buf_pool must be dynamically calculated.
3240 * 2 segments are added since the IOCB needs a command and response bde.
3241 */
3242 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3243 sizeof(struct fcp_rsp) +
3244 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
3245
3246 if (phba->cfg_enable_bg) {
3247 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3248 phba->cfg_sg_dma_buf_size +=
3249 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3250 }
3251
3252 /* Also reinitialize the host templates with new values. */
3253 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3254 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3255
3256 phba->max_vpi = LPFC_MAX_VPI;
3257 /* This will be set to correct value after config_port mbox */
3258 phba->max_vports = 0;
3259
3260 /*
3261 * Initialize the SLI Layer to run with lpfc HBAs.
3262 */
3263 lpfc_sli_setup(phba);
3264 lpfc_sli_queue_setup(phba);
3265
3266 /* Allocate device driver memory */
3267 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3268 return -ENOMEM;
3269
3270 return 0;
3271}
3272
3273/**
3274 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3275 * @phba: pointer to lpfc hba data structure.
3276 *
3277 * This routine is invoked to unset the driver internal resources set up
3278 * specific for supporting the SLI-3 HBA device it is attached to.
3279 **/
3280static void
3281lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3282{
3283 /* Free device driver memory allocated */
3284 lpfc_mem_free_all(phba);
3285
3286 return;
3287}
3288
3289/**
3290 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3291 * @phba: pointer to lpfc hba data structure.
3292 *
3293 * This routine is invoked to set up the driver internal resources specific to
3294 * support the SLI-4 HBA device it is attached to.
3295 *
3296 * Return codes
3297 * 0 - successful
3298 * other values - error
3299 **/
3300static int
3301lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3302{
3303 struct lpfc_sli *psli;
3304 int rc;
3305 int i, hbq_count;
3306
3307 /* Before proceeding, wait for POST done and device ready */
3308 rc = lpfc_sli4_post_status_check(phba);
3309 if (rc)
3310 return -ENODEV;
3311
3312 /*
3313 * Initialize timers used by driver
3314 */
3315
3316 /* Heartbeat timer */
3317 init_timer(&phba->hb_tmofunc);
3318 phba->hb_tmofunc.function = lpfc_hb_timeout;
3319 phba->hb_tmofunc.data = (unsigned long)phba;
3320
3321 psli = &phba->sli;
3322 /* MBOX heartbeat timer */
3323 init_timer(&psli->mbox_tmo);
3324 psli->mbox_tmo.function = lpfc_mbox_timeout;
3325 psli->mbox_tmo.data = (unsigned long) phba;
3326 /* Fabric block timer */
3327 init_timer(&phba->fabric_block_timer);
3328 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3329 phba->fabric_block_timer.data = (unsigned long) phba;
3330 /* EA polling mode timer */
3331 init_timer(&phba->eratt_poll);
3332 phba->eratt_poll.function = lpfc_poll_eratt;
3333 phba->eratt_poll.data = (unsigned long) phba;
3334 /*
3335 * We need to do a READ_CONFIG mailbox command here before
3336 * calling lpfc_get_cfgparam. For VFs this will report the
3337 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3338 * All of the resources allocated
3339 * for this Port are tied to these values.
3340 */
3341 /* Get all the module params for configuring this host */
3342 lpfc_get_cfgparam(phba);
3343 phba->max_vpi = LPFC_MAX_VPI;
3344 /* This will be set to correct value after the read_config mbox */
3345 phba->max_vports = 0;
3346
3347 /* Program the default value of vlan_id and fc_map */
3348 phba->valid_vlan = 0;
3349 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
3350 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
3351 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3352
3353 /*
3354 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
3355 * used to create the sg_dma_buf_pool must be dynamically calculated.
3356 * 2 segments are added since the IOCB needs a command and response bde.
3357 * To ensure that the scsi sgl does not cross a 4k page boundary, only
3358 * sgl sizes of 1k, 2k, 4k, and 8k are supported.
3359 * Table of sgl sizes and seg_cnt:
3360 * sgl size, sg_seg_cnt total seg
3361 * 1k 50 52
3362 * 2k 114 116
3363 * 4k 242 244
3364 * 8k 498 500
3365 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
3366 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
3367 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
3368 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
3369 */
3370 if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
3371 phba->cfg_sg_seg_cnt = 50;
3372 else if (phba->cfg_sg_seg_cnt <= 114)
3373 phba->cfg_sg_seg_cnt = 114;
3374 else if (phba->cfg_sg_seg_cnt <= 242)
3375 phba->cfg_sg_seg_cnt = 242;
3376 else
3377 phba->cfg_sg_seg_cnt = 498;
3378
3379 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
3380 + sizeof(struct fcp_rsp);
3381 phba->cfg_sg_dma_buf_size +=
3382 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
3383
3384 /* Initialize buffer queue management fields */
3385 hbq_count = lpfc_sli_hbq_count();
3386 for (i = 0; i < hbq_count; ++i)
3387 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
3388 INIT_LIST_HEAD(&phba->rb_pend_list);
3389 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
3390 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3391
3392 /*
3393 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
3394 */
3395 /* Initialize the Abort scsi buffer list used by driver */
3396 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
3397 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
3398 /* This abort list is used by the worker thread */
3399 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3400
3401 /*
3402 * Initialize driver internal slow-path work queues
3403 */
3404
3405 /* Driver internal slow-path CQ Event pool */
3406 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3407 /* Response IOCB work queue list */
3408 INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
3409 /* Asynchronous event CQ Event work queue list */
3410 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
3411 /* Fast-path XRI aborted CQ Event work queue list */
3412 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
3413 /* Slow-path XRI aborted CQ Event work queue list */
3414 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
3415 /* Receive queue CQ Event work queue list */
3416 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
3417
3418 /* Initialize the driver internal SLI layer lists. */
3419 lpfc_sli_setup(phba);
3420 lpfc_sli_queue_setup(phba);
3421
3422 /* Allocate device driver memory */
3423 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
3424 if (rc)
3425 return -ENOMEM;
3426
3427 /* Create the bootstrap mailbox command */
3428 rc = lpfc_create_bootstrap_mbox(phba);
3429 if (unlikely(rc))
3430 goto out_free_mem;
3431
3432 /* Set up the host's endian order with the device. */
3433 rc = lpfc_setup_endian_order(phba);
3434 if (unlikely(rc))
3435 goto out_free_bsmbx;
3436
3437 /* Set up the hba's configuration parameters. */
3438 rc = lpfc_sli4_read_config(phba);
3439 if (unlikely(rc))
3440 goto out_free_bsmbx;
3441
3442 /* Perform a function reset */
3443 rc = lpfc_pci_function_reset(phba);
3444 if (unlikely(rc))
3445 goto out_free_bsmbx;
3446
3447 /* Create all the SLI4 queues */
3448 rc = lpfc_sli4_queue_create(phba);
3449 if (rc)
3450 goto out_free_bsmbx;
3451
3452 /* Create driver internal CQE event pool */
3453 rc = lpfc_sli4_cq_event_pool_create(phba);
3454 if (rc)
3455 goto out_destroy_queue;
3456
3457 /* Initialize and populate the iocb list per host */
3458 rc = lpfc_init_sgl_list(phba);
3459 if (rc) {
3460 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3461 "1400 Failed to initialize sgl list.\n");
3462 goto out_destroy_cq_event_pool;
3463 }
3464 rc = lpfc_init_active_sgl_array(phba);
3465 if (rc) {
3466 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3467 "1430 Failed to initialize sgl list.\n");
3468 goto out_free_sgl_list;
3469 }
3470
3471 rc = lpfc_sli4_init_rpi_hdrs(phba);
3472 if (rc) {
3473 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3474 "1432 Failed to initialize rpi headers.\n");
3475 goto out_free_active_sgl;
3476 }
3477
3478 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
3479 phba->cfg_fcp_eq_count), GFP_KERNEL);
3480 if (!phba->sli4_hba.fcp_eq_hdl) {
3481 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3482 "2572 Failed to allocate memory for fast-path "
3483 "per-EQ handle array\n");
3484 goto out_remove_rpi_hdrs;
3485 }
3486
3487 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
3488 phba->sli4_hba.cfg_eqn), GFP_KERNEL);
3489 if (!phba->sli4_hba.msix_entries) {
3490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3491 "2573 Failed to allocate memory for msi-x "
3492 "interrupt vector entries\n");
3493 goto out_free_fcp_eq_hdl;
3494 }
3495
3496 return rc;
3497
3498out_free_fcp_eq_hdl:
3499 kfree(phba->sli4_hba.fcp_eq_hdl);
3500out_remove_rpi_hdrs:
3501 lpfc_sli4_remove_rpi_hdrs(phba);
3502out_free_active_sgl:
3503 lpfc_free_active_sgl(phba);
3504out_free_sgl_list:
3505 lpfc_free_sgl_list(phba);
3506out_destroy_cq_event_pool:
3507 lpfc_sli4_cq_event_pool_destroy(phba);
3508out_destroy_queue:
3509 lpfc_sli4_queue_destroy(phba);
3510out_free_bsmbx:
3511 lpfc_destroy_bootstrap_mbox(phba);
3512out_free_mem:
3513 lpfc_mem_free(phba);
3514 return rc;
3515}
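
As a sanity check of the sgl sizing table quoted in the comment inside lpfc_sli4_driver_resource_setup() above, the stand-alone sketch below redoes that arithmetic under the sizes the comment assumes (a 32-byte FCP command, a 160-byte FCP response, a 16-byte SGE, plus two extra SGEs for command and response). The DEMO_* constants are illustrative and are not taken from the driver headers.

#include <stdio.h>

/* Sizes as quoted in the sizing table above; illustrative only. */
#define DEMO_FCP_CMND_SZ  32u
#define DEMO_FCP_RSP_SZ  160u
#define DEMO_SGE_SZ       16u

static unsigned int demo_sg_dma_buf_size(unsigned int sg_seg_cnt)
{
	/* cmd + rsp + (seg_cnt + 2) SGEs, mirroring the driver's formula */
	return DEMO_FCP_CMND_SZ + DEMO_FCP_RSP_SZ +
	       (sg_seg_cnt + 2) * DEMO_SGE_SZ;
}

int main(void)
{
	const unsigned int cnts[] = { 50, 114, 242, 498 };
	unsigned int i;

	for (i = 0; i < sizeof(cnts) / sizeof(cnts[0]); i++)
		printf("sg_seg_cnt %3u -> sg_dma_buf_size %u\n",
		       cnts[i], demo_sg_dma_buf_size(cnts[i]));
	return 0;	/* prints 1024, 2048, 4096 and 8192 */
}
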
3516
3517/**
3518 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
3519 * @phba: pointer to lpfc hba data structure.
3520 *
3521 * This routine is invoked to unset the driver internal resources set up
3522 * specific for supporting the SLI-4 HBA device it is attached to.
3523 **/
3524static void
3525lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3526{
3527 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
3528
3529 /* unregister default FCFI from the HBA */
3530 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
3531
3532 /* Free the default FCR table */
3533 lpfc_sli_remove_dflt_fcf(phba);
3534
3535 /* Free memory allocated for msi-x interrupt vector entries */
3536 kfree(phba->sli4_hba.msix_entries);
3537
3538 /* Free memory allocated for fast-path work queue handles */
3539 kfree(phba->sli4_hba.fcp_eq_hdl);
3540
3541 /* Free the allocated rpi headers. */
3542 lpfc_sli4_remove_rpi_hdrs(phba);
3543 lpfc_sli4_remove_rpis(phba);
3544
3545 /* Free the ELS sgl list */
3546 lpfc_free_active_sgl(phba);
3547 lpfc_free_sgl_list(phba);
3548
3549 /* Free the SCSI sgl management array */
3550 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3551
3552 /* Free the SLI4 queues */
3553 lpfc_sli4_queue_destroy(phba);
3554
3555 /* Free the completion queue EQ event pool */
3556 lpfc_sli4_cq_event_release_all(phba);
3557 lpfc_sli4_cq_event_pool_destroy(phba);
3558
3559 /* Reset SLI4 HBA FCoE function */
3560 lpfc_pci_function_reset(phba);
3561
3562 /* Free the bsmbx region. */
3563 lpfc_destroy_bootstrap_mbox(phba);
3564
3565 /* Free the SLI Layer memory with SLI4 HBAs */
3566 lpfc_mem_free_all(phba);
3567
3568 /* Free the current connect table */
3569 list_for_each_entry_safe(conn_entry, next_conn_entry,
3570 &phba->fcf_conn_rec_list, list)
3571 kfree(conn_entry);
3572
3573 return;
3574}
3575
3576/**
3577 * lpfc_init_api_table_setup - Set up init api function jump table
3578 * @phba: The hba struct for which this call is being executed.
3579 * @dev_grp: The HBA PCI-Device group number.
3580 *
3581 * This routine sets up the device INIT interface API function jump table
3582 * in @phba struct.
3583 *
3584 * Returns: 0 - success, -ENODEV - failure.
3585 **/
3586int
3587lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3588{
3589 switch (dev_grp) {
3590 case LPFC_PCI_DEV_LP:
3591 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
3592 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
3593 phba->lpfc_stop_port = lpfc_stop_port_s3;
3594 break;
3595 case LPFC_PCI_DEV_OC:
3596 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
3597 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
3598 phba->lpfc_stop_port = lpfc_stop_port_s4;
3599 break;
3600 default:
3601 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3602 "1431 Invalid HBA PCI-device group: 0x%x\n",
3603 dev_grp);
3604 return -ENODEV;
3605 break;
3606 }
3607 return 0;
3608}
3609
3610/**
3611 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
3612 * @phba: pointer to lpfc hba data structure.
3613 *
3614 * This routine is invoked to set up the driver internal resources before the
3615 * device specific resource setup to support the HBA device it is attached to.
3616 *
3617 * Return codes
3618 * 0 - successful
3619 * other values - error
3620 **/
3621static int
3622lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
3623{
3624 /*
3625 * Driver resources common to all SLI revisions
3626 */
3627 atomic_set(&phba->fast_event_count, 0);
3628 spin_lock_init(&phba->hbalock);
3629
3630 /* Initialize ndlp management spinlock */
3631 spin_lock_init(&phba->ndlp_lock);
3632
3633 INIT_LIST_HEAD(&phba->port_list);
3634 INIT_LIST_HEAD(&phba->work_list);
3635 init_waitqueue_head(&phba->wait_4_mlo_m_q);
3636
3637 /* Initialize the wait queue head for the kernel thread */
3638 init_waitqueue_head(&phba->work_waitq);
3639
3640 /* Initialize the scsi buffer list used by driver for scsi IO */
3641 spin_lock_init(&phba->scsi_buf_list_lock);
3642 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
3643
3644 /* Initialize the fabric iocb list */
3645 INIT_LIST_HEAD(&phba->fabric_iocb_list);
3646
3647 /* Initialize list to save ELS buffers */
3648 INIT_LIST_HEAD(&phba->elsbuf);
3649
3650 /* Initialize FCF connection rec list */
3651 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
3652
3653 return 0;
3654}
3655
3656/**
3657 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
3658 * @phba: pointer to lpfc hba data structure.
3659 *
3660 * This routine is invoked to set up the driver internal resources after the
3661 * device specific resource setup to support the HBA device it is attached to.
3662 *
3663 * Return codes
3664 * 0 - successful
3665 * other values - error
3666 **/
3667static int
3668lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
3669{
3670 int error;
3671
3672 /* Startup the kernel thread for this host adapter. */
3673 phba->worker_thread = kthread_run(lpfc_do_work, phba,
3674 "lpfc_worker_%d", phba->brd_no);
3675 if (IS_ERR(phba->worker_thread)) {
3676 error = PTR_ERR(phba->worker_thread);
3677 return error;
3678 }
3679
3680 return 0;
3681}
3682
3683/**
3684 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
3685 * @phba: pointer to lpfc hba data structure.
3686 *
3687 * This routine is invoked to unset the driver internal resources set up after
3688 * the device specific resource setup for supporting the HBA device it is
3689 * attached to.
3690 **/
3691static void
3692lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
3693{
3694 /* Stop kernel worker thread */
3695 kthread_stop(phba->worker_thread);
3696}
3697
3698/**
3699 * lpfc_free_iocb_list - Free iocb list.
3700 * @phba: pointer to lpfc hba data structure.
3701 *
3702 * This routine is invoked to free the driver's IOCB list and memory.
3703 **/
3704static void
3705lpfc_free_iocb_list(struct lpfc_hba *phba)
3706{
3707 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
3708
3709 spin_lock_irq(&phba->hbalock);
3710 list_for_each_entry_safe(iocbq_entry, iocbq_next,
3711 &phba->lpfc_iocb_list, list) {
3712 list_del(&iocbq_entry->list);
3713 kfree(iocbq_entry);
3714 phba->total_iocbq_bufs--;
3715 }
3716 spin_unlock_irq(&phba->hbalock);
3717
3718 return;
3719}
3720
3721/**
3722 * lpfc_init_iocb_list - Allocate and initialize iocb list.
3723 * @phba: pointer to lpfc hba data structure.
3724 *
3725 * This routine is invoked to allocate and initialize the driver's IOCB
3726 * list and set up the IOCB tag array accordingly.
3727 *
3728 * Return codes
3729 * 0 - successful
3730 * other values - error
3731 **/
3732static int
3733lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
3734{
3735 struct lpfc_iocbq *iocbq_entry = NULL;
3736 uint16_t iotag;
3737 int i;
3738
3739 /* Initialize and populate the iocb list per host. */
3740 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
3741 for (i = 0; i < iocb_count; i++) {
3742 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
3743 if (iocbq_entry == NULL) {
3744 printk(KERN_ERR "%s: only allocated %d iocbs of "
3745 "expected %d count. Unloading driver.\n",
3746 __func__, i, LPFC_IOCB_LIST_CNT);
3747 goto out_free_iocbq;
3748 }
3749
3750 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
3751 if (iotag == 0) {
3752 kfree(iocbq_entry);
3753 printk(KERN_ERR "%s: failed to allocate IOTAG. "
3754 "Unloading driver.\n", __func__);
3755 goto out_free_iocbq;
3756 }
3757 iocbq_entry->sli4_xritag = NO_XRI;
3758
3759 spin_lock_irq(&phba->hbalock);
3760 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
3761 phba->total_iocbq_bufs++;
3762 spin_unlock_irq(&phba->hbalock);
3763 }
3764
3765 return 0;
3766
3767out_free_iocbq:
3768 lpfc_free_iocb_list(phba);
3769
3770 return -ENOMEM;
3771}
3772
3773/**
3774 * lpfc_free_sgl_list - Free sgl list.
3775 * @phba: pointer to lpfc hba data structure.
3776 *
3777 * This routine is invoked to free the driver's sgl list and memory.
3778 **/
3779static void
3780lpfc_free_sgl_list(struct lpfc_hba *phba)
3781{
3782 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
3783 LIST_HEAD(sglq_list);
3784 int rc = 0;
3785
3786 spin_lock_irq(&phba->hbalock);
3787 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
3788 spin_unlock_irq(&phba->hbalock);
3789
3790 list_for_each_entry_safe(sglq_entry, sglq_next,
3791 &sglq_list, list) {
3792 list_del(&sglq_entry->list);
3793 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
3794 kfree(sglq_entry);
3795 phba->sli4_hba.total_sglq_bufs--;
3796 }
3797 rc = lpfc_sli4_remove_all_sgl_pages(phba);
3798 if (rc) {
3799 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3800 "2005 Unable to deregister pages from HBA: %x", rc);
3801 }
3802 kfree(phba->sli4_hba.lpfc_els_sgl_array);
3803}
3804
3805/**
3806 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
3807 * @phba: pointer to lpfc hba data structure.
3808 *
3809 * This routine is invoked to allocate the driver's active sgl memory.
3810 * This array will hold the sglq_entry pointers for active IOs.
3811 **/
3812static int
3813lpfc_init_active_sgl_array(struct lpfc_hba *phba)
3814{
3815 int size;
3816 size = sizeof(struct lpfc_sglq *);
3817 size *= phba->sli4_hba.max_cfg_param.max_xri;
3818
3819 phba->sli4_hba.lpfc_sglq_active_list =
3820 kzalloc(size, GFP_KERNEL);
3821 if (!phba->sli4_hba.lpfc_sglq_active_list)
3822 return -ENOMEM;
3823 return 0;
3824}
3825
3826/**
3827 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
3828 * @phba: pointer to lpfc hba data structure.
3829 *
3830 * This routine is invoked to walk through the array of active sglq entries
3831 * and free all of the resources.
3832 * This is just a place holder for now.
3833 **/
3834static void
3835lpfc_free_active_sgl(struct lpfc_hba *phba)
3836{
3837 kfree(phba->sli4_hba.lpfc_sglq_active_list);
3838}
3839
3840/**
3841 * lpfc_init_sgl_list - Allocate and initialize sgl list.
3842 * @phba: pointer to lpfc hba data structure.
3843 *
3844 * This routine is invoked to allocate and initialize the driver's sgl
3845 * list and set up the sgl xritag tag array accordingly.
3846 *
3847 * Return codes
3848 * 0 - successful
3849 * other values - error
3850 **/
3851static int
3852lpfc_init_sgl_list(struct lpfc_hba *phba)
3853{
3854 struct lpfc_sglq *sglq_entry = NULL;
3855 int i;
3856 int els_xri_cnt;
3857
3858 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3859 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3860 "2400 lpfc_init_sgl_list els %d.\n",
3861 els_xri_cnt);
3862 /* Initialize and populate the sglq list per host/VF. */
3863 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
3864 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
3865
3866 /* Sanity check on XRI management */
3867 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
3868 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3869 "2562 No room left for SCSI XRI allocation: "
3870 "max_xri=%d, els_xri=%d\n",
3871 phba->sli4_hba.max_cfg_param.max_xri,
3872 els_xri_cnt);
3873 return -ENOMEM;
3874 }
3875
3876 /* Allocate memory for the ELS XRI management array */
3877 phba->sli4_hba.lpfc_els_sgl_array =
3878 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
3879 GFP_KERNEL);
3880
3881 if (!phba->sli4_hba.lpfc_els_sgl_array) {
3882 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3883 "2401 Failed to allocate memory for ELS "
3884 "XRI management array of size %d.\n",
3885 els_xri_cnt);
3886 return -ENOMEM;
3887 }
3888
3889 /* Keep the SCSI XRI into the XRI management array */
3890 phba->sli4_hba.scsi_xri_max =
3891 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3892 phba->sli4_hba.scsi_xri_cnt = 0;
3893
3894 phba->sli4_hba.lpfc_scsi_psb_array =
3895 kzalloc((sizeof(struct lpfc_scsi_buf *) *
3896 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
3897
3898 if (!phba->sli4_hba.lpfc_scsi_psb_array) {
3899 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3900 "2563 Failed to allocate memory for SCSI "
3901 "XRI management array of size %d.\n",
3902 phba->sli4_hba.scsi_xri_max);
3903 kfree(phba->sli4_hba.lpfc_els_sgl_array);
3904 return -ENOMEM;
3905 }
3906
3907 for (i = 0; i < els_xri_cnt; i++) {
3908 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
3909 if (sglq_entry == NULL) {
3910 printk(KERN_ERR "%s: only allocated %d sgls of "
3911 "expected %d count. Unloading driver.\n",
3912 __func__, i, els_xri_cnt);
3913 goto out_free_mem;
3914 }
3915
3916 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
3917 if (sglq_entry->sli4_xritag == NO_XRI) {
3918 kfree(sglq_entry);
3919 printk(KERN_ERR "%s: failed to allocate XRI.\n"
3920 "Unloading driver.\n", __func__);
3921 goto out_free_mem;
3922 }
3923 sglq_entry->buff_type = GEN_BUFF_TYPE;
3924 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
3925 if (sglq_entry->virt == NULL) {
3926 kfree(sglq_entry);
3927 printk(KERN_ERR "%s: failed to allocate mbuf.\n"
3928 "Unloading driver.\n", __func__);
3929 goto out_free_mem;
3930 }
3931 sglq_entry->sgl = sglq_entry->virt;
3932 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3933
3934 /* The list order is used by later block SGL registration */
3935 spin_lock_irq(&phba->hbalock);
3936 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
3937 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
3938 phba->sli4_hba.total_sglq_bufs++;
3939 spin_unlock_irq(&phba->hbalock);
3940 }
3941 return 0;
3942
3943out_free_mem:
3944 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3945 lpfc_free_sgl_list(phba);
3946 return -ENOMEM;
3947}
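
lpfc_init_sgl_list() above splits the port's XRI budget: els_xri_cnt XRIs are consumed by ELS sgls up front and whatever remains becomes scsi_xri_max for SCSI buffers, with the "2562" check refusing a configuration that leaves no SCSI XRIs. A minimal sketch of that split, assuming hypothetical limits (the real numbers come from READ_CONFIG and lpfc_sli4_get_els_iocb_cnt()):

#include <stdio.h>

/* Hypothetical port limits; illustrative only. */
#define DEMO_MAX_XRI  1024
#define DEMO_ELS_XRI   256

int main(void)
{
	/* Same split as lpfc_init_sgl_list(): ELS sgls first, SCSI gets the rest. */
	if (DEMO_MAX_XRI <= DEMO_ELS_XRI) {
		printf("No room left for SCSI XRI allocation\n");
		return 1;
	}
	printf("ELS XRIs: %d, SCSI XRIs: %d\n",
	       DEMO_ELS_XRI, DEMO_MAX_XRI - DEMO_ELS_XRI);
	return 0;
}
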
3948
3949/**
3950 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
2318 * @phba: pointer to lpfc hba data structure. 3951 * @phba: pointer to lpfc hba data structure.
2319 * 3952 *
2320 * This routine is invoked to enable the MSI-X interrupt vectors. The kernel 3953 * This routine is invoked to post rpi header templates to the
2321 * function pci_enable_msix() is called to enable the MSI-X vectors. Note that 3954 * HBA consistent with the SLI-4 interface spec. This routine
2322 * pci_enable_msix(), once invoked, enables either all or nothing, depending 3955 * posts a PAGE_SIZE memory region to the port to hold up to
2323 * on the current availability of PCI vector resources. The device driver is 3956 * PAGE_SIZE / 64 rpi context headers.
2324 * responsible for calling the individual request_irq() to register each MSI-X 3957 * No locks are held here because this is an initialization routine
2325 * vector with a interrupt handler, which is done in this function. Note that 3958 * called only from probe or lpfc_online when interrupts are not
3959 * enabled and the driver is reinitializing the device.
3960 *
3961 * Return codes
3962 * 0 - successful
3963 * ENOMEM - No available memory
3964 * EIO - The mailbox failed to complete successfully.
3965 **/
3966int
3967lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
3968{
3969 int rc = 0;
3970 int longs;
3971 uint16_t rpi_count;
3972 struct lpfc_rpi_hdr *rpi_hdr;
3973
3974 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
3975
3976 /*
3977 * Provision an rpi bitmask range for discovery. The total count
3978 * is the difference between max and base + 1.
3979 */
3980 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
3981 phba->sli4_hba.max_cfg_param.max_rpi - 1;
3982
3983 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
3984 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
3985 GFP_KERNEL);
3986 if (!phba->sli4_hba.rpi_bmask)
3987 return -ENOMEM;
3988
3989 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
3990 if (!rpi_hdr) {
3991 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3992 "0391 Error during rpi post operation\n");
3993 lpfc_sli4_remove_rpis(phba);
3994 rc = -ENODEV;
3995 }
3996
3997 return rc;
3998}
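
The rpi_bmask sizing above is the usual round-up division: one bit per provisioned rpi, packed into unsigned longs. A minimal sketch of that arithmetic, assuming a hypothetical rpi_count of 64 (in the driver the count is derived from the READ_CONFIG values):

#include <stdio.h>
#include <limits.h>

#define DEMO_BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

int main(void)
{
	unsigned int rpi_count = 64;	/* hypothetical; from READ_CONFIG in the driver */
	size_t longs = (rpi_count + DEMO_BITS_PER_LONG - 1) / DEMO_BITS_PER_LONG;

	printf("%u rpis -> %zu unsigned long(s) -> %zu bitmap bytes\n",
	       rpi_count, longs, longs * sizeof(unsigned long));
	return 0;
}
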
3999
4000/**
4001 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4002 * @phba: pointer to lpfc hba data structure.
4003 *
4004 * This routine is invoked to allocate a single 4KB memory region to
4005 * support rpis and stores them in the phba. This single region
4006 * provides support for up to 64 rpis. The region is used globally
4007 * by the device.
4008 *
4009 * Returns:
4010 * A valid rpi hdr on success.
4011 * A NULL pointer on any failure.
4012 **/
4013struct lpfc_rpi_hdr *
4014lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4015{
4016 uint16_t rpi_limit, curr_rpi_range;
4017 struct lpfc_dmabuf *dmabuf;
4018 struct lpfc_rpi_hdr *rpi_hdr;
4019
4020 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4021 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4022
4023 spin_lock_irq(&phba->hbalock);
4024 curr_rpi_range = phba->sli4_hba.next_rpi;
4025 spin_unlock_irq(&phba->hbalock);
4026
4027 /*
4028 * The port has a limited number of rpis. The increment here
4029 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4030 * and to allow the full max_rpi range per port.
4031 */
4032 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4033 return NULL;
4034
4035 /*
4036 * First allocate the protocol header region for the port. The
4037 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4038 */
4039 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4040 if (!dmabuf)
4041 return NULL;
4042
4043 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4044 LPFC_HDR_TEMPLATE_SIZE,
4045 &dmabuf->phys,
4046 GFP_KERNEL);
4047 if (!dmabuf->virt) {
4048 rpi_hdr = NULL;
4049 goto err_free_dmabuf;
4050 }
4051
4052 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4053 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4054 rpi_hdr = NULL;
4055 goto err_free_coherent;
4056 }
4057
4058 /* Save the rpi header data for cleanup later. */
4059 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4060 if (!rpi_hdr)
4061 goto err_free_coherent;
4062
4063 rpi_hdr->dmabuf = dmabuf;
4064 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4065 rpi_hdr->page_count = 1;
4066 spin_lock_irq(&phba->hbalock);
4067 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4068 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4069
4070 /*
4071 * The next_rpi stores the next modulo-64 rpi value to post
4072 * in any subsequent rpi memory region postings.
4073 */
4074 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4075 spin_unlock_irq(&phba->hbalock);
4076 return rpi_hdr;
4077
4078 err_free_coherent:
4079 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4080 dmabuf->virt, dmabuf->phys);
4081 err_free_dmabuf:
4082 kfree(dmabuf);
4083 return NULL;
4084}
4085
4086/**
4087 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4088 * @phba: pointer to lpfc hba data structure.
4089 *
4090 * This routine is invoked to remove all memory resources allocated
4091 * to support rpis. This routine presumes the caller has released all
4092 * rpis consumed by fabric or port logins and is prepared to have
4093 * the header pages removed.
4094 **/
4095void
4096lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4097{
4098 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4099
4100 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4101 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4102 list_del(&rpi_hdr->list);
4103 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4104 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4105 kfree(rpi_hdr->dmabuf);
4106 kfree(rpi_hdr);
4107 }
4108
4109 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4110 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4111}
4112
4113/**
4114 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4115 * @pdev: pointer to pci device data structure.
4116 *
4117 * This routine is invoked to allocate the driver hba data structure for an
4118 * HBA device. If the allocation is successful, the phba reference to the
4119 * PCI device data structure is set.
4120 *
4121 * Return codes
4122 * pointer to @phba - successful
4123 * NULL - error
4124 **/
4125static struct lpfc_hba *
4126lpfc_hba_alloc(struct pci_dev *pdev)
4127{
4128 struct lpfc_hba *phba;
4129
4130 /* Allocate memory for HBA structure */
4131 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4132 if (!phba) {
4133 dev_err(&pdev->dev,
4134 "1417 Failed to allocate hba struct.\n");
4135 return NULL;
4136 }
4137
4138 /* Set reference to PCI device in HBA structure */
4139 phba->pcidev = pdev;
4140
4141 /* Assign an unused board number */
4142 phba->brd_no = lpfc_get_instance();
4143 if (phba->brd_no < 0) {
4144 kfree(phba);
4145 return NULL;
4146 }
4147
4148 return phba;
4149}
4150
4151/**
4152 * lpfc_hba_free - Free driver hba data structure with a device.
4153 * @phba: pointer to lpfc hba data structure.
4154 *
4155 * This routine is invoked to free the driver hba data structure with an
4156 * HBA device.
4157 **/
4158static void
4159lpfc_hba_free(struct lpfc_hba *phba)
4160{
4161 /* Release the driver assigned board number */
4162 idr_remove(&lpfc_hba_index, phba->brd_no);
4163
4164 kfree(phba);
4165 return;
4166}
4167
4168/**
4169 * lpfc_create_shost - Create hba physical port with associated scsi host.
4170 * @phba: pointer to lpfc hba data structure.
4171 *
4172 * This routine is invoked to create HBA physical port and associate a SCSI
4173 * host with it.
4174 *
4175 * Return codes
4176 * 0 - successful
4177 * other values - error
4178 **/
4179static int
4180lpfc_create_shost(struct lpfc_hba *phba)
4181{
4182 struct lpfc_vport *vport;
4183 struct Scsi_Host *shost;
4184
4185 /* Initialize HBA FC structure */
4186 phba->fc_edtov = FF_DEF_EDTOV;
4187 phba->fc_ratov = FF_DEF_RATOV;
4188 phba->fc_altov = FF_DEF_ALTOV;
4189 phba->fc_arbtov = FF_DEF_ARBTOV;
4190
4191 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4192 if (!vport)
4193 return -ENODEV;
4194
4195 shost = lpfc_shost_from_vport(vport);
4196 phba->pport = vport;
4197 lpfc_debugfs_initialize(vport);
4198 /* Put reference to SCSI host to driver's device private data */
4199 pci_set_drvdata(phba->pcidev, shost);
4200
4201 return 0;
4202}
4203
4204/**
4205 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4206 * @phba: pointer to lpfc hba data structure.
4207 *
4208 * This routine is invoked to destroy HBA physical port and the associated
4209 * SCSI host.
4210 **/
4211static void
4212lpfc_destroy_shost(struct lpfc_hba *phba)
4213{
4214 struct lpfc_vport *vport = phba->pport;
4215
4216 /* Destroy physical port that associated with the SCSI host */
4217 destroy_port(vport);
4218
4219 return;
4220}
4221
4222/**
4223 * lpfc_setup_bg - Setup Block guard structures and debug areas.
4224 * @phba: pointer to lpfc hba data structure.
4225 * @shost: the shost to be used to detect Block guard settings.
4226 *
4227 * This routine sets up the local Block guard protocol settings for @shost.
4228 * This routine also allocates memory for debugging bg buffers.
4229 **/
4230static void
4231lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4232{
4233 int pagecnt = 10;
4234 if (lpfc_prot_mask && lpfc_prot_guard) {
4235 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4236 "1478 Registering BlockGuard with the "
4237 "SCSI layer\n");
4238 scsi_host_set_prot(shost, lpfc_prot_mask);
4239 scsi_host_set_guard(shost, lpfc_prot_guard);
4240 }
4241 if (!_dump_buf_data) {
4242 while (pagecnt) {
4243 spin_lock_init(&_dump_buf_lock);
4244 _dump_buf_data =
4245 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4246 if (_dump_buf_data) {
4247 printk(KERN_ERR "BLKGRD allocated %d pages for "
4248 "_dump_buf_data at 0x%p\n",
4249 (1 << pagecnt), _dump_buf_data);
4250 _dump_buf_data_order = pagecnt;
4251 memset(_dump_buf_data, 0,
4252 ((1 << PAGE_SHIFT) << pagecnt));
4253 break;
4254 } else
4255 --pagecnt;
4256 }
4257 if (!_dump_buf_data_order)
4258 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4259 "memory for hexdump\n");
4260 } else
4261 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
4262 "\n", _dump_buf_data);
4263 if (!_dump_buf_dif) {
4264 while (pagecnt) {
4265 _dump_buf_dif =
4266 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4267 if (_dump_buf_dif) {
4268 printk(KERN_ERR "BLKGRD allocated %d pages for "
4269 "_dump_buf_dif at 0x%p\n",
4270 (1 << pagecnt), _dump_buf_dif);
4271 _dump_buf_dif_order = pagecnt;
4272 memset(_dump_buf_dif, 0,
4273 ((1 << PAGE_SHIFT) << pagecnt));
4274 break;
4275 } else
4276 --pagecnt;
4277 }
4278 if (!_dump_buf_dif_order)
4279 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4280 "memory for hexdump\n");
4281 } else
4282 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
4283 _dump_buf_dif);
4284}
4285
4286/**
4287 * lpfc_post_init_setup - Perform necessary device post initialization setup.
4288 * @phba: pointer to lpfc hba data structure.
4289 *
4290 * This routine is invoked to perform all the necessary post initialization
4291 * setup for the device.
4292 **/
4293static void
4294lpfc_post_init_setup(struct lpfc_hba *phba)
4295{
4296 struct Scsi_Host *shost;
4297 struct lpfc_adapter_event_header adapter_event;
4298
4299 /* Get the default values for Model Name and Description */
4300 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
4301
4302 /*
4303 * hba setup may have changed the hba_queue_depth so we need to
4304 * adjust the value of can_queue.
4305 */
4306 shost = pci_get_drvdata(phba->pcidev);
4307 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4308 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4309 lpfc_setup_bg(phba, shost);
4310
4311 lpfc_host_attrib_init(shost);
4312
4313 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
4314 spin_lock_irq(shost->host_lock);
4315 lpfc_poll_start_timer(phba);
4316 spin_unlock_irq(shost->host_lock);
4317 }
4318
4319 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4320 "0428 Perform SCSI scan\n");
4321 /* Send board arrival event to upper layer */
4322 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
4323 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
4324 fc_host_post_vendor_event(shost, fc_get_event_number(),
4325 sizeof(adapter_event),
4326 (char *) &adapter_event,
4327 LPFC_NL_VENDOR_ID);
4328 return;
4329}
4330
4331/**
4332 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
4333 * @phba: pointer to lpfc hba data structure.
4334 *
4335 * This routine is invoked to set up the PCI device memory space for device
4336 * with SLI-3 interface spec.
4337 *
4338 * Return codes
 4339 * 0 - successful
4340 * other values - error
4341 **/
4342static int
4343lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
4344{
4345 struct pci_dev *pdev;
4346 unsigned long bar0map_len, bar2map_len;
4347 int i, hbq_count;
4348 void *ptr;
4349 int error = -ENODEV;
4350
4351 /* Obtain PCI device reference */
4352 if (!phba->pcidev)
4353 return error;
4354 else
4355 pdev = phba->pcidev;
4356
4357 /* Set the device DMA mask size */
4358 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
4359 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
4360 return error;
4361
4362 /* Get the bus address of Bar0 and Bar2 and the number of bytes
4363 * required by each mapping.
4364 */
4365 phba->pci_bar0_map = pci_resource_start(pdev, 0);
4366 bar0map_len = pci_resource_len(pdev, 0);
4367
4368 phba->pci_bar2_map = pci_resource_start(pdev, 2);
4369 bar2map_len = pci_resource_len(pdev, 2);
4370
4371 /* Map HBA SLIM to a kernel virtual address. */
4372 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
4373 if (!phba->slim_memmap_p) {
4374 dev_printk(KERN_ERR, &pdev->dev,
4375 "ioremap failed for SLIM memory.\n");
4376 goto out;
4377 }
4378
4379 /* Map HBA Control Registers to a kernel virtual address. */
4380 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
4381 if (!phba->ctrl_regs_memmap_p) {
4382 dev_printk(KERN_ERR, &pdev->dev,
4383 "ioremap failed for HBA control registers.\n");
4384 goto out_iounmap_slim;
4385 }
4386
4387 /* Allocate memory for SLI-2 structures */
4388 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
4389 SLI2_SLIM_SIZE,
4390 &phba->slim2p.phys,
4391 GFP_KERNEL);
4392 if (!phba->slim2p.virt)
4393 goto out_iounmap;
4394
4395 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
4396 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
4397 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
4398 phba->IOCBs = (phba->slim2p.virt +
4399 offsetof(struct lpfc_sli2_slim, IOCBs));
4400
4401 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
4402 lpfc_sli_hbq_size(),
4403 &phba->hbqslimp.phys,
4404 GFP_KERNEL);
4405 if (!phba->hbqslimp.virt)
4406 goto out_free_slim;
4407
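/* Carve the single coherent HBQ buffer into per-HBQ entry arrays, laid out back to back in ring order */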
4408 hbq_count = lpfc_sli_hbq_count();
4409 ptr = phba->hbqslimp.virt;
4410 for (i = 0; i < hbq_count; ++i) {
4411 phba->hbqs[i].hbq_virt = ptr;
4412 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4413 ptr += (lpfc_hbq_defs[i]->entry_count *
4414 sizeof(struct lpfc_hbq_entry));
4415 }
4416 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
4417 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
4418
4419 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
4420
4421 INIT_LIST_HEAD(&phba->rb_pend_list);
4422
4423 phba->MBslimaddr = phba->slim_memmap_p;
4424 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
4425 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
4426 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
4427 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
4428
4429 return 0;
4430
4431out_free_slim:
4432 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4433 phba->slim2p.virt, phba->slim2p.phys);
4434out_iounmap:
4435 iounmap(phba->ctrl_regs_memmap_p);
4436out_iounmap_slim:
4437 iounmap(phba->slim_memmap_p);
4438out:
4439 return error;
4440}
4441
4442/**
4443 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
4444 * @phba: pointer to lpfc hba data structure.
4445 *
4446 * This routine is invoked to unset the PCI device memory space for device
4447 * with SLI-3 interface spec.
4448 **/
4449static void
4450lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
4451{
4452 struct pci_dev *pdev;
4453
4454 /* Obtain PCI device reference */
4455 if (!phba->pcidev)
4456 return;
4457 else
4458 pdev = phba->pcidev;
4459
4460 /* Free coherent DMA memory allocated */
4461 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
4462 phba->hbqslimp.virt, phba->hbqslimp.phys);
4463 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4464 phba->slim2p.virt, phba->slim2p.phys);
4465
4466 /* I/O memory unmap */
4467 iounmap(phba->ctrl_regs_memmap_p);
4468 iounmap(phba->slim_memmap_p);
4469
4470 return;
4471}
4472
4473/**
4474 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
4475 * @phba: pointer to lpfc hba data structure.
4476 *
4477 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
 4478 * to complete and check the status.
4479 *
4480 * Return 0 if successful, otherwise -ENODEV.
4481 **/
4482int
4483lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4484{
4485 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
4486 uint32_t onlnreg0, onlnreg1;
4487 int i, port_error = -ENODEV;
4488
4489 if (!phba->sli4_hba.STAregaddr)
4490 return -ENODEV;
4491
 4492 /* With unrecoverable error, log the error message and return error */
4493 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
4494 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
4495 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
4496 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4497 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
4498 if (uerrlo_reg.word0 || uerrhi_reg.word0) {
4499 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4500 "1422 HBA Unrecoverable error: "
4501 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
4502 "online0_reg=0x%x, online1_reg=0x%x\n",
4503 uerrlo_reg.word0, uerrhi_reg.word0,
4504 onlnreg0, onlnreg1);
4505 }
4506 return -ENODEV;
4507 }
4508
4509 /* Wait up to 30 seconds for the SLI Port POST done and ready */
4510 for (i = 0; i < 3000; i++) {
4511 sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
4512 /* Encounter fatal POST error, break out */
4513 if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
4514 port_error = -ENODEV;
4515 break;
4516 }
4517 if (LPFC_POST_STAGE_ARMFW_READY ==
4518 bf_get(lpfc_hst_state_port_status, &sta_reg)) {
4519 port_error = 0;
4520 break;
4521 }
4522 msleep(10);
4523 }
4524
4525 if (port_error)
4526 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4527 "1408 Failure HBA POST Status: sta_reg=0x%x, "
4528 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
4529 "dl=x%x, pstatus=x%x\n", sta_reg.word0,
4530 bf_get(lpfc_hst_state_perr, &sta_reg),
4531 bf_get(lpfc_hst_state_sfi, &sta_reg),
4532 bf_get(lpfc_hst_state_nip, &sta_reg),
4533 bf_get(lpfc_hst_state_ipc, &sta_reg),
4534 bf_get(lpfc_hst_state_xrom, &sta_reg),
4535 bf_get(lpfc_hst_state_dl, &sta_reg),
4536 bf_get(lpfc_hst_state_port_status, &sta_reg));
4537
4538 /* Log device information */
4539 scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr);
4540 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4541 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
4542 "FeatureL1=0x%x, FeatureL2=0x%x\n",
4543 bf_get(lpfc_scratchpad_chiptype, &scratchpad),
4544 bf_get(lpfc_scratchpad_slirev, &scratchpad),
4545 bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
4546 bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
4547
4548 return port_error;
4549}
4550
4551/**
4552 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
4553 * @phba: pointer to lpfc hba data structure.
4554 *
4555 * This routine is invoked to set up SLI4 BAR0 PCI config space register
4556 * memory map.
4557 **/
4558static void
4559lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
4560{
4561 phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
4562 LPFC_UERR_STATUS_LO;
4563 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4564 LPFC_UERR_STATUS_HI;
4565 phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
4566 LPFC_ONLINE0;
4567 phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
4568 LPFC_ONLINE1;
4569 phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
4570 LPFC_SCRATCHPAD;
4571}
4572
4573/**
4574 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
4575 * @phba: pointer to lpfc hba data structure.
4576 *
4577 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
4578 * memory map.
4579 **/
4580static void
4581lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
4582{
4583
4584 phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4585 LPFC_HST_STATE;
4586 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4587 LPFC_HST_ISR0;
4588 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4589 LPFC_HST_IMR0;
4590 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4591 LPFC_HST_ISCR0;
4592 return;
4593}
4594
4595/**
4596 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
4597 * @phba: pointer to lpfc hba data structure.
4598 * @vf: virtual function number
4599 *
4600 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 4601 * based on the given virtual function number, @vf.
4602 *
4603 * Return 0 if successful, otherwise -ENODEV.
4604 **/
4605static int
4606lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
4607{
4608 if (vf > LPFC_VIR_FUNC_MAX)
4609 return -ENODEV;
4610
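/* Each virtual function owns a page-sized doorbell window; index into it by vf, then by the per-queue doorbell offset */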
4611 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4612 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
4613 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4614 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
4615 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4616 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
4617 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4618 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
4619 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4620 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
4621 return 0;
4622}
4623
4624/**
4625 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
4626 * @phba: pointer to lpfc hba data structure.
4627 *
4628 * This routine is invoked to create the bootstrap mailbox
4629 * region consistent with the SLI-4 interface spec. This
4630 * routine allocates all memory necessary to communicate
4631 * mailbox commands to the port and sets up all alignment
4632 * needs. No locks are expected to be held when calling
4633 * this routine.
4634 *
4635 * Return codes
 4636 * 0 - successful
 4637 * ENOMEM - could not allocate memory.
4638 **/
4639static int
4640lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
4641{
4642 uint32_t bmbx_size;
4643 struct lpfc_dmabuf *dmabuf;
4644 struct dma_address *dma_address;
4645 uint32_t pa_addr;
4646 uint64_t phys_addr;
4647
4648 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4649 if (!dmabuf)
4650 return -ENOMEM;
4651
4652 /*
4653 * The bootstrap mailbox region is comprised of 2 parts
4654 * plus an alignment restriction of 16 bytes.
4655 */
4656 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
4657 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4658 bmbx_size,
4659 &dmabuf->phys,
4660 GFP_KERNEL);
4661 if (!dmabuf->virt) {
4662 kfree(dmabuf);
4663 return -ENOMEM;
4664 }
4665 memset(dmabuf->virt, 0, bmbx_size);
4666
4667 /*
4668 * Initialize the bootstrap mailbox pointers now so that the register
4669 * operations are simple later. The mailbox dma address is required
4670 * to be 16-byte aligned. Also align the virtual memory as each
 4671 * mailbox is copied into the bmbx mailbox region before issuing the
4672 * command to the port.
4673 */
4674 phba->sli4_hba.bmbx.dmabuf = dmabuf;
4675 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
4676
4677 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
4678 LPFC_ALIGN_16_BYTE);
4679 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
4680 LPFC_ALIGN_16_BYTE);
4681
4682 /*
4683 * Set the high and low physical addresses now. The SLI4 alignment
4684 * requirement is 16 bytes and the mailbox is posted to the port
4685 * as two 30-bit addresses. The other data is a bit marking whether
4686 * the 30-bit address is the high or low address.
 4687 * Upcast bmbx aphys to 64 bits so the shift instruction compiles
 4688 * cleanly on 32-bit machines.
4689 */
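/*
 * For example, an aligned aphys of 0x4_1234_5670 splits into a low payload
 * of 0x01234567 (bits 33:4) and a high payload of 0x1 (bits 63:34); each
 * payload is then shifted left by two bits so the bottom bits can carry the
 * high/low address marker.
 */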
4690 dma_address = &phba->sli4_hba.bmbx.dma_address;
4691 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
4692 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
4693 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
4694 LPFC_BMBX_BIT1_ADDR_HI);
4695
4696 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
4697 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
4698 LPFC_BMBX_BIT1_ADDR_LO);
4699 return 0;
4700}
4701
4702/**
4703 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
4704 * @phba: pointer to lpfc hba data structure.
4705 *
4706 * This routine is invoked to teardown the bootstrap mailbox
4707 * region and release all host resources. This routine requires
 4708 * the caller to ensure all mailbox commands have been recovered, no
 4709 * additional mailbox commands are sent, and interrupts are disabled
4710 * before calling this routine.
4711 *
4712 **/
4713static void
4714lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
4715{
4716 dma_free_coherent(&phba->pcidev->dev,
4717 phba->sli4_hba.bmbx.bmbx_size,
4718 phba->sli4_hba.bmbx.dmabuf->virt,
4719 phba->sli4_hba.bmbx.dmabuf->phys);
4720
4721 kfree(phba->sli4_hba.bmbx.dmabuf);
4722 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
4723}
4724
4725/**
4726 * lpfc_sli4_read_config - Get the config parameters.
4727 * @phba: pointer to lpfc hba data structure.
4728 *
4729 * This routine is invoked to read the configuration parameters from the HBA.
4730 * The configuration parameters are used to set the base and maximum values
 4731 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
4732 * allocation for the port.
4733 *
4734 * Return codes
 4735 * 0 - successful
 4736 * ENOMEM - No available memory
4737 * EIO - The mailbox failed to complete successfully.
4738 **/
4739static int
4740lpfc_sli4_read_config(struct lpfc_hba *phba)
4741{
4742 LPFC_MBOXQ_t *pmb;
4743 struct lpfc_mbx_read_config *rd_config;
4744 uint32_t rc = 0;
4745
4746 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4747 if (!pmb) {
4748 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4749 "2011 Unable to allocate memory for issuing "
4750 "SLI_CONFIG_SPECIAL mailbox command\n");
4751 return -ENOMEM;
4752 }
4753
4754 lpfc_read_config(phba, pmb);
4755
4756 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4757 if (rc != MBX_SUCCESS) {
4758 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 4759 "2012 Mailbox failed, mbxCmd x%x "
4760 "READ_CONFIG, mbxStatus x%x\n",
4761 bf_get(lpfc_mqe_command, &pmb->u.mqe),
4762 bf_get(lpfc_mqe_status, &pmb->u.mqe));
4763 rc = -EIO;
4764 } else {
4765 rd_config = &pmb->u.mqe.un.rd_config;
4766 phba->sli4_hba.max_cfg_param.max_xri =
4767 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
4768 phba->sli4_hba.max_cfg_param.xri_base =
4769 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
4770 phba->sli4_hba.max_cfg_param.max_vpi =
4771 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
4772 phba->sli4_hba.max_cfg_param.vpi_base =
4773 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
4774 phba->sli4_hba.max_cfg_param.max_rpi =
4775 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
4776 phba->sli4_hba.max_cfg_param.rpi_base =
4777 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
4778 phba->sli4_hba.max_cfg_param.max_vfi =
4779 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
4780 phba->sli4_hba.max_cfg_param.vfi_base =
4781 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
4782 phba->sli4_hba.max_cfg_param.max_fcfi =
4783 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
4784 phba->sli4_hba.max_cfg_param.fcfi_base =
4785 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
4786 phba->sli4_hba.max_cfg_param.max_eq =
4787 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
4788 phba->sli4_hba.max_cfg_param.max_rq =
4789 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
4790 phba->sli4_hba.max_cfg_param.max_wq =
4791 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
4792 phba->sli4_hba.max_cfg_param.max_cq =
4793 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
4794 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
4795 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
4796 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
4797 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
4798 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4799 phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
4800 phba->max_vports = phba->max_vpi;
4801 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4802 "2003 cfg params XRI(B:%d M:%d), "
4803 "VPI(B:%d M:%d) "
4804 "VFI(B:%d M:%d) "
4805 "RPI(B:%d M:%d) "
4806 "FCFI(B:%d M:%d)\n",
4807 phba->sli4_hba.max_cfg_param.xri_base,
4808 phba->sli4_hba.max_cfg_param.max_xri,
4809 phba->sli4_hba.max_cfg_param.vpi_base,
4810 phba->sli4_hba.max_cfg_param.max_vpi,
4811 phba->sli4_hba.max_cfg_param.vfi_base,
4812 phba->sli4_hba.max_cfg_param.max_vfi,
4813 phba->sli4_hba.max_cfg_param.rpi_base,
4814 phba->sli4_hba.max_cfg_param.max_rpi,
4815 phba->sli4_hba.max_cfg_param.fcfi_base,
4816 phba->sli4_hba.max_cfg_param.max_fcfi);
4817 }
4818 mempool_free(pmb, phba->mbox_mem_pool);
4819
4820 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
4821 if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
4822 phba->cfg_hba_queue_depth =
4823 phba->sli4_hba.max_cfg_param.max_xri;
4824 return rc;
4825}
4826
4827/**
 4828 * lpfc_setup_endian_order - Notify the port of the host's endian order.
4829 * @phba: pointer to lpfc hba data structure.
4830 *
4831 * This routine is invoked to setup the host-side endian order to the
4832 * HBA consistent with the SLI-4 interface spec.
4833 *
4834 * Return codes
 4835 * 0 - successful
 4836 * ENOMEM - No available memory
4837 * EIO - The mailbox failed to complete successfully.
4838 **/
4839static int
4840lpfc_setup_endian_order(struct lpfc_hba *phba)
4841{
4842 LPFC_MBOXQ_t *mboxq;
4843 uint32_t rc = 0;
4844 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
4845 HOST_ENDIAN_HIGH_WORD1};
4846
4847 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4848 if (!mboxq) {
4849 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4850 "0492 Unable to allocate memory for issuing "
4851 "SLI_CONFIG_SPECIAL mailbox command\n");
4852 return -ENOMEM;
4853 }
4854
4855 /*
4856 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
4857 * words to contain special data values and no other data.
4858 */
4859 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
4860 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
4861 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4862 if (rc != MBX_SUCCESS) {
4863 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4864 "0493 SLI_CONFIG_SPECIAL mailbox failed with "
4865 "status x%x\n",
4866 rc);
4867 rc = -EIO;
4868 }
4869
4870 mempool_free(mboxq, phba->mbox_mem_pool);
4871 return rc;
4872}
4873
4874/**
4875 * lpfc_sli4_queue_create - Create all the SLI4 queues
4876 * @phba: pointer to lpfc hba data structure.
4877 *
4878 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
4879 * operation. For each SLI4 queue type, the parameters such as queue entry
4880 * count (queue depth) shall be taken from the module parameter. For now,
 4882 * we just use some constant number as a placeholder.
4882 *
4883 * Return codes
 4884 * 0 - successful
 4885 * ENOMEM - No available memory
4886 * EIO - The mailbox failed to complete successfully.
4887 **/
4888static int
4889lpfc_sli4_queue_create(struct lpfc_hba *phba)
4890{
4891 struct lpfc_queue *qdesc;
4892 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
4893 int cfg_fcp_wq_count;
4894 int cfg_fcp_eq_count;
4895
4896 /*
 4897 * Sanity check for configured queue parameters against the run-time
4898 * device parameters
4899 */
4900
4901 /* Sanity check on FCP fast-path WQ parameters */
4902 cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
4903 if (cfg_fcp_wq_count >
4904 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
4905 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
4906 LPFC_SP_WQN_DEF;
4907 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
4908 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4909 "2581 Not enough WQs (%d) from "
4910 "the pci function for supporting "
4911 "FCP WQs (%d)\n",
4912 phba->sli4_hba.max_cfg_param.max_wq,
4913 phba->cfg_fcp_wq_count);
4914 goto out_error;
4915 }
4916 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4917 "2582 Not enough WQs (%d) from the pci "
4918 "function for supporting the requested "
4919 "FCP WQs (%d), the actual FCP WQs can "
4920 "be supported: %d\n",
4921 phba->sli4_hba.max_cfg_param.max_wq,
4922 phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
4923 }
4924 /* The actual number of FCP work queues adopted */
4925 phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
4926
4927 /* Sanity check on FCP fast-path EQ parameters */
4928 cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
4929 if (cfg_fcp_eq_count >
4930 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
4931 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
4932 LPFC_SP_EQN_DEF;
4933 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
4934 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4935 "2574 Not enough EQs (%d) from the "
4936 "pci function for supporting FCP "
4937 "EQs (%d)\n",
4938 phba->sli4_hba.max_cfg_param.max_eq,
4939 phba->cfg_fcp_eq_count);
4940 goto out_error;
4941 }
4942 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4943 "2575 Not enough EQs (%d) from the pci "
4944 "function for supporting the requested "
4945 "FCP EQs (%d), the actual FCP EQs can "
4946 "be supported: %d\n",
4947 phba->sli4_hba.max_cfg_param.max_eq,
4948 phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
4949 }
4950 /* It does not make sense to have more EQs than WQs */
4951 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
4952 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4953 "2593 The number of FCP EQs (%d) is more "
 4954 "than the number of FCP WQs (%d), limiting "
 4955 "the number of FCP EQs to the number of "
4956 "WQs (%d)\n", cfg_fcp_eq_count,
4957 phba->cfg_fcp_wq_count,
4958 phba->cfg_fcp_wq_count);
4959 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
4960 }
4961 /* The actual number of FCP event queues adopted */
4962 phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
4963 /* The overall number of event queues used */
4964 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
4965
4966 /*
4967 * Create Event Queues (EQs)
4968 */
4969
4970 /* Get EQ depth from module parameter, fake the default for now */
4971 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
4972 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
4973
4974 /* Create slow path event queue */
4975 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
4976 phba->sli4_hba.eq_ecount);
4977 if (!qdesc) {
4978 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4979 "0496 Failed allocate slow-path EQ\n");
4980 goto out_error;
4981 }
4982 phba->sli4_hba.sp_eq = qdesc;
4983
4984 /* Create fast-path FCP Event Queue(s) */
4985 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
4986 phba->cfg_fcp_eq_count), GFP_KERNEL);
4987 if (!phba->sli4_hba.fp_eq) {
4988 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4989 "2576 Failed allocate memory for fast-path "
4990 "EQ record array\n");
4991 goto out_free_sp_eq;
4992 }
4993 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
4994 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
4995 phba->sli4_hba.eq_ecount);
4996 if (!qdesc) {
4997 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4998 "0497 Failed allocate fast-path EQ\n");
4999 goto out_free_fp_eq;
5000 }
5001 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
5002 }
5003
5004 /*
5005 * Create Complete Queues (CQs)
5006 */
5007
5008 /* Get CQ depth from module parameter, fake the default for now */
5009 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
5010 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
5011
5012 /* Create slow-path Mailbox Command Complete Queue */
5013 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5014 phba->sli4_hba.cq_ecount);
5015 if (!qdesc) {
5016 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5017 "0500 Failed allocate slow-path mailbox CQ\n");
5018 goto out_free_fp_eq;
5019 }
5020 phba->sli4_hba.mbx_cq = qdesc;
5021
5022 /* Create slow-path ELS Complete Queue */
5023 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5024 phba->sli4_hba.cq_ecount);
5025 if (!qdesc) {
5026 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5027 "0501 Failed allocate slow-path ELS CQ\n");
5028 goto out_free_mbx_cq;
5029 }
5030 phba->sli4_hba.els_cq = qdesc;
5031
5032 /* Create slow-path Unsolicited Receive Complete Queue */
5033 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5034 phba->sli4_hba.cq_ecount);
5035 if (!qdesc) {
5036 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5037 "0502 Failed allocate slow-path USOL RX CQ\n");
5038 goto out_free_els_cq;
5039 }
5040 phba->sli4_hba.rxq_cq = qdesc;
5041
5042 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5043 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5044 phba->cfg_fcp_eq_count), GFP_KERNEL);
5045 if (!phba->sli4_hba.fcp_cq) {
5046 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5047 "2577 Failed allocate memory for fast-path "
5048 "CQ record array\n");
5049 goto out_free_rxq_cq;
5050 }
5051 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5052 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5053 phba->sli4_hba.cq_ecount);
5054 if (!qdesc) {
5055 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5056 "0499 Failed allocate fast-path FCP "
5057 "CQ (%d)\n", fcp_cqidx);
5058 goto out_free_fcp_cq;
5059 }
5060 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5061 }
5062
5063 /* Create Mailbox Command Queue */
5064 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5065 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5066
5067 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5068 phba->sli4_hba.mq_ecount);
5069 if (!qdesc) {
5070 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5071 "0505 Failed allocate slow-path MQ\n");
5072 goto out_free_fcp_cq;
5073 }
5074 phba->sli4_hba.mbx_wq = qdesc;
5075
5076 /*
5077 * Create all the Work Queues (WQs)
5078 */
5079 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5080 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5081
5082 /* Create slow-path ELS Work Queue */
5083 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5084 phba->sli4_hba.wq_ecount);
5085 if (!qdesc) {
5086 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5087 "0504 Failed allocate slow-path ELS WQ\n");
5088 goto out_free_mbx_wq;
5089 }
5090 phba->sli4_hba.els_wq = qdesc;
5091
5092 /* Create fast-path FCP Work Queue(s) */
5093 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
5094 phba->cfg_fcp_wq_count), GFP_KERNEL);
5095 if (!phba->sli4_hba.fcp_wq) {
5096 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5097 "2578 Failed allocate memory for fast-path "
5098 "WQ record array\n");
5099 goto out_free_els_wq;
5100 }
5101 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5102 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5103 phba->sli4_hba.wq_ecount);
5104 if (!qdesc) {
5105 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5106 "0503 Failed allocate fast-path FCP "
5107 "WQ (%d)\n", fcp_wqidx);
5108 goto out_free_fcp_wq;
5109 }
5110 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5111 }
5112
5113 /*
5114 * Create Receive Queue (RQ)
5115 */
5116 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5117 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5118
5119 /* Create Receive Queue for header */
5120 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5121 phba->sli4_hba.rq_ecount);
5122 if (!qdesc) {
5123 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5124 "0506 Failed allocate receive HRQ\n");
5125 goto out_free_fcp_wq;
5126 }
5127 phba->sli4_hba.hdr_rq = qdesc;
5128
5129 /* Create Receive Queue for data */
5130 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5131 phba->sli4_hba.rq_ecount);
5132 if (!qdesc) {
5133 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5134 "0507 Failed allocate receive DRQ\n");
5135 goto out_free_hdr_rq;
5136 }
5137 phba->sli4_hba.dat_rq = qdesc;
5138
5139 return 0;
5140
5141out_free_hdr_rq:
5142 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5143 phba->sli4_hba.hdr_rq = NULL;
5144out_free_fcp_wq:
5145 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5146 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5147 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5148 }
5149 kfree(phba->sli4_hba.fcp_wq);
5150out_free_els_wq:
5151 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5152 phba->sli4_hba.els_wq = NULL;
5153out_free_mbx_wq:
5154 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5155 phba->sli4_hba.mbx_wq = NULL;
5156out_free_fcp_cq:
5157 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5158 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5159 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5160 }
5161 kfree(phba->sli4_hba.fcp_cq);
5162out_free_rxq_cq:
5163 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5164 phba->sli4_hba.rxq_cq = NULL;
5165out_free_els_cq:
5166 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5167 phba->sli4_hba.els_cq = NULL;
5168out_free_mbx_cq:
5169 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5170 phba->sli4_hba.mbx_cq = NULL;
5171out_free_fp_eq:
5172 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5173 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5174 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5175 }
5176 kfree(phba->sli4_hba.fp_eq);
5177out_free_sp_eq:
5178 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5179 phba->sli4_hba.sp_eq = NULL;
5180out_error:
5181 return -ENOMEM;
5182}
5183
5184/**
5185 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5186 * @phba: pointer to lpfc hba data structure.
5187 *
5188 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
5189 * operation.
5190 *
5191 * Return codes
 5192 * 0 - successful
 5193 * ENOMEM - No available memory
5194 * EIO - The mailbox failed to complete successfully.
5195 **/
5196static void
5197lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5198{
5199 int fcp_qidx;
5200
5201 /* Release mailbox command work queue */
5202 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5203 phba->sli4_hba.mbx_wq = NULL;
5204
5205 /* Release ELS work queue */
5206 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5207 phba->sli4_hba.els_wq = NULL;
5208
5209 /* Release FCP work queue */
5210 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5211 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5212 kfree(phba->sli4_hba.fcp_wq);
5213 phba->sli4_hba.fcp_wq = NULL;
5214
5215 /* Release unsolicited receive queue */
5216 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5217 phba->sli4_hba.hdr_rq = NULL;
5218 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5219 phba->sli4_hba.dat_rq = NULL;
5220
5221 /* Release unsolicited receive complete queue */
5222 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5223 phba->sli4_hba.rxq_cq = NULL;
5224
5225 /* Release ELS complete queue */
5226 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5227 phba->sli4_hba.els_cq = NULL;
5228
5229 /* Release mailbox command complete queue */
5230 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5231 phba->sli4_hba.mbx_cq = NULL;
5232
5233 /* Release FCP response complete queue */
5234 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5235 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5236 kfree(phba->sli4_hba.fcp_cq);
5237 phba->sli4_hba.fcp_cq = NULL;
5238
5239 /* Release fast-path event queue */
5240 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5241 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5242 kfree(phba->sli4_hba.fp_eq);
5243 phba->sli4_hba.fp_eq = NULL;
5244
5245 /* Release slow-path event queue */
5246 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5247 phba->sli4_hba.sp_eq = NULL;
5248
5249 return;
5250}
5251
5252/**
5253 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5254 * @phba: pointer to lpfc hba data structure.
5255 *
5256 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5257 * operation.
5258 *
5259 * Return codes
 5260 * 0 - successful
 5261 * ENOMEM - No available memory
5262 * EIO - The mailbox failed to complete successfully.
5263 **/
5264int
5265lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5266{
5267 int rc = -ENOMEM;
5268 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5269 int fcp_cq_index = 0;
5270
5271 /*
5272 * Set up Event Queues (EQs)
5273 */
5274
5275 /* Set up slow-path event queue */
5276 if (!phba->sli4_hba.sp_eq) {
5277 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5278 "0520 Slow-path EQ not allocated\n");
5279 goto out_error;
5280 }
5281 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
5282 LPFC_SP_DEF_IMAX);
5283 if (rc) {
5284 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5285 "0521 Failed setup of slow-path EQ: "
5286 "rc = 0x%x\n", rc);
5287 goto out_error;
5288 }
5289 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5290 "2583 Slow-path EQ setup: queue-id=%d\n",
5291 phba->sli4_hba.sp_eq->queue_id);
5292
5293 /* Set up fast-path event queue */
5294 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5295 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
5296 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5297 "0522 Fast-path EQ (%d) not "
5298 "allocated\n", fcp_eqidx);
5299 goto out_destroy_fp_eq;
5300 }
5301 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
5302 phba->cfg_fcp_imax);
5303 if (rc) {
5304 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5305 "0523 Failed setup of fast-path EQ "
5306 "(%d), rc = 0x%x\n", fcp_eqidx, rc);
5307 goto out_destroy_fp_eq;
5308 }
5309 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5310 "2584 Fast-path EQ setup: "
5311 "queue[%d]-id=%d\n", fcp_eqidx,
5312 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
5313 }
5314
5315 /*
5316 * Set up Complete Queues (CQs)
5317 */
5318
5319 /* Set up slow-path MBOX Complete Queue as the first CQ */
5320 if (!phba->sli4_hba.mbx_cq) {
5321 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5322 "0528 Mailbox CQ not allocated\n");
5323 goto out_destroy_fp_eq;
5324 }
5325 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
5326 LPFC_MCQ, LPFC_MBOX);
5327 if (rc) {
5328 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5329 "0529 Failed setup of slow-path mailbox CQ: "
5330 "rc = 0x%x\n", rc);
5331 goto out_destroy_fp_eq;
5332 }
5333 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5334 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
5335 phba->sli4_hba.mbx_cq->queue_id,
5336 phba->sli4_hba.sp_eq->queue_id);
5337
5338 /* Set up slow-path ELS Complete Queue */
5339 if (!phba->sli4_hba.els_cq) {
5340 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5341 "0530 ELS CQ not allocated\n");
5342 goto out_destroy_mbx_cq;
5343 }
5344 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
5345 LPFC_WCQ, LPFC_ELS);
5346 if (rc) {
5347 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5348 "0531 Failed setup of slow-path ELS CQ: "
5349 "rc = 0x%x\n", rc);
5350 goto out_destroy_mbx_cq;
5351 }
5352 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5353 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
5354 phba->sli4_hba.els_cq->queue_id,
5355 phba->sli4_hba.sp_eq->queue_id);
5356
5357 /* Set up slow-path Unsolicited Receive Complete Queue */
5358 if (!phba->sli4_hba.rxq_cq) {
5359 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5360 "0532 USOL RX CQ not allocated\n");
5361 goto out_destroy_els_cq;
5362 }
5363 rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
5364 LPFC_RCQ, LPFC_USOL);
5365 if (rc) {
5366 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5367 "0533 Failed setup of slow-path USOL RX CQ: "
5368 "rc = 0x%x\n", rc);
5369 goto out_destroy_els_cq;
5370 }
5371 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5372 "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
5373 phba->sli4_hba.rxq_cq->queue_id,
5374 phba->sli4_hba.sp_eq->queue_id);
5375
5376 /* Set up fast-path FCP Response Complete Queue */
5377 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5378 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
5379 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5380 "0526 Fast-path FCP CQ (%d) not "
5381 "allocated\n", fcp_cqidx);
5382 goto out_destroy_fcp_cq;
5383 }
5384 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
5385 phba->sli4_hba.fp_eq[fcp_cqidx],
5386 LPFC_WCQ, LPFC_FCP);
5387 if (rc) {
5388 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5389 "0527 Failed setup of fast-path FCP "
5390 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
5391 goto out_destroy_fcp_cq;
5392 }
5393 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5394 "2588 FCP CQ setup: cq[%d]-id=%d, "
5395 "parent eq[%d]-id=%d\n",
5396 fcp_cqidx,
5397 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
5398 fcp_cqidx,
5399 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
5400 }
5401
5402 /*
5403 * Set up all the Work Queues (WQs)
5404 */
5405
5406 /* Set up Mailbox Command Queue */
5407 if (!phba->sli4_hba.mbx_wq) {
5408 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5409 "0538 Slow-path MQ not allocated\n");
5410 goto out_destroy_fcp_cq;
5411 }
5412 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
5413 phba->sli4_hba.mbx_cq, LPFC_MBOX);
5414 if (rc) {
5415 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5416 "0539 Failed setup of slow-path MQ: "
5417 "rc = 0x%x\n", rc);
5418 goto out_destroy_fcp_cq;
5419 }
5420 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5421 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
5422 phba->sli4_hba.mbx_wq->queue_id,
5423 phba->sli4_hba.mbx_cq->queue_id);
5424
5425 /* Set up slow-path ELS Work Queue */
5426 if (!phba->sli4_hba.els_wq) {
5427 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5428 "0536 Slow-path ELS WQ not allocated\n");
5429 goto out_destroy_mbx_wq;
5430 }
5431 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
5432 phba->sli4_hba.els_cq, LPFC_ELS);
5433 if (rc) {
5434 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5435 "0537 Failed setup of slow-path ELS WQ: "
5436 "rc = 0x%x\n", rc);
5437 goto out_destroy_mbx_wq;
5438 }
5439 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5440 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
5441 phba->sli4_hba.els_wq->queue_id,
5442 phba->sli4_hba.els_cq->queue_id);
5443
5444 /* Set up fast-path FCP Work Queue */
5445 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5446 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
5447 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5448 "0534 Fast-path FCP WQ (%d) not "
5449 "allocated\n", fcp_wqidx);
5450 goto out_destroy_fcp_wq;
5451 }
5452 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
5453 phba->sli4_hba.fcp_cq[fcp_cq_index],
5454 LPFC_FCP);
5455 if (rc) {
5456 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5457 "0535 Failed setup of fast-path FCP "
5458 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
5459 goto out_destroy_fcp_wq;
5460 }
5461 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5462 "2591 FCP WQ setup: wq[%d]-id=%d, "
5463 "parent cq[%d]-id=%d\n",
5464 fcp_wqidx,
5465 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
5466 fcp_cq_index,
5467 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
5468 /* Round robin FCP Work Queue's Completion Queue assignment */
5469 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
5470 }
5471
5472 /*
5473 * Create Receive Queue (RQ)
5474 */
5475 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
5476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5477 "0540 Receive Queue not allocated\n");
5478 goto out_destroy_fcp_wq;
5479 }
5480 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
5481 phba->sli4_hba.rxq_cq, LPFC_USOL);
5482 if (rc) {
5483 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5484 "0541 Failed setup of Receive Queue: "
5485 "rc = 0x%x\n", rc);
5486 goto out_destroy_fcp_wq;
5487 }
5488 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5489 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
5490 "parent cq-id=%d\n",
5491 phba->sli4_hba.hdr_rq->queue_id,
5492 phba->sli4_hba.dat_rq->queue_id,
5493 phba->sli4_hba.rxq_cq->queue_id);
5494 return 0;
5495
5496out_destroy_fcp_wq:
5497 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
5498 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
5499 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5500out_destroy_mbx_wq:
5501 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5502out_destroy_fcp_cq:
5503 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
5504 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
5505 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5506out_destroy_els_cq:
5507 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5508out_destroy_mbx_cq:
5509 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5510out_destroy_fp_eq:
5511 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
5512 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
5513 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5514out_error:
5515 return rc;
5516}
5517
5518/**
5519 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
5520 * @phba: pointer to lpfc hba data structure.
5521 *
5522 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
5523 * operation.
5524 *
5525 * Return codes
 5526 * 0 - successful
 5527 * ENOMEM - No available memory
5528 * EIO - The mailbox failed to complete successfully.
5529 **/
5530void
5531lpfc_sli4_queue_unset(struct lpfc_hba *phba)
5532{
5533 int fcp_qidx;
5534
5535 /* Unset mailbox command work queue */
5536 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5537 /* Unset ELS work queue */
5538 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5539 /* Unset unsolicited receive queue */
5540 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
5541 /* Unset FCP work queue */
5542 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5543 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
5544 /* Unset mailbox command complete queue */
5545 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5546 /* Unset ELS complete queue */
5547 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5548 /* Unset unsolicited receive complete queue */
5549 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5550 /* Unset FCP response complete queue */
5551 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5552 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
5553 /* Unset fast-path event queue */
5554 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5555 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
5556 /* Unset slow-path event queue */
5557 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5558}
5559
5560/**
5561 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
5562 * @phba: pointer to lpfc hba data structure.
5563 *
5564 * This routine is invoked to allocate and set up a pool of completion queue
5565 * events. The body of the completion queue event is a completion queue entry
 5566 * (CQE). For now, this pool is used for the interrupt service routine to queue
5567 * the following HBA completion queue events for the worker thread to process:
5568 * - Mailbox asynchronous events
5569 * - Receive queue completion unsolicited events
5570 * Later, this can be used for all the slow-path events.
5571 *
5572 * Return codes
 5573 * 0 - successful
 5574 * -ENOMEM - No available memory
5575 **/
5576static int
5577lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
5578{
5579 struct lpfc_cq_event *cq_event;
5580 int i;
5581
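/* Pre-allocate four events per CQ entry so the interrupt handler can take events from this pool instead of allocating in interrupt context */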
5582 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
5583 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
5584 if (!cq_event)
5585 goto out_pool_create_fail;
5586 list_add_tail(&cq_event->list,
5587 &phba->sli4_hba.sp_cqe_event_pool);
5588 }
5589 return 0;
5590
5591out_pool_create_fail:
5592 lpfc_sli4_cq_event_pool_destroy(phba);
5593 return -ENOMEM;
5594}
5595
5596/**
5597 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
5598 * @phba: pointer to lpfc hba data structure.
5599 *
5600 * This routine is invoked to free the pool of completion queue events at
 5601 * driver unload time. Note that it is the responsibility of the driver
5602 * cleanup routine to free all the outstanding completion-queue events
5603 * allocated from this pool back into the pool before invoking this routine
5604 * to destroy the pool.
5605 **/
5606static void
5607lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
5608{
5609 struct lpfc_cq_event *cq_event, *next_cq_event;
5610
5611 list_for_each_entry_safe(cq_event, next_cq_event,
5612 &phba->sli4_hba.sp_cqe_event_pool, list) {
5613 list_del(&cq_event->list);
5614 kfree(cq_event);
5615 }
5616}
5617
5618/**
5619 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5620 * @phba: pointer to lpfc hba data structure.
5621 *
5622 * This routine is the lock free version of the API invoked to allocate a
5623 * completion-queue event from the free pool.
5624 *
5625 * Return: Pointer to the newly allocated completion-queue event if successful
5626 * NULL otherwise.
5627 **/
5628struct lpfc_cq_event *
5629__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5630{
5631 struct lpfc_cq_event *cq_event = NULL;
5632
5633 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
5634 struct lpfc_cq_event, list);
5635 return cq_event;
5636}
5637
5638/**
5639 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5640 * @phba: pointer to lpfc hba data structure.
5641 *
 5642 * This routine is the locked version of the API invoked to allocate a
5643 * completion-queue event from the free pool.
5644 *
5645 * Return: Pointer to the newly allocated completion-queue event if successful
5646 * NULL otherwise.
5647 **/
5648struct lpfc_cq_event *
5649lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5650{
5651 struct lpfc_cq_event *cq_event;
5652 unsigned long iflags;
5653
5654 spin_lock_irqsave(&phba->hbalock, iflags);
5655 cq_event = __lpfc_sli4_cq_event_alloc(phba);
5656 spin_unlock_irqrestore(&phba->hbalock, iflags);
5657 return cq_event;
5658}
5659
5660/**
5661 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5662 * @phba: pointer to lpfc hba data structure.
5663 * @cq_event: pointer to the completion queue event to be freed.
5664 *
5665 * This routine is the lock free version of the API invoked to release a
5666 * completion-queue event back into the free pool.
5667 **/
5668void
5669__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5670 struct lpfc_cq_event *cq_event)
5671{
5672 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
5673}
5674
5675/**
5676 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5677 * @phba: pointer to lpfc hba data structure.
5678 * @cq_event: pointer to the completion queue event to be freed.
5679 *
 5680 * This routine is the locked version of the API invoked to release a
5681 * completion-queue event back into the free pool.
5682 **/
5683void
5684lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5685 struct lpfc_cq_event *cq_event)
5686{
5687 unsigned long iflags;
5688 spin_lock_irqsave(&phba->hbalock, iflags);
5689 __lpfc_sli4_cq_event_release(phba, cq_event);
5690 spin_unlock_irqrestore(&phba->hbalock, iflags);
5691}
5692
5693/**
5694 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
5695 * @phba: pointer to lpfc hba data structure.
5696 *
 5697 * This routine is to free all the pending completion-queue events back
 5698 * into the free pool for device reset.
5699 **/
5700static void
5701lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
5702{
5703 LIST_HEAD(cqelist);
5704 struct lpfc_cq_event *cqe;
5705 unsigned long iflags;
5706
5707 /* Retrieve all the pending WCQEs from pending WCQE lists */
5708 spin_lock_irqsave(&phba->hbalock, iflags);
5709 /* Pending FCP XRI abort events */
5710 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
5711 &cqelist);
5712 /* Pending ELS XRI abort events */
5713 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
5714 &cqelist);
 5715 /* Pending async events */
5716 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
5717 &cqelist);
5718 spin_unlock_irqrestore(&phba->hbalock, iflags);
5719
5720 while (!list_empty(&cqelist)) {
5721 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
5722 lpfc_sli4_cq_event_release(phba, cqe);
5723 }
5724}
5725
5726/**
5727 * lpfc_pci_function_reset - Reset pci function.
5728 * @phba: pointer to lpfc hba data structure.
5729 *
 5730 * This routine is invoked to request a PCI function reset. It will destroy
5731 * all resources assigned to the PCI function which originates this request.
5732 *
5733 * Return codes
 5734 * 0 - successful
 5735 * ENOMEM - No available memory
5736 * EIO - The mailbox failed to complete successfully.
5737 **/
5738int
5739lpfc_pci_function_reset(struct lpfc_hba *phba)
5740{
5741 LPFC_MBOXQ_t *mboxq;
5742 uint32_t rc = 0;
5743 uint32_t shdr_status, shdr_add_status;
5744 union lpfc_sli4_cfg_shdr *shdr;
5745
5746 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5747 if (!mboxq) {
5748 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5749 "0494 Unable to allocate memory for issuing "
5750 "SLI_FUNCTION_RESET mailbox command\n");
5751 return -ENOMEM;
5752 }
5753
5754 /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
5755 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5756 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
5757 LPFC_SLI4_MBX_EMBED);
5758 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5759 shdr = (union lpfc_sli4_cfg_shdr *)
5760 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5761 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5762 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
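/* On MBX_TIMEOUT the mailbox may still be in flight, so it is only returned to the pool for other outcomes */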
5763 if (rc != MBX_TIMEOUT)
5764 mempool_free(mboxq, phba->mbox_mem_pool);
5765 if (shdr_status || shdr_add_status || rc) {
5766 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5767 "0495 SLI_FUNCTION_RESET mailbox failed with "
5768 "status x%x add_status x%x, mbx status x%x\n",
5769 shdr_status, shdr_add_status, rc);
5770 rc = -ENXIO;
5771 }
5772 return rc;
5773}
5774
5775/**
5776 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
5777 * @phba: pointer to lpfc hba data structure.
5778 * @cnt: number of nop mailbox commands to send.
5779 *
 5780 * This routine is invoked to send a number (@cnt) of NOP mailbox commands and
5781 * wait for each command to complete.
5782 *
 5783 * Return: the number of NOP mailbox commands completed.
5784 **/
5785static int
5786lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
5787{
5788 LPFC_MBOXQ_t *mboxq;
5789 int length, cmdsent;
5790 uint32_t mbox_tmo;
5791 uint32_t rc = 0;
5792 uint32_t shdr_status, shdr_add_status;
5793 union lpfc_sli4_cfg_shdr *shdr;
5794
5795 if (cnt == 0) {
5796 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5797 "2518 Requested to send 0 NOP mailbox cmd\n");
5798 return cnt;
5799 }
5800
5801 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5802 if (!mboxq) {
5803 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5804 "2519 Unable to allocate memory for issuing "
5805 "NOP mailbox command\n");
5806 return 0;
5807 }
5808
5809 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
5810 length = (sizeof(struct lpfc_mbx_nop) -
5811 sizeof(struct lpfc_sli4_cfg_mhdr));
5812 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5813 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
5814
5815 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
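/* Reissue the same NOP mailbox cnt times, polling while interrupts are not yet enabled; stop early on a timeout or a failed completion status */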
5816 for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
5817 if (!phba->sli4_hba.intr_enable)
5818 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5819 else
5820 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
5821 if (rc == MBX_TIMEOUT)
5822 break;
5823 /* Check return status */
5824 shdr = (union lpfc_sli4_cfg_shdr *)
5825 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5826 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5827 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
5828 &shdr->response);
5829 if (shdr_status || shdr_add_status || rc) {
5830 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5831 "2520 NOP mailbox command failed "
5832 "status x%x add_status x%x mbx "
5833 "status x%x\n", shdr_status,
5834 shdr_add_status, rc);
5835 break;
5836 }
5837 }
5838
5839 if (rc != MBX_TIMEOUT)
5840 mempool_free(mboxq, phba->mbox_mem_pool);
5841
5842 return cmdsent;
5843}
5844
5845/**
 5846 * lpfc_sli4_fcfi_unreg - Unregister fcfi from device
 5847 * @phba: pointer to lpfc hba data structure.
 5848 * @fcfi: fcf index.
 5849 *
 5850 * This routine is invoked to unregister an FCFI from the device.
5851 **/
5852void
5853lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
5854{
5855 LPFC_MBOXQ_t *mbox;
5856 uint32_t mbox_tmo;
5857 int rc;
5858 unsigned long flags;
5859
5860 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5861
5862 if (!mbox)
5863 return;
5864
5865 lpfc_unreg_fcfi(mbox, fcfi);
5866
5867 if (!phba->sli4_hba.intr_enable)
5868 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5869 else {
5870 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5871 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5872 }
5873 if (rc != MBX_TIMEOUT)
5874 mempool_free(mbox, phba->mbox_mem_pool);
5875 if (rc != MBX_SUCCESS)
5876 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5877 "2517 Unregister FCFI command failed "
5878 "status %d, mbxStatus x%x\n", rc,
5879 bf_get(lpfc_mqe_status, &mbox->u.mqe));
5880 else {
5881 spin_lock_irqsave(&phba->hbalock, flags);
 5882 /* Mark the FCFI as no longer registered */
5883 phba->fcf.fcf_flag &=
5884 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
5885 spin_unlock_irqrestore(&phba->hbalock, flags);
5886 }
5887}
5888
5889/**
5890 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
5891 * @phba: pointer to lpfc hba data structure.
5892 *
5893 * This routine is invoked to set up the PCI device memory space for device
5894 * with SLI-4 interface spec.
5895 *
5896 * Return codes
 5897 * 0 - successful
5898 * other values - error
5899 **/
5900static int
5901lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
5902{
5903 struct pci_dev *pdev;
5904 unsigned long bar0map_len, bar1map_len, bar2map_len;
5905 int error = -ENODEV;
5906
5907 /* Obtain PCI device reference */
5908 if (!phba->pcidev)
5909 return error;
5910 else
5911 pdev = phba->pcidev;
5912
5913 /* Set the device DMA mask size */
5914 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
5915 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5916 return error;
5917
5918 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
 5919 * number of bytes required by each mapping. They actually map
 5920 * to PCI BAR regions 1, 2, and 4 on the SLI4 device.
5921 */
5922 phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
5923 bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);
5924
5925 phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
5926 bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);
5927
5928 phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
5929 bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);
5930
5931 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
5932 phba->sli4_hba.conf_regs_memmap_p =
5933 ioremap(phba->pci_bar0_map, bar0map_len);
5934 if (!phba->sli4_hba.conf_regs_memmap_p) {
5935 dev_printk(KERN_ERR, &pdev->dev,
5936 "ioremap failed for SLI4 PCI config registers.\n");
5937 goto out;
5938 }
5939
5940 /* Map SLI4 HBA Control Register base to a kernel virtual address. */
5941 phba->sli4_hba.ctrl_regs_memmap_p =
5942 ioremap(phba->pci_bar1_map, bar1map_len);
5943 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
5944 dev_printk(KERN_ERR, &pdev->dev,
5945 "ioremap failed for SLI4 HBA control registers.\n");
5946 goto out_iounmap_conf;
5947 }
5948
5949 /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
5950 phba->sli4_hba.drbl_regs_memmap_p =
5951 ioremap(phba->pci_bar2_map, bar2map_len);
5952 if (!phba->sli4_hba.drbl_regs_memmap_p) {
5953 dev_printk(KERN_ERR, &pdev->dev,
5954 "ioremap failed for SLI4 HBA doorbell registers.\n");
5955 goto out_iounmap_ctrl;
5956 }
5957
5958 /* Set up BAR0 PCI config space register memory map */
5959 lpfc_sli4_bar0_register_memmap(phba);
5960
5961 /* Set up BAR1 register memory map */
5962 lpfc_sli4_bar1_register_memmap(phba);
5963
5964 /* Set up BAR2 register memory map */
5965 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
5966 if (error)
5967 goto out_iounmap_all;
5968
5969 return 0;
5970
5971out_iounmap_all:
5972 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
5973out_iounmap_ctrl:
5974 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
5975out_iounmap_conf:
5976 iounmap(phba->sli4_hba.conf_regs_memmap_p);
5977out:
5978 return error;
5979}
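Once the three BARs are mapped, SLI4 registers are reached through the saved kernel virtual addresses with the normal MMIO accessors. A hedged sketch follows; EXAMPLE_REG_OFFSET is a made-up placeholder, not a real SLI4 register offset.

/* Hedged sketch: read a register through the mapped SLI4 config space BAR.
 * EXAMPLE_REG_OFFSET is a hypothetical placeholder offset. */
#define EXAMPLE_REG_OFFSET	0x0

static uint32_t lpfc_sli4_read_example_reg(struct lpfc_hba *phba)
{
	return readl(phba->sli4_hba.conf_regs_memmap_p + EXAMPLE_REG_OFFSET);
}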
5980
5981/**
5982 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
5983 * @phba: pointer to lpfc hba data structure.
5984 *
5985 * This routine is invoked to unset the PCI device memory space for device
5986 * with SLI-4 interface spec.
5987 **/
5988static void
5989lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
5990{
5991 struct pci_dev *pdev;
5992
5993 /* Obtain PCI device reference */
5994 if (!phba->pcidev)
5995 return;
5996 else
5997 pdev = phba->pcidev;
5998
5999 /* Free coherent DMA memory allocated */
6000
6001 /* Unmap I/O memory space */
6002 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6003 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6004 iounmap(phba->sli4_hba.conf_regs_memmap_p);
6005
6006 return;
6007}
6008
6009/**
6010 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6011 * @phba: pointer to lpfc hba data structure.
6012 *
6013 * This routine is invoked to enable the MSI-X interrupt vectors to device
6014 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
6015 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6016 * invoked, enables either all or nothing, depending on the current
6017 * availability of PCI vector resources. The device driver is responsible
6018 * for calling the individual request_irq() to register each MSI-X vector
6019 * with an interrupt handler, which is done in this function. Note that
2326 * later when device is unloading, the driver should always call free_irq() 6020 * later when device is unloading, the driver should always call free_irq()
2327 * on all MSI-X vectors it has done request_irq() on before calling 6021 * on all MSI-X vectors it has done request_irq() on before calling
2328 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device 6022 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
@@ -2333,7 +6027,7 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
2333 * other values - error 6027 * other values - error
2334 **/ 6028 **/
2335static int 6029static int
2336lpfc_enable_msix(struct lpfc_hba *phba) 6030lpfc_sli_enable_msix(struct lpfc_hba *phba)
2337{ 6031{
2338 int rc, i; 6032 int rc, i;
2339 LPFC_MBOXQ_t *pmb; 6033 LPFC_MBOXQ_t *pmb;
@@ -2349,20 +6043,21 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2349 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6043 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2350 "0420 PCI enable MSI-X failed (%d)\n", rc); 6044 "0420 PCI enable MSI-X failed (%d)\n", rc);
2351 goto msi_fail_out; 6045 goto msi_fail_out;
2352 } else 6046 }
2353 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6047 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
2354 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6048 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2355 "0477 MSI-X entry[%d]: vector=x%x " 6049 "0477 MSI-X entry[%d]: vector=x%x "
2356 "message=%d\n", i, 6050 "message=%d\n", i,
2357 phba->msix_entries[i].vector, 6051 phba->msix_entries[i].vector,
2358 phba->msix_entries[i].entry); 6052 phba->msix_entries[i].entry);
2359 /* 6053 /*
2360 * Assign MSI-X vectors to interrupt handlers 6054 * Assign MSI-X vectors to interrupt handlers
2361 */ 6055 */
2362 6056
2363 /* vector-0 is associated to slow-path handler */ 6057 /* vector-0 is associated to slow-path handler */
2364 rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler, 6058 rc = request_irq(phba->msix_entries[0].vector,
2365 IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba); 6059 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6060 LPFC_SP_DRIVER_HANDLER_NAME, phba);
2366 if (rc) { 6061 if (rc) {
2367 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6062 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2368 "0421 MSI-X slow-path request_irq failed " 6063 "0421 MSI-X slow-path request_irq failed "
@@ -2371,8 +6066,9 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2371 } 6066 }
2372 6067
2373 /* vector-1 is associated to fast-path handler */ 6068 /* vector-1 is associated to fast-path handler */
2374 rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler, 6069 rc = request_irq(phba->msix_entries[1].vector,
2375 IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba); 6070 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6071 LPFC_FP_DRIVER_HANDLER_NAME, phba);
2376 6072
2377 if (rc) { 6073 if (rc) {
2378 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6074 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -2401,7 +6097,7 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2401 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 6097 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
2402 "0351 Config MSI mailbox command failed, " 6098 "0351 Config MSI mailbox command failed, "
2403 "mbxCmd x%x, mbxStatus x%x\n", 6099 "mbxCmd x%x, mbxStatus x%x\n",
2404 pmb->mb.mbxCommand, pmb->mb.mbxStatus); 6100 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
2405 goto mbx_fail_out; 6101 goto mbx_fail_out;
2406 } 6102 }
2407 6103
@@ -2428,14 +6124,14 @@ msi_fail_out:
2428} 6124}
2429 6125
2430/** 6126/**
2431 * lpfc_disable_msix - Disable MSI-X interrupt mode 6127 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
2432 * @phba: pointer to lpfc hba data structure. 6128 * @phba: pointer to lpfc hba data structure.
2433 * 6129 *
2434 * This routine is invoked to release the MSI-X vectors and then disable the 6130 * This routine is invoked to release the MSI-X vectors and then disable the
2435 * MSI-X interrupt mode. 6131 * MSI-X interrupt mode to device with SLI-3 interface spec.
2436 **/ 6132 **/
2437static void 6133static void
2438lpfc_disable_msix(struct lpfc_hba *phba) 6134lpfc_sli_disable_msix(struct lpfc_hba *phba)
2439{ 6135{
2440 int i; 6136 int i;
2441 6137
@@ -2444,23 +6140,26 @@ lpfc_disable_msix(struct lpfc_hba *phba)
2444 free_irq(phba->msix_entries[i].vector, phba); 6140 free_irq(phba->msix_entries[i].vector, phba);
2445 /* Disable MSI-X */ 6141 /* Disable MSI-X */
2446 pci_disable_msix(phba->pcidev); 6142 pci_disable_msix(phba->pcidev);
6143
6144 return;
2447} 6145}
2448 6146
2449/** 6147/**
2450 * lpfc_enable_msi - Enable MSI interrupt mode 6148 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
2451 * @phba: pointer to lpfc hba data structure. 6149 * @phba: pointer to lpfc hba data structure.
2452 * 6150 *
2453 * This routine is invoked to enable the MSI interrupt mode. The kernel 6151 * This routine is invoked to enable the MSI interrupt mode to device with
2454 * function pci_enable_msi() is called to enable the MSI vector. The 6152 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
2455 * device driver is responsible for calling the request_irq() to register 6153 * enable the MSI vector. The device driver is responsible for calling the
2456 * MSI vector with an interrupt handler, which is done in this function. 6154 * request_irq() to register MSI vector with an interrupt handler, which
6155 * is done in this function.
2457 * 6156 *
2458 * Return codes 6157 * Return codes
2459 * 0 - successful 6158 * 0 - successful
2460 * other values - error 6159 * other values - error
2461 */ 6160 */
2462static int 6161static int
2463lpfc_enable_msi(struct lpfc_hba *phba) 6162lpfc_sli_enable_msi(struct lpfc_hba *phba)
2464{ 6163{
2465 int rc; 6164 int rc;
2466 6165
@@ -2474,7 +6173,7 @@ lpfc_enable_msi(struct lpfc_hba *phba)
2474 return rc; 6173 return rc;
2475 } 6174 }
2476 6175
2477 rc = request_irq(phba->pcidev->irq, lpfc_intr_handler, 6176 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
2478 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6177 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2479 if (rc) { 6178 if (rc) {
2480 pci_disable_msi(phba->pcidev); 6179 pci_disable_msi(phba->pcidev);
@@ -2485,17 +6184,17 @@ lpfc_enable_msi(struct lpfc_hba *phba)
2485} 6184}
2486 6185
2487/** 6186/**
2488 * lpfc_disable_msi - Disable MSI interrupt mode 6187 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
2489 * @phba: pointer to lpfc hba data structure. 6188 * @phba: pointer to lpfc hba data structure.
2490 * 6189 *
2491 * This routine is invoked to disable the MSI interrupt mode. The driver 6190 * This routine is invoked to disable the MSI interrupt mode to device with
2492 * calls free_irq() on MSI vector it has done request_irq() on before 6191 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
2493 * calling pci_disable_msi(). Failure to do so results in a BUG_ON() and 6192 * done request_irq() on before calling pci_disable_msi(). Failure to do so
2494 * a device will be left with MSI enabled and leaks its vector. 6193 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
6194 * its vector.
2495 */ 6195 */
2496
2497static void 6196static void
2498lpfc_disable_msi(struct lpfc_hba *phba) 6197lpfc_sli_disable_msi(struct lpfc_hba *phba)
2499{ 6198{
2500 free_irq(phba->pcidev->irq, phba); 6199 free_irq(phba->pcidev->irq, phba);
2501 pci_disable_msi(phba->pcidev); 6200 pci_disable_msi(phba->pcidev);
@@ -2503,80 +6202,298 @@ lpfc_disable_msi(struct lpfc_hba *phba)
2503} 6202}
2504 6203
2505/** 6204/**
2506 * lpfc_log_intr_mode - Log the active interrupt mode 6205 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
2507 * @phba: pointer to lpfc hba data structure. 6206 * @phba: pointer to lpfc hba data structure.
2508 * @intr_mode: active interrupt mode adopted.
2509 * 6207 *
2510 * This routine it invoked to log the currently used active interrupt mode 6208 * This routine is invoked to enable device interrupt and associate driver's
2511 * to the device. 6209 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
2512 */ 6210 * spec. Depending on the interrupt mode configured to the driver, the driver
6211 * will try to fall back from the configured interrupt mode to an interrupt
6212 * mode which is supported by the platform, kernel, and device in the order
6213 * of:
6214 * MSI-X -> MSI -> IRQ.
6215 *
6216 * Return codes
6217 * 0 - successful
6218 * other values - error
6219 **/
6220static uint32_t
6221lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6222{
6223 uint32_t intr_mode = LPFC_INTR_ERROR;
6224 int retval;
6225
6226 if (cfg_mode == 2) {
6227 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
6228 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
6229 if (!retval) {
6230 /* Now, try to enable MSI-X interrupt mode */
6231 retval = lpfc_sli_enable_msix(phba);
6232 if (!retval) {
6233 /* Indicate initialization to MSI-X mode */
6234 phba->intr_type = MSIX;
6235 intr_mode = 2;
6236 }
6237 }
6238 }
6239
6240 /* Fallback to MSI if MSI-X initialization failed */
6241 if (cfg_mode >= 1 && phba->intr_type == NONE) {
6242 retval = lpfc_sli_enable_msi(phba);
6243 if (!retval) {
6244 /* Indicate initialization to MSI mode */
6245 phba->intr_type = MSI;
6246 intr_mode = 1;
6247 }
6248 }
6249
6250 /* Fallback to INTx if both MSI-X/MSI initialization failed */
6251 if (phba->intr_type == NONE) {
6252 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6253 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6254 if (!retval) {
6255 /* Indicate initialization to INTx mode */
6256 phba->intr_type = INTx;
6257 intr_mode = 0;
6258 }
6259 }
6260 return intr_mode;
6261}
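The MSI-X -> MSI -> INTx ladder above is not lpfc-specific; the same shape can be written for any PCI driver. A generic, hedged sketch (driver names and the handler are placeholders, and the per-vector request_irq() calls a real MSI-X path needs are omitted for brevity):

/* Hedged generic sketch of the MSI-X -> MSI -> INTx fallback ladder. */
static int enable_intr_with_fallback(struct pci_dev *pdev,
				     struct msix_entry *entries, int nvec,
				     irq_handler_t handler, void *dev_id)
{
	if (!pci_enable_msix(pdev, entries, nvec))
		return 2;	/* MSI-X mode (vectors still need request_irq) */

	if (!pci_enable_msi(pdev)) {
		if (!request_irq(pdev->irq, handler, 0, "example", dev_id))
			return 1;	/* MSI mode */
		pci_disable_msi(pdev);
	}

	if (!request_irq(pdev->irq, handler, IRQF_SHARED, "example", dev_id))
		return 0;	/* legacy INTx mode */

	return -ENODEV;
}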
6262
6263/**
6264 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
6265 * @phba: pointer to lpfc hba data structure.
6266 *
6267 * This routine is invoked to disable device interrupt and disassociate the
6268 * driver's interrupt handler(s) from interrupt vector(s) to device with
6269 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
6270 * release the interrupt vector(s) for the message signaled interrupt.
6271 **/
2513static void 6272static void
2514lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 6273lpfc_sli_disable_intr(struct lpfc_hba *phba)
2515{ 6274{
2516 switch (intr_mode) { 6275 /* Disable the currently initialized interrupt mode */
2517 case 0: 6276 if (phba->intr_type == MSIX)
2518 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6277 lpfc_sli_disable_msix(phba);
2519 "0470 Enable INTx interrupt mode.\n"); 6278 else if (phba->intr_type == MSI)
2520 break; 6279 lpfc_sli_disable_msi(phba);
2521 case 1: 6280 else if (phba->intr_type == INTx)
6281 free_irq(phba->pcidev->irq, phba);
6282
6283 /* Reset interrupt management states */
6284 phba->intr_type = NONE;
6285 phba->sli.slistat.sli_intr = 0;
6286
6287 return;
6288}
6289
6290/**
6291 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
6292 * @phba: pointer to lpfc hba data structure.
6293 *
6294 * This routine is invoked to enable the MSI-X interrupt vectors to device
6295 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
6296 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
6297 * enables either all or nothing, depending on the current availability of
6298 * PCI vector resources. The device driver is responsible for calling the
6299 * individual request_irq() to register each MSI-X vector with an interrupt
6300 * handler, which is done in this function. Note that later when device is
6301 * unloading, the driver should always call free_irq() on all MSI-X vectors
6302 * it has done request_irq() on before calling pci_disable_msix(). Failure
6303 * to do so results in a BUG_ON() and a device will be left with MSI-X
6304 * enabled and leaks its vectors.
6305 *
6306 * Return codes
6307 * 0 - successful
6308 * other values - error
6309 **/
6310static int
6311lpfc_sli4_enable_msix(struct lpfc_hba *phba)
6312{
6313 int rc, index;
6314
6315 /* Set up MSI-X multi-message vectors */
6316 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6317 phba->sli4_hba.msix_entries[index].entry = index;
6318
6319 /* Configure MSI-X capability structure */
6320 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
6321 phba->sli4_hba.cfg_eqn);
6322 if (rc) {
2522 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6323 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2523 "0481 Enabled MSI interrupt mode.\n"); 6324 "0484 PCI enable MSI-X failed (%d)\n", rc);
2524 break; 6325 goto msi_fail_out;
2525 case 2: 6326 }
6327 /* Log MSI-X vector assignment */
6328 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
2526 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6329 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2527 "0480 Enabled MSI-X interrupt mode.\n"); 6330 "0489 MSI-X entry[%d]: vector=x%x "
2528 break; 6331 "message=%d\n", index,
2529 default: 6332 phba->sli4_hba.msix_entries[index].vector,
2530 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6333 phba->sli4_hba.msix_entries[index].entry);
2531 "0482 Illegal interrupt mode.\n"); 6334 /*
2532 break; 6335 * Assign MSI-X vectors to interrupt handlers
6336 */
6337
6338 /* The first vector must be associated to the slow-path handler for MQ */
6339 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
6340 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
6341 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6342 if (rc) {
6343 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6344 "0485 MSI-X slow-path request_irq failed "
6345 "(%d)\n", rc);
6346 goto msi_fail_out;
2533 } 6347 }
2534 return; 6348
6349 /* The rest of the vector(s) are associated to fast-path handler(s) */
6350 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
6351 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
6352 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
6353 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
6354 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
6355 LPFC_FP_DRIVER_HANDLER_NAME,
6356 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6357 if (rc) {
6358 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6359 "0486 MSI-X fast-path (%d) "
6360 "request_irq failed (%d)\n", index, rc);
6361 goto cfg_fail_out;
6362 }
6363 }
6364
6365 return rc;
6366
6367cfg_fail_out:
6368 /* free the irq already requested */
6369 for (--index; index >= 1; index--)
6370 free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
6371 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6372
6373 /* free the irq already requested */
6374 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6375
6376msi_fail_out:
6377 /* Unconfigure MSI-X capability structure */
6378 pci_disable_msix(phba->pcidev);
6379 return rc;
2535} 6380}
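The error path above frees, in reverse order, only the vectors that were successfully requested before the failure. That unwind pattern is worth calling out on its own; a generic, hedged sketch (names are placeholders):

/* Hedged sketch: request nvec IRQs, unwinding the ones already granted
 * if any request_irq() in the middle fails. */
static int request_msix_vectors(struct msix_entry *entries, int nvec,
				irq_handler_t handler, void *dev_id)
{
	int i, rc;

	for (i = 0; i < nvec; i++) {
		rc = request_irq(entries[i].vector, handler, IRQF_SHARED,
				 "example-vec", dev_id);
		if (rc)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)
		free_irq(entries[i].vector, dev_id);
	return rc;
}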
2536 6381
6382/**
6383 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
6384 * @phba: pointer to lpfc hba data structure.
6385 *
6386 * This routine is invoked to release the MSI-X vectors and then disable the
6387 * MSI-X interrupt mode to device with SLI-4 interface spec.
6388 **/
2537static void 6389static void
2538lpfc_stop_port(struct lpfc_hba *phba) 6390lpfc_sli4_disable_msix(struct lpfc_hba *phba)
2539{ 6391{
2540 /* Clear all interrupt enable conditions */ 6392 int index;
2541 writel(0, phba->HCregaddr);
2542 readl(phba->HCregaddr); /* flush */
2543 /* Clear all pending interrupts */
2544 writel(0xffffffff, phba->HAregaddr);
2545 readl(phba->HAregaddr); /* flush */
2546 6393
2547 /* Reset some HBA SLI setup states */ 6394 /* Free up MSI-X multi-message vectors */
2548 lpfc_stop_phba_timers(phba); 6395 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
2549 phba->pport->work_port_events = 0; 6396
6397 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
6398 free_irq(phba->sli4_hba.msix_entries[index].vector,
6399 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6400 /* Disable MSI-X */
6401 pci_disable_msix(phba->pcidev);
6402
6403 return;
6404}
6405
6406/**
6407 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
6408 * @phba: pointer to lpfc hba data structure.
6409 *
6410 * This routine is invoked to enable the MSI interrupt mode to device with
6411 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
6412 * to enable the MSI vector. The device driver is responsible for calling
6413 * the request_irq() to register the MSI vector with an interrupt handler,
6414 * which is done in this function.
6415 *
6416 * Return codes
6417 * 0 - successful
6418 * other values - error
6419 **/
6420static int
6421lpfc_sli4_enable_msi(struct lpfc_hba *phba)
6422{
6423 int rc, index;
2550 6424
6425 rc = pci_enable_msi(phba->pcidev);
6426 if (!rc)
6427 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6428 "0487 PCI enable MSI mode success.\n");
6429 else {
6430 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6431 "0488 PCI enable MSI mode failed (%d)\n", rc);
6432 return rc;
6433 }
6434
6435 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6436 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6437 if (rc) {
6438 pci_disable_msi(phba->pcidev);
6439 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6440 "0490 MSI request_irq failed (%d)\n", rc);
6441 }
6442
6443 for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
6444 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6445 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6446 }
6447
6448 return rc;
6449}
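Even with a single MSI vector, every fast-path event queue still needs an initialized handle, which is why the loop above points each fcp_eq_hdl entry back at the HBA; the same initialization recurs in the INTx fallback further down. A hedged sketch of it as a standalone helper (the helper name is hypothetical):

/* Hedged sketch: point every fast-path EQ handle at this HBA for
 * single-vector (MSI or INTx) operation. */
static void lpfc_sli4_init_fcp_eq_hdl(struct lpfc_hba *phba)
{
	int index;

	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
	}
}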
6450
6451/**
6452 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
6453 * @phba: pointer to lpfc hba data structure.
6454 *
6455 * This routine is invoked to disable the MSI interrupt mode to device with
6456 * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has
6457 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6458 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
6459 * its vector.
6460 **/
6461static void
6462lpfc_sli4_disable_msi(struct lpfc_hba *phba)
6463{
6464 free_irq(phba->pcidev->irq, phba);
6465 pci_disable_msi(phba->pcidev);
2551 return; 6466 return;
2552} 6467}
2553 6468
2554/** 6469/**
2555 * lpfc_enable_intr - Enable device interrupt 6470 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
2556 * @phba: pointer to lpfc hba data structure. 6471 * @phba: pointer to lpfc hba data structure.
2557 * 6472 *
2558 * This routine is invoked to enable device interrupt and associate driver's 6473 * This routine is invoked to enable device interrupt and associate driver's
2559 * interrupt handler(s) to interrupt vector(s). Depends on the interrupt 6474 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
2560 * mode configured to the driver, the driver will try to fallback from the 6475 * interface spec. Depending on the interrupt mode configured to the driver,
2561 * configured interrupt mode to an interrupt mode which is supported by the 6476 * the driver will try to fall back from the configured interrupt mode to an
2562 * platform, kernel, and device in the order of: MSI-X -> MSI -> IRQ. 6477 * interrupt mode which is supported by the platform, kernel, and device in
6478 * the order of:
6479 * MSI-X -> MSI -> IRQ.
2563 * 6480 *
2564 * Return codes 6481 * Return codes
2565 * 0 - successful 6482 * 0 - successful
2566 * other values - error 6483 * other values - error
2567 **/ 6484 **/
2568static uint32_t 6485static uint32_t
2569lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 6486lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
2570{ 6487{
2571 uint32_t intr_mode = LPFC_INTR_ERROR; 6488 uint32_t intr_mode = LPFC_INTR_ERROR;
2572 int retval; 6489 int retval, index;
2573 6490
2574 if (cfg_mode == 2) { 6491 if (cfg_mode == 2) {
2575 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 6492 /* Preparation before conf_msi mbox cmd */
2576 retval = lpfc_sli_config_port(phba, 3); 6493 retval = 0;
2577 if (!retval) { 6494 if (!retval) {
2578 /* Now, try to enable MSI-X interrupt mode */ 6495 /* Now, try to enable MSI-X interrupt mode */
2579 retval = lpfc_enable_msix(phba); 6496 retval = lpfc_sli4_enable_msix(phba);
2580 if (!retval) { 6497 if (!retval) {
2581 /* Indicate initialization to MSI-X mode */ 6498 /* Indicate initialization to MSI-X mode */
2582 phba->intr_type = MSIX; 6499 phba->intr_type = MSIX;
@@ -2587,7 +6504,7 @@ lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
2587 6504
2588 /* Fallback to MSI if MSI-X initialization failed */ 6505 /* Fallback to MSI if MSI-X initialization failed */
2589 if (cfg_mode >= 1 && phba->intr_type == NONE) { 6506 if (cfg_mode >= 1 && phba->intr_type == NONE) {
2590 retval = lpfc_enable_msi(phba); 6507 retval = lpfc_sli4_enable_msi(phba);
2591 if (!retval) { 6508 if (!retval) {
2592 /* Indicate initialization to MSI mode */ 6509 /* Indicate initialization to MSI mode */
2593 phba->intr_type = MSI; 6510 phba->intr_type = MSI;
@@ -2597,34 +6514,39 @@ lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
2597 6514
2598 /* Fallback to INTx if both MSI-X/MSI initialization failed */ 6515 /* Fallback to INTx if both MSI-X/MSI initialization failed */
2599 if (phba->intr_type == NONE) { 6516 if (phba->intr_type == NONE) {
2600 retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, 6517 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
2601 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6518 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2602 if (!retval) { 6519 if (!retval) {
2603 /* Indicate initialization to INTx mode */ 6520 /* Indicate initialization to INTx mode */
2604 phba->intr_type = INTx; 6521 phba->intr_type = INTx;
2605 intr_mode = 0; 6522 intr_mode = 0;
6523 for (index = 0; index < phba->cfg_fcp_eq_count;
6524 index++) {
6525 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6526 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6527 }
2606 } 6528 }
2607 } 6529 }
2608 return intr_mode; 6530 return intr_mode;
2609} 6531}
2610 6532
2611/** 6533/**
2612 * lpfc_disable_intr - Disable device interrupt 6534 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
2613 * @phba: pointer to lpfc hba data structure. 6535 * @phba: pointer to lpfc hba data structure.
2614 * 6536 *
2615 * This routine is invoked to disable device interrupt and disassociate the 6537 * This routine is invoked to disable device interrupt and disassociate
2616 * driver's interrupt handler(s) from interrupt vector(s). Depending on the 6538 * the driver's interrupt handler(s) from interrupt vector(s) to device
2617 * interrupt mode, the driver will release the interrupt vector(s) for the 6539 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
2618 * message signaled interrupt. 6540 * will release the interrupt vector(s) for the message signaled interrupt.
2619 **/ 6541 **/
2620static void 6542static void
2621lpfc_disable_intr(struct lpfc_hba *phba) 6543lpfc_sli4_disable_intr(struct lpfc_hba *phba)
2622{ 6544{
2623 /* Disable the currently initialized interrupt mode */ 6545 /* Disable the currently initialized interrupt mode */
2624 if (phba->intr_type == MSIX) 6546 if (phba->intr_type == MSIX)
2625 lpfc_disable_msix(phba); 6547 lpfc_sli4_disable_msix(phba);
2626 else if (phba->intr_type == MSI) 6548 else if (phba->intr_type == MSI)
2627 lpfc_disable_msi(phba); 6549 lpfc_sli4_disable_msi(phba);
2628 else if (phba->intr_type == INTx) 6550 else if (phba->intr_type == INTx)
2629 free_irq(phba->pcidev->irq, phba); 6551 free_irq(phba->pcidev->irq, phba);
2630 6552
@@ -2636,263 +6558,233 @@ lpfc_disable_intr(struct lpfc_hba *phba)
2636} 6558}
2637 6559
2638/** 6560/**
2639 * lpfc_pci_probe_one - lpfc PCI probe func to register device to PCI subsystem 6561 * lpfc_unset_hba - Unset SLI3 hba device initialization
2640 * @pdev: pointer to PCI device 6562 * @phba: pointer to lpfc hba data structure.
2641 * @pid: pointer to PCI device identifier
2642 *
2643 * This routine is to be registered to the kernel's PCI subsystem. When an
2644 * Emulex HBA is presented in PCI bus, the kernel PCI subsystem looks at
2645 * PCI device-specific information of the device and driver to see if the
2646 * driver state that it can support this kind of device. If the match is
2647 * successful, the driver core invokes this routine. If this routine
2648 * determines it can claim the HBA, it does all the initialization that it
2649 * needs to do to handle the HBA properly.
2650 * 6563 *
2651 * Return code 6564 * This routine is invoked to unset the HBA device initialization steps to
2652 * 0 - driver can claim the device 6565 * a device with SLI-3 interface spec.
2653 * negative value - driver can not claim the device
2654 **/ 6566 **/
2655static int __devinit 6567static void
2656lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 6568lpfc_unset_hba(struct lpfc_hba *phba)
2657{ 6569{
2658 struct lpfc_vport *vport = NULL; 6570 struct lpfc_vport *vport = phba->pport;
2659 struct lpfc_hba *phba; 6571 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2660 struct lpfc_sli *psli;
2661 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
2662 struct Scsi_Host *shost = NULL;
2663 void *ptr;
2664 unsigned long bar0map_len, bar2map_len;
2665 int error = -ENODEV, retval;
2666 int i, hbq_count;
2667 uint16_t iotag;
2668 uint32_t cfg_mode, intr_mode;
2669 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
2670 struct lpfc_adapter_event_header adapter_event;
2671
2672 if (pci_enable_device_mem(pdev))
2673 goto out;
2674 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
2675 goto out_disable_device;
2676
2677 phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL);
2678 if (!phba)
2679 goto out_release_regions;
2680 6572
2681 atomic_set(&phba->fast_event_count, 0); 6573 spin_lock_irq(shost->host_lock);
2682 spin_lock_init(&phba->hbalock); 6574 vport->load_flag |= FC_UNLOADING;
6575 spin_unlock_irq(shost->host_lock);
2683 6576
2684 /* Initialize ndlp management spinlock */ 6577 lpfc_stop_hba_timers(phba);
2685 spin_lock_init(&phba->ndlp_lock);
2686 6578
2687 phba->pcidev = pdev; 6579 phba->pport->work_port_events = 0;
2688 6580
2689 /* Assign an unused board number */ 6581 lpfc_sli_hba_down(phba);
2690 if ((phba->brd_no = lpfc_get_instance()) < 0)
2691 goto out_free_phba;
2692 6582
2693 INIT_LIST_HEAD(&phba->port_list); 6583 lpfc_sli_brdrestart(phba);
2694 init_waitqueue_head(&phba->wait_4_mlo_m_q);
2695 /*
2696 * Get all the module params for configuring this host and then
2697 * establish the host.
2698 */
2699 lpfc_get_cfgparam(phba);
2700 phba->max_vpi = LPFC_MAX_VPI;
2701 6584
2702 /* Initialize timers used by driver */ 6585 lpfc_sli_disable_intr(phba);
2703 init_timer(&phba->hb_tmofunc);
2704 phba->hb_tmofunc.function = lpfc_hb_timeout;
2705 phba->hb_tmofunc.data = (unsigned long)phba;
2706 6586
2707 psli = &phba->sli; 6587 return;
2708 init_timer(&psli->mbox_tmo); 6588}
2709 psli->mbox_tmo.function = lpfc_mbox_timeout;
2710 psli->mbox_tmo.data = (unsigned long) phba;
2711 init_timer(&phba->fcp_poll_timer);
2712 phba->fcp_poll_timer.function = lpfc_poll_timeout;
2713 phba->fcp_poll_timer.data = (unsigned long) phba;
2714 init_timer(&phba->fabric_block_timer);
2715 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
2716 phba->fabric_block_timer.data = (unsigned long) phba;
2717 init_timer(&phba->eratt_poll);
2718 phba->eratt_poll.function = lpfc_poll_eratt;
2719 phba->eratt_poll.data = (unsigned long) phba;
2720 6589
2721 pci_set_master(pdev); 6590/**
2722 pci_save_state(pdev); 6591 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
2723 pci_try_set_mwi(pdev); 6592 * @phba: pointer to lpfc hba data structure.
6593 *
6594 * This routine is invoked to unset the HBA device initialization steps to
6595 * a device with SLI-4 interface spec.
6596 **/
6597static void
6598lpfc_sli4_unset_hba(struct lpfc_hba *phba)
6599{
6600 struct lpfc_vport *vport = phba->pport;
6601 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2724 6602
2725 if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(64)) != 0) 6603 spin_lock_irq(shost->host_lock);
2726 if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(32)) != 0) 6604 vport->load_flag |= FC_UNLOADING;
2727 goto out_idr_remove; 6605 spin_unlock_irq(shost->host_lock);
2728 6606
2729 /* 6607 phba->pport->work_port_events = 0;
2730 * Get the bus address of Bar0 and Bar2 and the number of bytes
2731 * required by each mapping.
2732 */
2733 phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
2734 bar0map_len = pci_resource_len(phba->pcidev, 0);
2735 6608
2736 phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2); 6609 lpfc_sli4_hba_down(phba);
2737 bar2map_len = pci_resource_len(phba->pcidev, 2);
2738 6610
2739 /* Map HBA SLIM to a kernel virtual address. */ 6611 lpfc_sli4_disable_intr(phba);
2740 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
2741 if (!phba->slim_memmap_p) {
2742 error = -ENODEV;
2743 dev_printk(KERN_ERR, &pdev->dev,
2744 "ioremap failed for SLIM memory.\n");
2745 goto out_idr_remove;
2746 }
2747 6612
2748 /* Map HBA Control Registers to a kernel virtual address. */ 6613 return;
2749 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 6614}
2750 if (!phba->ctrl_regs_memmap_p) {
2751 error = -ENODEV;
2752 dev_printk(KERN_ERR, &pdev->dev,
2753 "ioremap failed for HBA control registers.\n");
2754 goto out_iounmap_slim;
2755 }
2756 6615
2757 /* Allocate memory for SLI-2 structures */ 6616/**
2758 phba->slim2p.virt = dma_alloc_coherent(&phba->pcidev->dev, 6617 * lpfc_sli4_hba_unset - Unset the fcoe hba
2759 SLI2_SLIM_SIZE, 6618 * @phba: Pointer to HBA context object.
2760 &phba->slim2p.phys, 6619 *
2761 GFP_KERNEL); 6620 * This function is called in the SLI4 code path to reset the HBA's FCoE
2762 if (!phba->slim2p.virt) 6621 * function. The caller is not required to hold any lock. This routine
2763 goto out_iounmap; 6622 * issues PCI function reset mailbox command to reset the FCoE function.
6623 * At the end of the function, it calls lpfc_hba_down_post function to
6624 * free any pending commands.
6625 **/
6626static void
6627lpfc_sli4_hba_unset(struct lpfc_hba *phba)
6628{
6629 int wait_cnt = 0;
6630 LPFC_MBOXQ_t *mboxq;
2764 6631
2765 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 6632 lpfc_stop_hba_timers(phba);
2766 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 6633 phba->sli4_hba.intr_enable = 0;
2767 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
2768 phba->IOCBs = (phba->slim2p.virt +
2769 offsetof(struct lpfc_sli2_slim, IOCBs));
2770 6634
2771 phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev, 6635 /*
2772 lpfc_sli_hbq_size(), 6636 * Gracefully wait out the potential current outstanding asynchronous
2773 &phba->hbqslimp.phys, 6637 * mailbox command.
2774 GFP_KERNEL); 6638 */
2775 if (!phba->hbqslimp.virt)
2776 goto out_free_slim;
2777 6639
2778 hbq_count = lpfc_sli_hbq_count(); 6640 /* First, block any pending async mailbox command from posted */
2779 ptr = phba->hbqslimp.virt; 6641 spin_lock_irq(&phba->hbalock);
2780 for (i = 0; i < hbq_count; ++i) { 6642 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
2781 phba->hbqs[i].hbq_virt = ptr; 6643 spin_unlock_irq(&phba->hbalock);
2782 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 6644 /* Now, trying to wait it out if we can */
2783 ptr += (lpfc_hbq_defs[i]->entry_count * 6645 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2784 sizeof(struct lpfc_hbq_entry)); 6646 msleep(10);
6647 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
6648 break;
2785 } 6649 }
2786 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 6650 /* Forcefully release the outstanding mailbox command if timed out */
2787 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 6651 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2788 6652 spin_lock_irq(&phba->hbalock);
2789 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 6653 mboxq = phba->sli.mbox_active;
2790 6654 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
2791 INIT_LIST_HEAD(&phba->hbqbuf_in_list); 6655 __lpfc_mbox_cmpl_put(phba, mboxq);
2792 6656 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2793 /* Initialize the SLI Layer to run with lpfc HBAs. */ 6657 phba->sli.mbox_active = NULL;
2794 lpfc_sli_setup(phba); 6658 spin_unlock_irq(&phba->hbalock);
2795 lpfc_sli_queue_setup(phba);
2796
2797 retval = lpfc_mem_alloc(phba);
2798 if (retval) {
2799 error = retval;
2800 goto out_free_hbqslimp;
2801 } 6659 }
2802 6660
2803 /* Initialize and populate the iocb list per host. */ 6661 /* Tear down the queues in the HBA */
2804 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 6662 lpfc_sli4_queue_unset(phba);
2805 for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
2806 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
2807 if (iocbq_entry == NULL) {
2808 printk(KERN_ERR "%s: only allocated %d iocbs of "
2809 "expected %d count. Unloading driver.\n",
2810 __func__, i, LPFC_IOCB_LIST_CNT);
2811 error = -ENOMEM;
2812 goto out_free_iocbq;
2813 }
2814 6663
2815 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 6664 /* Disable PCI subsystem interrupt */
2816 if (iotag == 0) { 6665 lpfc_sli4_disable_intr(phba);
2817 kfree (iocbq_entry);
2818 printk(KERN_ERR "%s: failed to allocate IOTAG. "
2819 "Unloading driver.\n",
2820 __func__);
2821 error = -ENOMEM;
2822 goto out_free_iocbq;
2823 }
2824 6666
2825 spin_lock_irq(&phba->hbalock); 6667 /* Stop kthread signal shall trigger work_done one more time */
2826 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 6668 kthread_stop(phba->worker_thread);
2827 phba->total_iocbq_bufs++;
2828 spin_unlock_irq(&phba->hbalock);
2829 }
2830 6669
2831 /* Initialize HBA structure */ 6670 /* Stop the SLI4 device port */
2832 phba->fc_edtov = FF_DEF_EDTOV; 6671 phba->pport->work_port_events = 0;
2833 phba->fc_ratov = FF_DEF_RATOV; 6672}
2834 phba->fc_altov = FF_DEF_ALTOV;
2835 phba->fc_arbtov = FF_DEF_ARBTOV;
2836 6673
2837 INIT_LIST_HEAD(&phba->work_list); 6674/**
2838 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 6675 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
2839 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 6676 * @pdev: pointer to PCI device
6677 * @pid: pointer to PCI device identifier
6678 *
6679 * This routine is to be called to attach a device with SLI-3 interface spec
6680 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
6681 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
6682 * information of the device and driver to see if the driver state that it can
6683 * support this kind of device. If the match is successful, the driver core
6684 * invokes this routine. If this routine determines it can claim the HBA, it
6685 * does all the initialization that it needs to do to handle the HBA properly.
6686 *
6687 * Return code
6688 * 0 - driver can claim the device
6689 * negative value - driver can not claim the device
6690 **/
6691static int __devinit
6692lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
6693{
6694 struct lpfc_hba *phba;
6695 struct lpfc_vport *vport = NULL;
6696 int error;
6697 uint32_t cfg_mode, intr_mode;
2840 6698
2841 /* Initialize the wait queue head for the kernel thread */ 6699 /* Allocate memory for HBA structure */
2842 init_waitqueue_head(&phba->work_waitq); 6700 phba = lpfc_hba_alloc(pdev);
6701 if (!phba)
6702 return -ENOMEM;
2843 6703
2844 /* Startup the kernel thread for this host adapter. */ 6704 /* Perform generic PCI device enabling operation */
2845 phba->worker_thread = kthread_run(lpfc_do_work, phba, 6705 error = lpfc_enable_pci_dev(phba);
2846 "lpfc_worker_%d", phba->brd_no); 6706 if (error) {
2847 if (IS_ERR(phba->worker_thread)) { 6707 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2848 error = PTR_ERR(phba->worker_thread); 6708 "1401 Failed to enable pci device.\n");
2849 goto out_free_iocbq; 6709 goto out_free_phba;
2850 } 6710 }
2851 6711
2852 /* Initialize the list of scsi buffers used by driver for scsi IO. */ 6712 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
2853 spin_lock_init(&phba->scsi_buf_list_lock); 6713 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
2854 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 6714 if (error)
6715 goto out_disable_pci_dev;
2855 6716
2856 /* Initialize list of fabric iocbs */ 6717 /* Set up SLI-3 specific device PCI memory space */
2857 INIT_LIST_HEAD(&phba->fabric_iocb_list); 6718 error = lpfc_sli_pci_mem_setup(phba);
6719 if (error) {
6720 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6721 "1402 Failed to set up pci memory space.\n");
6722 goto out_disable_pci_dev;
6723 }
2858 6724
2859 /* Initialize list to save ELS buffers */ 6725 /* Set up phase-1 common device driver resources */
2860 INIT_LIST_HEAD(&phba->elsbuf); 6726 error = lpfc_setup_driver_resource_phase1(phba);
6727 if (error) {
6728 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6729 "1403 Failed to set up driver resource.\n");
6730 goto out_unset_pci_mem_s3;
6731 }
2861 6732
2862 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 6733 /* Set up SLI-3 specific device driver resources */
2863 if (!vport) 6734 error = lpfc_sli_driver_resource_setup(phba);
2864 goto out_kthread_stop; 6735 if (error) {
6736 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6737 "1404 Failed to set up driver resource.\n");
6738 goto out_unset_pci_mem_s3;
6739 }
2865 6740
2866 shost = lpfc_shost_from_vport(vport); 6741 /* Initialize and populate the iocb list per host */
2867 phba->pport = vport; 6742 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
2868 lpfc_debugfs_initialize(vport); 6743 if (error) {
6744 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6745 "1405 Failed to initialize iocb list.\n");
6746 goto out_unset_driver_resource_s3;
6747 }
2869 6748
2870 pci_set_drvdata(pdev, shost); 6749 /* Set up common device driver resources */
6750 error = lpfc_setup_driver_resource_phase2(phba);
6751 if (error) {
6752 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6753 "1406 Failed to set up driver resource.\n");
6754 goto out_free_iocb_list;
6755 }
2871 6756
2872 phba->MBslimaddr = phba->slim_memmap_p; 6757 /* Create SCSI host to the physical port */
2873 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 6758 error = lpfc_create_shost(phba);
2874 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 6759 if (error) {
2875 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 6760 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2876 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 6761 "1407 Failed to create scsi host.\n");
6762 goto out_unset_driver_resource;
6763 }
2877 6764
2878 /* Configure sysfs attributes */ 6765 /* Configure sysfs attributes */
2879 if (lpfc_alloc_sysfs_attr(vport)) { 6766 vport = phba->pport;
6767 error = lpfc_alloc_sysfs_attr(vport);
6768 if (error) {
2880 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6769 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2881 "1476 Failed to allocate sysfs attr\n"); 6770 "1476 Failed to allocate sysfs attr\n");
2882 error = -ENOMEM; 6771 goto out_destroy_shost;
2883 goto out_destroy_port;
2884 } 6772 }
2885 6773
6774 /* Now, trying to enable interrupt and bring up the device */
2886 cfg_mode = phba->cfg_use_msi; 6775 cfg_mode = phba->cfg_use_msi;
2887 while (true) { 6776 while (true) {
6777 /* Put device to a known state before enabling interrupt */
6778 lpfc_stop_port(phba);
2888 /* Configure and enable interrupt */ 6779 /* Configure and enable interrupt */
2889 intr_mode = lpfc_enable_intr(phba, cfg_mode); 6780 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
2890 if (intr_mode == LPFC_INTR_ERROR) { 6781 if (intr_mode == LPFC_INTR_ERROR) {
2891 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6782 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2892 "0426 Failed to enable interrupt.\n"); 6783 "0431 Failed to enable interrupt.\n");
6784 error = -ENODEV;
2893 goto out_free_sysfs_attr; 6785 goto out_free_sysfs_attr;
2894 } 6786 }
2895 /* HBA SLI setup */ 6787 /* SLI-3 HBA setup */
2896 if (lpfc_sli_hba_setup(phba)) { 6788 if (lpfc_sli_hba_setup(phba)) {
2897 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6789 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2898 "1477 Failed to set up hba\n"); 6790 "1477 Failed to set up hba\n");
@@ -2902,185 +6794,65 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2902 6794
2903 /* Wait 50ms for the interrupts of previous mailbox commands */ 6795 /* Wait 50ms for the interrupts of previous mailbox commands */
2904 msleep(50); 6796 msleep(50);
2905 /* Check active interrupts received */ 6797 /* Check active interrupts on message signaled interrupts */
2906 if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 6798 if (intr_mode == 0 ||
6799 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
2907 /* Log the current active interrupt mode */ 6800 /* Log the current active interrupt mode */
2908 phba->intr_mode = intr_mode; 6801 phba->intr_mode = intr_mode;
2909 lpfc_log_intr_mode(phba, intr_mode); 6802 lpfc_log_intr_mode(phba, intr_mode);
2910 break; 6803 break;
2911 } else { 6804 } else {
2912 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6805 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2913 "0451 Configure interrupt mode (%d) " 6806 "0447 Configure interrupt mode (%d) "
2914 "failed active interrupt test.\n", 6807 "failed active interrupt test.\n",
2915 intr_mode); 6808 intr_mode);
2916 if (intr_mode == 0) {
2917 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2918 "0479 Failed to enable "
2919 "interrupt.\n");
2920 error = -ENODEV;
2921 goto out_remove_device;
2922 }
2923 /* Stop HBA SLI setups */
2924 lpfc_stop_port(phba);
2925 /* Disable the current interrupt mode */ 6809 /* Disable the current interrupt mode */
2926 lpfc_disable_intr(phba); 6810 lpfc_sli_disable_intr(phba);
2927 /* Try next level of interrupt mode */ 6811 /* Try next level of interrupt mode */
2928 cfg_mode = --intr_mode; 6812 cfg_mode = --intr_mode;
2929 } 6813 }
2930 } 6814 }
2931 6815
2932 /* 6816 /* Perform post initialization setup */
2933 * hba setup may have changed the hba_queue_depth so we need to adjust 6817 lpfc_post_init_setup(phba);
2934 * the value of can_queue.
2935 */
2936 shost->can_queue = phba->cfg_hba_queue_depth - 10;
2937 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
2938
2939 if (lpfc_prot_mask && lpfc_prot_guard) {
2940 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2941 "1478 Registering BlockGuard with the "
2942 "SCSI layer\n");
2943 6818
2944 scsi_host_set_prot(shost, lpfc_prot_mask); 6819 /* Check if there are static vports to be created. */
2945 scsi_host_set_guard(shost, lpfc_prot_guard); 6820 lpfc_create_static_vport(phba);
2946 }
2947 }
2948
2949 if (!_dump_buf_data) {
2950 int pagecnt = 10;
2951 while (pagecnt) {
2952 spin_lock_init(&_dump_buf_lock);
2953 _dump_buf_data =
2954 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
2955 if (_dump_buf_data) {
2956 printk(KERN_ERR "BLKGRD allocated %d pages for "
2957 "_dump_buf_data at 0x%p\n",
2958 (1 << pagecnt), _dump_buf_data);
2959 _dump_buf_data_order = pagecnt;
2960 memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT)
2961 << pagecnt));
2962 break;
2963 } else {
2964 --pagecnt;
2965 }
2966
2967 }
2968
2969 if (!_dump_buf_data_order)
2970 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
2971 "memory for hexdump\n");
2972
2973 } else {
2974 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
2975 "\n", _dump_buf_data);
2976 }
2977
2978
2979 if (!_dump_buf_dif) {
2980 int pagecnt = 10;
2981 while (pagecnt) {
2982 _dump_buf_dif =
2983 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
2984 if (_dump_buf_dif) {
2985 printk(KERN_ERR "BLKGRD allocated %d pages for "
2986 "_dump_buf_dif at 0x%p\n",
2987 (1 << pagecnt), _dump_buf_dif);
2988 _dump_buf_dif_order = pagecnt;
2989 memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT)
2990 << pagecnt));
2991 break;
2992 } else {
2993 --pagecnt;
2994 }
2995
2996 }
2997
2998 if (!_dump_buf_dif_order)
2999 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
3000 "memory for hexdump\n");
3001
3002 } else {
3003 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
3004 _dump_buf_dif);
3005 }
3006
3007 lpfc_host_attrib_init(shost);
3008
3009 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3010 spin_lock_irq(shost->host_lock);
3011 lpfc_poll_start_timer(phba);
3012 spin_unlock_irq(shost->host_lock);
3013 }
3014
3015 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3016 "0428 Perform SCSI scan\n");
3017 /* Send board arrival event to upper layer */
3018 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
3019 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
3020 fc_host_post_vendor_event(shost, fc_get_event_number(),
3021 sizeof(adapter_event),
3022 (char *) &adapter_event,
3023 LPFC_NL_VENDOR_ID);
3024 6821
3025 return 0; 6822 return 0;
3026 6823
3027out_remove_device: 6824out_remove_device:
3028 spin_lock_irq(shost->host_lock); 6825 lpfc_unset_hba(phba);
3029 vport->load_flag |= FC_UNLOADING;
3030 spin_unlock_irq(shost->host_lock);
3031 lpfc_stop_phba_timers(phba);
3032 phba->pport->work_port_events = 0;
3033 lpfc_disable_intr(phba);
3034 lpfc_sli_hba_down(phba);
3035 lpfc_sli_brdrestart(phba);
3036out_free_sysfs_attr: 6826out_free_sysfs_attr:
3037 lpfc_free_sysfs_attr(vport); 6827 lpfc_free_sysfs_attr(vport);
3038out_destroy_port: 6828out_destroy_shost:
3039 destroy_port(vport); 6829 lpfc_destroy_shost(phba);
3040out_kthread_stop: 6830out_unset_driver_resource:
3041 kthread_stop(phba->worker_thread); 6831 lpfc_unset_driver_resource_phase2(phba);
3042out_free_iocbq: 6832out_free_iocb_list:
3043 list_for_each_entry_safe(iocbq_entry, iocbq_next, 6833 lpfc_free_iocb_list(phba);
3044 &phba->lpfc_iocb_list, list) { 6834out_unset_driver_resource_s3:
3045 kfree(iocbq_entry); 6835 lpfc_sli_driver_resource_unset(phba);
3046 phba->total_iocbq_bufs--; 6836out_unset_pci_mem_s3:
3047 } 6837 lpfc_sli_pci_mem_unset(phba);
3048 lpfc_mem_free(phba); 6838out_disable_pci_dev:
3049out_free_hbqslimp: 6839 lpfc_disable_pci_dev(phba);
3050 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
3051 phba->hbqslimp.virt, phba->hbqslimp.phys);
3052out_free_slim:
3053 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
3054 phba->slim2p.virt, phba->slim2p.phys);
3055out_iounmap:
3056 iounmap(phba->ctrl_regs_memmap_p);
3057out_iounmap_slim:
3058 iounmap(phba->slim_memmap_p);
3059out_idr_remove:
3060 idr_remove(&lpfc_hba_index, phba->brd_no);
3061out_free_phba: 6840out_free_phba:
3062 kfree(phba); 6841 lpfc_hba_free(phba);
3063out_release_regions:
3064 pci_release_selected_regions(pdev, bars);
3065out_disable_device:
3066 pci_disable_device(pdev);
3067out:
3068 pci_set_drvdata(pdev, NULL);
3069 if (shost)
3070 scsi_host_put(shost);
3071 return error; 6842 return error;
3072} 6843}
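The probe path does not simply trust the interrupt mode it enabled: after bringing the SLI-3 port up it waits 50 ms and checks that interrupts were actually seen, stepping down from MSI-X toward INTx when the test fails. Condensed into one helper for illustration only (a hedged sketch of the control flow already shown above, not a separate routine in the driver):

/* Hedged sketch: enable an interrupt mode, verify it delivers interrupts,
 * and step down MSI-X -> MSI -> INTx until one mode passes the test. */
static int lpfc_sli_probe_intr_mode(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode;

	while (true) {
		lpfc_stop_port(phba);
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR)
			return -ENODEV;
		if (lpfc_sli_hba_setup(phba))
			return -ENODEV;
		msleep(50);
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			return 0;
		}
		lpfc_sli_disable_intr(phba);
		cfg_mode = --intr_mode;
	}
}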
3073 6844
3074/** 6845/**
3075 * lpfc_pci_remove_one - lpfc PCI func to unregister device from PCI subsystem 6846 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
3076 * @pdev: pointer to PCI device 6847 * @pdev: pointer to PCI device
3077 * 6848 *
3078 * This routine is to be registered to the kernel's PCI subsystem. When an 6849 * This routine is to be called to detach a device with SLI-3 interface
3079 * Emulex HBA is removed from PCI bus, it performs all the necessary cleanup 6850 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
3080 * for the HBA device to be removed from the PCI subsystem properly. 6851 * removed from PCI bus, it performs all the necessary cleanup for the HBA
6852 * device to be removed from the PCI subsystem properly.
3081 **/ 6853 **/
3082static void __devexit 6854static void __devexit
3083lpfc_pci_remove_one(struct pci_dev *pdev) 6855lpfc_pci_remove_one_s3(struct pci_dev *pdev)
3084{ 6856{
3085 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6857 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3086 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6858 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
@@ -3098,7 +6870,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3098 /* Release all the vports against this physical port */ 6870 /* Release all the vports against this physical port */
3099 vports = lpfc_create_vport_work_array(phba); 6871 vports = lpfc_create_vport_work_array(phba);
3100 if (vports != NULL) 6872 if (vports != NULL)
3101 for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++) 6873 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
3102 fc_vport_terminate(vports[i]->fc_vport); 6874 fc_vport_terminate(vports[i]->fc_vport);
3103 lpfc_destroy_vport_work_array(phba, vports); 6875 lpfc_destroy_vport_work_array(phba, vports);
3104 6876
@@ -3120,7 +6892,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3120 /* Final cleanup of txcmplq and reset the HBA */ 6892 /* Final cleanup of txcmplq and reset the HBA */
3121 lpfc_sli_brdrestart(phba); 6893 lpfc_sli_brdrestart(phba);
3122 6894
3123 lpfc_stop_phba_timers(phba); 6895 lpfc_stop_hba_timers(phba);
3124 spin_lock_irq(&phba->hbalock); 6896 spin_lock_irq(&phba->hbalock);
3125 list_del_init(&vport->listentry); 6897 list_del_init(&vport->listentry);
3126 spin_unlock_irq(&phba->hbalock); 6898 spin_unlock_irq(&phba->hbalock);
@@ -3128,7 +6900,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3128 lpfc_debugfs_terminate(vport); 6900 lpfc_debugfs_terminate(vport);
3129 6901
3130 /* Disable interrupt */ 6902 /* Disable interrupt */
3131 lpfc_disable_intr(phba); 6903 lpfc_sli_disable_intr(phba);
3132 6904
3133 pci_set_drvdata(pdev, NULL); 6905 pci_set_drvdata(pdev, NULL);
3134 scsi_host_put(shost); 6906 scsi_host_put(shost);
@@ -3138,7 +6910,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3138 * corresponding pools here. 6910 * corresponding pools here.
3139 */ 6911 */
3140 lpfc_scsi_free(phba); 6912 lpfc_scsi_free(phba);
3141 lpfc_mem_free(phba); 6913 lpfc_mem_free_all(phba);
3142 6914
3143 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 6915 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
3144 phba->hbqslimp.virt, phba->hbqslimp.phys); 6916 phba->hbqslimp.virt, phba->hbqslimp.phys);
@@ -3151,36 +6923,35 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3151 iounmap(phba->ctrl_regs_memmap_p); 6923 iounmap(phba->ctrl_regs_memmap_p);
3152 iounmap(phba->slim_memmap_p); 6924 iounmap(phba->slim_memmap_p);
3153 6925
3154 idr_remove(&lpfc_hba_index, phba->brd_no); 6926 lpfc_hba_free(phba);
3155
3156 kfree(phba);
3157 6927
3158 pci_release_selected_regions(pdev, bars); 6928 pci_release_selected_regions(pdev, bars);
3159 pci_disable_device(pdev); 6929 pci_disable_device(pdev);
3160} 6930}
3161 6931
3162/** 6932/**
3163 * lpfc_pci_suspend_one - lpfc PCI func to suspend device for power management 6933 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
3164 * @pdev: pointer to PCI device 6934 * @pdev: pointer to PCI device
3165 * @msg: power management message 6935 * @msg: power management message
3166 * 6936 *
3167 * This routine is to be registered to the kernel's PCI subsystem to support 6937 * This routine is to be called from the kernel's PCI subsystem to support
3168 * system Power Management (PM). When PM invokes this method, it quiesces the 6938 * system Power Management (PM) to device with SLI-3 interface spec. When
3169 * device by stopping the driver's worker thread for the device, turning off 6939 * PM invokes this method, it quiesces the device by stopping the driver's
3170 * device's interrupt and DMA, and bring the device offline. Note that as the 6940 * worker thread for the device, turning off device's interrupt and DMA,
3171 * driver implements the minimum PM requirements to a power-aware driver's PM 6941 * and bring the device offline. Note that as the driver implements the
3172 * support for suspend/resume -- all the possible PM messages (SUSPEND, 6942 * minimum PM requirements to a power-aware driver's PM support for the
3173 * HIBERNATE, FREEZE) to the suspend() method call will be treated as SUSPEND 6943 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
3174 * and the driver will fully reinitialize its device during resume() method 6944 * to the suspend() method call will be treated as SUSPEND and the driver will
3175 * call, the driver will set device to PCI_D3hot state in PCI config space 6945 * fully reinitialize its device during resume() method call, the driver will
3176 * instead of setting it according to the @msg provided by the PM. 6946 * set device to PCI_D3hot state in PCI config space instead of setting it
6947 * according to the @msg provided by the PM.
3177 * 6948 *
3178 * Return code 6949 * Return code
3179 * 0 - driver suspended the device 6950 * 0 - driver suspended the device
3180 * Error otherwise 6951 * Error otherwise
3181 **/ 6952 **/
3182static int 6953static int
3183lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 6954lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
3184{ 6955{
3185 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6956 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3186 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 6957 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3194,7 +6965,7 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
3194 kthread_stop(phba->worker_thread); 6965 kthread_stop(phba->worker_thread);
3195 6966
3196 /* Disable interrupt from device */ 6967 /* Disable interrupt from device */
3197 lpfc_disable_intr(phba); 6968 lpfc_sli_disable_intr(phba);
3198 6969
3199 /* Save device state to PCI config space */ 6970 /* Save device state to PCI config space */
3200 pci_save_state(pdev); 6971 pci_save_state(pdev);
@@ -3204,25 +6975,26 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
3204} 6975}
3205 6976
3206/** 6977/**
3207 * lpfc_pci_resume_one - lpfc PCI func to resume device for power management 6978 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
3208 * @pdev: pointer to PCI device 6979 * @pdev: pointer to PCI device
3209 * 6980 *
3210 * This routine is to be registered to the kernel's PCI subsystem to support 6981 * This routine is to be called from the kernel's PCI subsystem to support
3211 * system Power Management (PM). When PM invokes this method, it restores 6982 * system Power Management (PM) to device with SLI-3 interface spec. When PM
3212 * the device's PCI config space state and fully reinitializes the device 6983 * invokes this method, it restores the device's PCI config space state and
3213 * and brings it online. Note that as the driver implements the minimum PM 6984 * fully reinitializes the device and brings it online. Note that as the
3214 * requirements to a power-aware driver's PM for suspend/resume -- all 6985 * driver implements the minimum PM requirements to a power-aware driver's
3215 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 6986 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
3216 * method call will be treated as SUSPEND and the driver will fully 6987 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
3217 * reinitialize its device during resume() method call, the device will be 6988 * driver will fully reinitialize its device during resume() method call,
3218 * set to PCI_D0 directly in PCI config space before restoring the state. 6989 * the device will be set to PCI_D0 directly in PCI config space before
6990 * restoring the state.
3219 * 6991 *
3220 * Return code 6992 * Return code
3221 * 0 - driver suspended the device 6993 * 0 - driver suspended the device
3222 * Error otherwise 6994 * Error otherwise
3223 **/ 6995 **/
3224static int 6996static int
3225lpfc_pci_resume_one(struct pci_dev *pdev) 6997lpfc_pci_resume_one_s3(struct pci_dev *pdev)
3226{ 6998{
3227 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6999 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3228 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7000 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3250,7 +7022,7 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
3250 } 7022 }
3251 7023
3252 /* Configure and enable interrupt */ 7024 /* Configure and enable interrupt */
3253 intr_mode = lpfc_enable_intr(phba, phba->intr_mode); 7025 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
3254 if (intr_mode == LPFC_INTR_ERROR) { 7026 if (intr_mode == LPFC_INTR_ERROR) {
3255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7027 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3256 "0430 PM resume Failed to enable interrupt\n"); 7028 "0430 PM resume Failed to enable interrupt\n");
@@ -3269,23 +7041,24 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
3269} 7041}
3270 7042
3271/** 7043/**
3272 * lpfc_io_error_detected - Driver method for handling PCI I/O error detected 7044 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
3273 * @pdev: pointer to PCI device. 7045 * @pdev: pointer to PCI device.
3274 * @state: the current PCI connection state. 7046 * @state: the current PCI connection state.
3275 * 7047 *
3276 * This routine is registered to the PCI subsystem for error handling. This 7048 * This routine is called from the PCI subsystem for I/O error handling to
3277 * function is called by the PCI subsystem after a PCI bus error affecting 7049 * device with SLI-3 interface spec. This function is called by the PCI
3278 * this device has been detected. When this function is invoked, it will 7050 * subsystem after a PCI bus error affecting this device has been detected.
3279 * need to stop all the I/Os and interrupt(s) to the device. Once that is 7051 * When this function is invoked, it will need to stop all the I/Os and
3280 * done, it will return PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to 7052 * interrupt(s) to the device. Once that is done, it will return
3281 * perform proper recovery as desired. 7053 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7054 * as desired.
3282 * 7055 *
3283 * Return codes 7056 * Return codes
3284 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 7057 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
3285 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7058 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
3286 **/ 7059 **/
3287static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, 7060static pci_ers_result_t
3288 pci_channel_state_t state) 7061lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
3289{ 7062{
3290 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7063 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3291 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7064 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3312,30 +7085,32 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
3312 lpfc_sli_abort_iocb_ring(phba, pring); 7085 lpfc_sli_abort_iocb_ring(phba, pring);
3313 7086
3314 /* Disable interrupt */ 7087 /* Disable interrupt */
3315 lpfc_disable_intr(phba); 7088 lpfc_sli_disable_intr(phba);
3316 7089
3317 /* Request a slot reset. */ 7090 /* Request a slot reset. */
3318 return PCI_ERS_RESULT_NEED_RESET; 7091 return PCI_ERS_RESULT_NEED_RESET;
3319} 7092}
3320 7093
3321/** 7094/**
3322 * lpfc_io_slot_reset - Restart a PCI device from scratch 7095 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
3323 * @pdev: pointer to PCI device. 7096 * @pdev: pointer to PCI device.
3324 * 7097 *
3325 * This routine is registered to the PCI subsystem for error handling. This is 7098 * This routine is called from the PCI subsystem for error handling to
3326 * called after PCI bus has been reset to restart the PCI card from scratch, 7099 * device with SLI-3 interface spec. This is called after PCI bus has been
3327 * as if from a cold-boot. During the PCI subsystem error recovery, after the 7100 * reset to restart the PCI card from scratch, as if from a cold-boot.
3328 * driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform 7101 * During the PCI subsystem error recovery, after driver returns
3329 * proper error recovery and then call this routine before calling the .resume 7102 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
3330 * method to recover the device. This function will initialize the HBA device, 7103 * recovery and then call this routine before calling the .resume method
3331 * enable the interrupt, but it will just put the HBA to offline state without 7104 * to recover the device. This function will initialize the HBA device,
3332 * passing any I/O traffic. 7105 * enable the interrupt, but it will just put the HBA to offline state
7106 * without passing any I/O traffic.
3333 * 7107 *
3334 * Return codes 7108 * Return codes
3335 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 7109 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
3336 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7110 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
3337 */ 7111 */
3338static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) 7112static pci_ers_result_t
7113lpfc_io_slot_reset_s3(struct pci_dev *pdev)
3339{ 7114{
3340 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7115 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3341 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7116 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3354,11 +7129,11 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
3354 pci_set_master(pdev); 7129 pci_set_master(pdev);
3355 7130
3356 spin_lock_irq(&phba->hbalock); 7131 spin_lock_irq(&phba->hbalock);
3357 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 7132 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3358 spin_unlock_irq(&phba->hbalock); 7133 spin_unlock_irq(&phba->hbalock);
3359 7134
3360 /* Configure and enable interrupt */ 7135 /* Configure and enable interrupt */
3361 intr_mode = lpfc_enable_intr(phba, phba->intr_mode); 7136 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
3362 if (intr_mode == LPFC_INTR_ERROR) { 7137 if (intr_mode == LPFC_INTR_ERROR) {
3363 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7138 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3364 "0427 Cannot re-enable interrupt after " 7139 "0427 Cannot re-enable interrupt after "
@@ -3378,20 +7153,715 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
3378} 7153}
3379 7154
3380/** 7155/**
3381 * lpfc_io_resume - Resume PCI I/O operation 7156 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
3382 * @pdev: pointer to PCI device 7157 * @pdev: pointer to PCI device
3383 * 7158 *
3384 * This routine is registered to the PCI subsystem for error handling. It is 7159 * This routine is called from the PCI subsystem for error handling to device
3385 * called when kernel error recovery tells the lpfc driver that it is ok to 7160 * with SLI-3 interface spec. It is called when kernel error recovery tells
3386 * resume normal PCI operation after PCI bus error recovery. After this call, 7161 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
3387 * traffic can start to flow from this device again. 7162 * error recovery. After this call, traffic can start to flow from this device
7163 * again.
3388 */ 7164 */
3389static void lpfc_io_resume(struct pci_dev *pdev) 7165static void
7166lpfc_io_resume_s3(struct pci_dev *pdev)
7167{
7168 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7169 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7170
7171 lpfc_online(phba);
7172}
7173
7174/**
7175 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
7176 * @phba: pointer to lpfc hba data structure.
7177 *
7178 * returns the number of ELS/CT IOCBs to reserve
7179 **/
7180int
7181lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
7182{
7183 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
7184
7185 if (phba->sli_rev == LPFC_SLI_REV4) {
7186 if (max_xri <= 100)
7187 return 4;
7188 else if (max_xri <= 256)
7189 return 8;
7190 else if (max_xri <= 512)
7191 return 16;
7192 else if (max_xri <= 1024)
7193 return 32;
7194 else
7195 return 48;
7196 } else
7197 return 0;
7198}
7199
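The helper above scales the ELS/CT reservation with the configured XRI count. As a hedged illustration (the wrapper name lpfc_example_log_els_iocb_cnt and the message number/text are invented for this sketch and are not part of the patch), a caller could log the resulting reservation like so:

static void lpfc_example_log_els_iocb_cnt(struct lpfc_hba *phba)
{
	int els_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	/* max_xri <= 100 -> 4, <= 256 -> 8, <= 512 -> 16, <= 1024 -> 32,
	 * anything larger -> 48; non-SLI-4 ports get 0.
	 */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"9997 reserving %d ELS/CT IOCBs for max_xri %d\n",
			els_cnt, phba->sli4_hba.max_cfg_param.max_xri);
}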
7200/**
7201 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
7202 * @pdev: pointer to PCI device
7203 * @pid: pointer to PCI device identifier
7204 *
7205 * This routine is called from the kernel's PCI subsystem to device with
7206 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7207 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 7208 * information of the device and driver to see if the driver states that it
7209 * can support this kind of device. If the match is successful, the driver
7210 * core invokes this routine. If this routine determines it can claim the HBA,
7211 * it does all the initialization that it needs to do to handle the HBA
7212 * properly.
7213 *
7214 * Return code
7215 * 0 - driver can claim the device
7216 * negative value - driver can not claim the device
7217 **/
7218static int __devinit
7219lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7220{
7221 struct lpfc_hba *phba;
7222 struct lpfc_vport *vport = NULL;
7223 int error;
7224 uint32_t cfg_mode, intr_mode;
7225 int mcnt;
7226
7227 /* Allocate memory for HBA structure */
7228 phba = lpfc_hba_alloc(pdev);
7229 if (!phba)
7230 return -ENOMEM;
7231
7232 /* Perform generic PCI device enabling operation */
7233 error = lpfc_enable_pci_dev(phba);
7234 if (error) {
7235 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7236 "1409 Failed to enable pci device.\n");
7237 goto out_free_phba;
7238 }
7239
7240 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
7241 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
7242 if (error)
7243 goto out_disable_pci_dev;
7244
7245 /* Set up SLI-4 specific device PCI memory space */
7246 error = lpfc_sli4_pci_mem_setup(phba);
7247 if (error) {
7248 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7249 "1410 Failed to set up pci memory space.\n");
7250 goto out_disable_pci_dev;
7251 }
7252
7253 /* Set up phase-1 common device driver resources */
7254 error = lpfc_setup_driver_resource_phase1(phba);
7255 if (error) {
7256 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7257 "1411 Failed to set up driver resource.\n");
7258 goto out_unset_pci_mem_s4;
7259 }
7260
7261 /* Set up SLI-4 Specific device driver resources */
7262 error = lpfc_sli4_driver_resource_setup(phba);
7263 if (error) {
7264 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7265 "1412 Failed to set up driver resource.\n");
7266 goto out_unset_pci_mem_s4;
7267 }
7268
7269 /* Initialize and populate the iocb list per host */
7270 error = lpfc_init_iocb_list(phba,
7271 phba->sli4_hba.max_cfg_param.max_xri);
7272 if (error) {
7273 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7274 "1413 Failed to initialize iocb list.\n");
7275 goto out_unset_driver_resource_s4;
7276 }
7277
7278 /* Set up common device driver resources */
7279 error = lpfc_setup_driver_resource_phase2(phba);
7280 if (error) {
7281 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7282 "1414 Failed to set up driver resource.\n");
7283 goto out_free_iocb_list;
7284 }
7285
7286 /* Create SCSI host to the physical port */
7287 error = lpfc_create_shost(phba);
7288 if (error) {
7289 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7290 "1415 Failed to create scsi host.\n");
7291 goto out_unset_driver_resource;
7292 }
7293
7294 /* Configure sysfs attributes */
7295 vport = phba->pport;
7296 error = lpfc_alloc_sysfs_attr(vport);
7297 if (error) {
7298 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7299 "1416 Failed to allocate sysfs attr\n");
7300 goto out_destroy_shost;
7301 }
7302
7303 /* Now, trying to enable interrupt and bring up the device */
7304 cfg_mode = phba->cfg_use_msi;
7305 while (true) {
7306 /* Put device to a known state before enabling interrupt */
7307 lpfc_stop_port(phba);
7308 /* Configure and enable interrupt */
7309 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
7310 if (intr_mode == LPFC_INTR_ERROR) {
7311 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7312 "0426 Failed to enable interrupt.\n");
7313 error = -ENODEV;
7314 goto out_free_sysfs_attr;
7315 }
7316 /* Set up SLI-4 HBA */
7317 if (lpfc_sli4_hba_setup(phba)) {
7318 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7319 "1421 Failed to set up hba\n");
7320 error = -ENODEV;
7321 goto out_disable_intr;
7322 }
7323
7324 /* Send NOP mbx cmds for non-INTx mode active interrupt test */
7325 if (intr_mode != 0)
7326 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
7327 LPFC_ACT_INTR_CNT);
7328
7329 /* Check active interrupts received only for MSI/MSI-X */
7330 if (intr_mode == 0 ||
7331 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
7332 /* Log the current active interrupt mode */
7333 phba->intr_mode = intr_mode;
7334 lpfc_log_intr_mode(phba, intr_mode);
7335 break;
7336 }
7337 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7338 "0451 Configure interrupt mode (%d) "
7339 "failed active interrupt test.\n",
7340 intr_mode);
 7341		/* Unset the previous SLI-4 HBA setup */
7342 lpfc_sli4_unset_hba(phba);
7343 /* Try next level of interrupt mode */
7344 cfg_mode = --intr_mode;
7345 }
7346
7347 /* Perform post initialization setup */
7348 lpfc_post_init_setup(phba);
7349
7350 return 0;
7351
7352out_disable_intr:
7353 lpfc_sli4_disable_intr(phba);
7354out_free_sysfs_attr:
7355 lpfc_free_sysfs_attr(vport);
7356out_destroy_shost:
7357 lpfc_destroy_shost(phba);
7358out_unset_driver_resource:
7359 lpfc_unset_driver_resource_phase2(phba);
7360out_free_iocb_list:
7361 lpfc_free_iocb_list(phba);
7362out_unset_driver_resource_s4:
7363 lpfc_sli4_driver_resource_unset(phba);
7364out_unset_pci_mem_s4:
7365 lpfc_sli4_pci_mem_unset(phba);
7366out_disable_pci_dev:
7367 lpfc_disable_pci_dev(phba);
7368out_free_phba:
7369 lpfc_hba_free(phba);
7370 return error;
7371}
7372
7373/**
7374 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
7375 * @pdev: pointer to PCI device
7376 *
7377 * This routine is called from the kernel's PCI subsystem to device with
7378 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7379 * removed from PCI bus, it performs all the necessary cleanup for the HBA
7380 * device to be removed from the PCI subsystem properly.
7381 **/
7382static void __devexit
7383lpfc_pci_remove_one_s4(struct pci_dev *pdev)
7384{
7385 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7386 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7387 struct lpfc_vport **vports;
7388 struct lpfc_hba *phba = vport->phba;
7389 int i;
7390
7391 /* Mark the device unloading flag */
7392 spin_lock_irq(&phba->hbalock);
7393 vport->load_flag |= FC_UNLOADING;
7394 spin_unlock_irq(&phba->hbalock);
7395
7396 /* Free the HBA sysfs attributes */
7397 lpfc_free_sysfs_attr(vport);
7398
7399 /* Release all the vports against this physical port */
7400 vports = lpfc_create_vport_work_array(phba);
7401 if (vports != NULL)
7402 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7403 fc_vport_terminate(vports[i]->fc_vport);
7404 lpfc_destroy_vport_work_array(phba, vports);
7405
7406 /* Remove FC host and then SCSI host with the physical port */
7407 fc_remove_host(shost);
7408 scsi_remove_host(shost);
7409
7410 /* Perform cleanup on the physical port */
7411 lpfc_cleanup(vport);
7412
7413 /*
7414 * Bring down the SLI Layer. This step disables all interrupts,
7415 * clears the rings, discards all mailbox commands, and resets
7416 * the HBA FCoE function.
7417 */
7418 lpfc_debugfs_terminate(vport);
7419 lpfc_sli4_hba_unset(phba);
7420
7421 spin_lock_irq(&phba->hbalock);
7422 list_del_init(&vport->listentry);
7423 spin_unlock_irq(&phba->hbalock);
7424
7425 /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
7426 * buffers are released to their corresponding pools here.
7427 */
7428 lpfc_scsi_free(phba);
7429 lpfc_sli4_driver_resource_unset(phba);
7430
7431 /* Unmap adapter Control and Doorbell registers */
7432 lpfc_sli4_pci_mem_unset(phba);
7433
7434 /* Release PCI resources and disable device's PCI function */
7435 scsi_host_put(shost);
7436 lpfc_disable_pci_dev(phba);
7437
7438 /* Finally, free the driver's device data structure */
7439 lpfc_hba_free(phba);
7440
7441 return;
7442}
7443
7444/**
 7445 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmt
7446 * @pdev: pointer to PCI device
7447 * @msg: power management message
7448 *
7449 * This routine is called from the kernel's PCI subsystem to support system
7450 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
7451 * this method, it quiesces the device by stopping the driver's worker
 7452 * thread for the device, turning off device's interrupt and DMA, and bringing
7453 * the device offline. Note that as the driver implements the minimum PM
7454 * requirements to a power-aware driver's PM support for suspend/resume -- all
7455 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
7456 * method call will be treated as SUSPEND and the driver will fully
7457 * reinitialize its device during resume() method call, the driver will set
7458 * device to PCI_D3hot state in PCI config space instead of setting it
7459 * according to the @msg provided by the PM.
7460 *
7461 * Return code
7462 * 0 - driver suspended the device
7463 * Error otherwise
7464 **/
7465static int
7466lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
3390{ 7467{
3391 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7468 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3392 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7469 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3393 7470
7471 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7472 "0298 PCI device Power Management suspend.\n");
7473
7474 /* Bring down the device */
7475 lpfc_offline_prep(phba);
7476 lpfc_offline(phba);
7477 kthread_stop(phba->worker_thread);
7478
7479 /* Disable interrupt from device */
7480 lpfc_sli4_disable_intr(phba);
7481
7482 /* Save device state to PCI config space */
7483 pci_save_state(pdev);
7484 pci_set_power_state(pdev, PCI_D3hot);
7485
7486 return 0;
7487}
7488
7489/**
 7490 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmt
7491 * @pdev: pointer to PCI device
7492 *
7493 * This routine is called from the kernel's PCI subsystem to support system
 7494 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
7495 * this method, it restores the device's PCI config space state and fully
7496 * reinitializes the device and brings it online. Note that as the driver
7497 * implements the minimum PM requirements to a power-aware driver's PM for
7498 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
7499 * to the suspend() method call will be treated as SUSPEND and the driver
7500 * will fully reinitialize its device during resume() method call, the device
7501 * will be set to PCI_D0 directly in PCI config space before restoring the
7502 * state.
7503 *
7504 * Return code
 7505 * 	0 - driver resumed the device
7506 * Error otherwise
7507 **/
7508static int
7509lpfc_pci_resume_one_s4(struct pci_dev *pdev)
7510{
7511 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7512 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7513 uint32_t intr_mode;
7514 int error;
7515
7516 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7517 "0292 PCI device Power Management resume.\n");
7518
7519 /* Restore device state from PCI config space */
7520 pci_set_power_state(pdev, PCI_D0);
7521 pci_restore_state(pdev);
7522 if (pdev->is_busmaster)
7523 pci_set_master(pdev);
7524
7525 /* Startup the kernel thread for this host adapter. */
7526 phba->worker_thread = kthread_run(lpfc_do_work, phba,
7527 "lpfc_worker_%d", phba->brd_no);
7528 if (IS_ERR(phba->worker_thread)) {
7529 error = PTR_ERR(phba->worker_thread);
7530 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7531 "0293 PM resume failed to start worker "
7532 "thread: error=x%x.\n", error);
7533 return error;
7534 }
7535
7536 /* Configure and enable interrupt */
7537 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
7538 if (intr_mode == LPFC_INTR_ERROR) {
7539 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7540 "0294 PM resume Failed to enable interrupt\n");
7541 return -EIO;
7542 } else
7543 phba->intr_mode = intr_mode;
7544
7545 /* Restart HBA and bring it online */
7546 lpfc_sli_brdrestart(phba);
3394 lpfc_online(phba); 7547 lpfc_online(phba);
7548
7549 /* Log the current active interrupt mode */
7550 lpfc_log_intr_mode(phba, phba->intr_mode);
7551
7552 return 0;
7553}
7554
7555/**
7556 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
7557 * @pdev: pointer to PCI device.
7558 * @state: the current PCI connection state.
7559 *
7560 * This routine is called from the PCI subsystem for error handling to device
7561 * with SLI-4 interface spec. This function is called by the PCI subsystem
7562 * after a PCI bus error affecting this device has been detected. When this
7563 * function is invoked, it will need to stop all the I/Os and interrupt(s)
7564 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
7565 * for the PCI subsystem to perform proper recovery as desired.
7566 *
7567 * Return codes
7568 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7569 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7570 **/
7571static pci_ers_result_t
7572lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
7573{
7574 return PCI_ERS_RESULT_NEED_RESET;
7575}
7576
7577/**
 7578 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
7579 * @pdev: pointer to PCI device.
7580 *
7581 * This routine is called from the PCI subsystem for error handling to device
7582 * with SLI-4 interface spec. It is called after PCI bus has been reset to
7583 * restart the PCI card from scratch, as if from a cold-boot. During the
7584 * PCI subsystem error recovery, after the driver returns
7585 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7586 * recovery and then call this routine before calling the .resume method to
7587 * recover the device. This function will initialize the HBA device, enable
7588 * the interrupt, but it will just put the HBA to offline state without
7589 * passing any I/O traffic.
7590 *
7591 * Return codes
7592 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
7593 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7594 */
7595static pci_ers_result_t
7596lpfc_io_slot_reset_s4(struct pci_dev *pdev)
7597{
7598 return PCI_ERS_RESULT_RECOVERED;
7599}
7600
7601/**
7602 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
7603 * @pdev: pointer to PCI device
7604 *
7605 * This routine is called from the PCI subsystem for error handling to device
7606 * with SLI-4 interface spec. It is called when kernel error recovery tells
7607 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
7608 * error recovery. After this call, traffic can start to flow from this device
7609 * again.
7610 **/
7611static void
7612lpfc_io_resume_s4(struct pci_dev *pdev)
7613{
7614 return;
7615}
7616
7617/**
7618 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
7619 * @pdev: pointer to PCI device
7620 * @pid: pointer to PCI device identifier
7621 *
7622 * This routine is to be registered to the kernel's PCI subsystem. When an
7623 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
7624 * at PCI device-specific information of the device and driver to see if the
 7625 * driver states that it can support this kind of device. If the match is
7626 * successful, the driver core invokes this routine. This routine dispatches
7627 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
7628 * do all the initialization that it needs to do to handle the HBA device
7629 * properly.
7630 *
7631 * Return code
7632 * 0 - driver can claim the device
7633 * negative value - driver can not claim the device
7634 **/
7635static int __devinit
7636lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
7637{
7638 int rc;
7639 uint16_t dev_id;
7640
7641 if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id))
7642 return -ENODEV;
7643
7644 switch (dev_id) {
7645 case PCI_DEVICE_ID_TIGERSHARK:
7646 rc = lpfc_pci_probe_one_s4(pdev, pid);
7647 break;
7648 default:
7649 rc = lpfc_pci_probe_one_s3(pdev, pid);
7650 break;
7651 }
7652 return rc;
7653}
7654
7655/**
7656 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
7657 * @pdev: pointer to PCI device
7658 *
7659 * This routine is to be registered to the kernel's PCI subsystem. When an
7660 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
7661 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
7662 * remove routine, which will perform all the necessary cleanup for the
7663 * device to be removed from the PCI subsystem properly.
7664 **/
7665static void __devexit
7666lpfc_pci_remove_one(struct pci_dev *pdev)
7667{
7668 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7669 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7670
7671 switch (phba->pci_dev_grp) {
7672 case LPFC_PCI_DEV_LP:
7673 lpfc_pci_remove_one_s3(pdev);
7674 break;
7675 case LPFC_PCI_DEV_OC:
7676 lpfc_pci_remove_one_s4(pdev);
7677 break;
7678 default:
7679 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7680 "1424 Invalid PCI device group: 0x%x\n",
7681 phba->pci_dev_grp);
7682 break;
7683 }
7684 return;
7685}
7686
7687/**
7688 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
7689 * @pdev: pointer to PCI device
7690 * @msg: power management message
7691 *
7692 * This routine is to be registered to the kernel's PCI subsystem to support
7693 * system Power Management (PM). When PM invokes this method, it dispatches
7694 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
7695 * suspend the device.
7696 *
7697 * Return code
7698 * 0 - driver suspended the device
7699 * Error otherwise
7700 **/
7701static int
7702lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
7703{
7704 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7705 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7706 int rc = -ENODEV;
7707
7708 switch (phba->pci_dev_grp) {
7709 case LPFC_PCI_DEV_LP:
7710 rc = lpfc_pci_suspend_one_s3(pdev, msg);
7711 break;
7712 case LPFC_PCI_DEV_OC:
7713 rc = lpfc_pci_suspend_one_s4(pdev, msg);
7714 break;
7715 default:
7716 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7717 "1425 Invalid PCI device group: 0x%x\n",
7718 phba->pci_dev_grp);
7719 break;
7720 }
7721 return rc;
7722}
7723
7724/**
7725 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
7726 * @pdev: pointer to PCI device
7727 *
7728 * This routine is to be registered to the kernel's PCI subsystem to support
7729 * system Power Management (PM). When PM invokes this method, it dispatches
7730 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
7731 * resume the device.
7732 *
7733 * Return code
 7734 * 	0 - driver resumed the device
7735 * Error otherwise
7736 **/
7737static int
7738lpfc_pci_resume_one(struct pci_dev *pdev)
7739{
7740 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7741 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7742 int rc = -ENODEV;
7743
7744 switch (phba->pci_dev_grp) {
7745 case LPFC_PCI_DEV_LP:
7746 rc = lpfc_pci_resume_one_s3(pdev);
7747 break;
7748 case LPFC_PCI_DEV_OC:
7749 rc = lpfc_pci_resume_one_s4(pdev);
7750 break;
7751 default:
7752 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7753 "1426 Invalid PCI device group: 0x%x\n",
7754 phba->pci_dev_grp);
7755 break;
7756 }
7757 return rc;
7758}
7759
7760/**
7761 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
7762 * @pdev: pointer to PCI device.
7763 * @state: the current PCI connection state.
7764 *
7765 * This routine is registered to the PCI subsystem for error handling. This
7766 * function is called by the PCI subsystem after a PCI bus error affecting
7767 * this device has been detected. When this routine is invoked, it dispatches
7768 * the action to the proper SLI-3 or SLI-4 device error detected handling
7769 * routine, which will perform the proper error detected operation.
7770 *
7771 * Return codes
7772 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7773 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7774 **/
7775static pci_ers_result_t
7776lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
7777{
7778 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7779 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7780 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7781
7782 switch (phba->pci_dev_grp) {
7783 case LPFC_PCI_DEV_LP:
7784 rc = lpfc_io_error_detected_s3(pdev, state);
7785 break;
7786 case LPFC_PCI_DEV_OC:
7787 rc = lpfc_io_error_detected_s4(pdev, state);
7788 break;
7789 default:
7790 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7791 "1427 Invalid PCI device group: 0x%x\n",
7792 phba->pci_dev_grp);
7793 break;
7794 }
7795 return rc;
7796}
7797
7798/**
 7799 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
7800 * @pdev: pointer to PCI device.
7801 *
7802 * This routine is registered to the PCI subsystem for error handling. This
7803 * function is called after PCI bus has been reset to restart the PCI card
7804 * from scratch, as if from a cold-boot. When this routine is invoked, it
7805 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
7806 * routine, which will perform the proper device reset.
7807 *
7808 * Return codes
7809 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
7810 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7811 **/
7812static pci_ers_result_t
7813lpfc_io_slot_reset(struct pci_dev *pdev)
7814{
7815 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7816 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7817 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7818
7819 switch (phba->pci_dev_grp) {
7820 case LPFC_PCI_DEV_LP:
7821 rc = lpfc_io_slot_reset_s3(pdev);
7822 break;
7823 case LPFC_PCI_DEV_OC:
7824 rc = lpfc_io_slot_reset_s4(pdev);
7825 break;
7826 default:
7827 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7828 "1428 Invalid PCI device group: 0x%x\n",
7829 phba->pci_dev_grp);
7830 break;
7831 }
7832 return rc;
7833}
7834
7835/**
7836 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
7837 * @pdev: pointer to PCI device
7838 *
7839 * This routine is registered to the PCI subsystem for error handling. It
7840 * is called when kernel error recovery tells the lpfc driver that it is
7841 * OK to resume normal PCI operation after PCI bus error recovery. When
7842 * this routine is invoked, it dispatches the action to the proper SLI-3
7843 * or SLI-4 device io_resume routine, which will resume the device operation.
7844 **/
7845static void
7846lpfc_io_resume(struct pci_dev *pdev)
7847{
7848 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7849 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7850
7851 switch (phba->pci_dev_grp) {
7852 case LPFC_PCI_DEV_LP:
7853 lpfc_io_resume_s3(pdev);
7854 break;
7855 case LPFC_PCI_DEV_OC:
7856 lpfc_io_resume_s4(pdev);
7857 break;
7858 default:
7859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7860 "1429 Invalid PCI device group: 0x%x\n",
7861 phba->pci_dev_grp);
7862 break;
7863 }
7864 return;
3395} 7865}
3396 7866
3397static struct pci_device_id lpfc_id_table[] = { 7867static struct pci_device_id lpfc_id_table[] = {
@@ -3469,6 +7939,8 @@ static struct pci_device_id lpfc_id_table[] = {
3469 PCI_ANY_ID, PCI_ANY_ID, }, 7939 PCI_ANY_ID, PCI_ANY_ID, },
3470 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, 7940 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
3471 PCI_ANY_ID, PCI_ANY_ID, }, 7941 PCI_ANY_ID, PCI_ANY_ID, },
7942 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
7943 PCI_ANY_ID, PCI_ANY_ID, },
3472 { 0 } 7944 { 0 }
3473}; 7945};
3474 7946
@@ -3486,7 +7958,7 @@ static struct pci_driver lpfc_driver = {
3486 .probe = lpfc_pci_probe_one, 7958 .probe = lpfc_pci_probe_one,
3487 .remove = __devexit_p(lpfc_pci_remove_one), 7959 .remove = __devexit_p(lpfc_pci_remove_one),
3488 .suspend = lpfc_pci_suspend_one, 7960 .suspend = lpfc_pci_suspend_one,
3489 .resume = lpfc_pci_resume_one, 7961 .resume = lpfc_pci_resume_one,
3490 .err_handler = &lpfc_err_handler, 7962 .err_handler = &lpfc_err_handler,
3491}; 7963};
3492 7964
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 1aa85709b012..954ba57970a3 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -18,33 +18,39 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LOG_ELS 0x1 /* ELS events */ 21#define LOG_ELS 0x00000001 /* ELS events */
22#define LOG_DISCOVERY 0x2 /* Link discovery events */ 22#define LOG_DISCOVERY 0x00000002 /* Link discovery events */
23#define LOG_MBOX 0x4 /* Mailbox events */ 23#define LOG_MBOX 0x00000004 /* Mailbox events */
24#define LOG_INIT 0x8 /* Initialization events */ 24#define LOG_INIT 0x00000008 /* Initialization events */
25#define LOG_LINK_EVENT 0x10 /* Link events */ 25#define LOG_LINK_EVENT 0x00000010 /* Link events */
26#define LOG_IP 0x20 /* IP traffic history */ 26#define LOG_IP 0x00000020 /* IP traffic history */
27#define LOG_FCP 0x40 /* FCP traffic history */ 27#define LOG_FCP 0x00000040 /* FCP traffic history */
28#define LOG_NODE 0x80 /* Node table events */ 28#define LOG_NODE 0x00000080 /* Node table events */
29#define LOG_TEMP 0x100 /* Temperature sensor events */ 29#define LOG_TEMP 0x00000100 /* Temperature sensor events */
30#define LOG_BG 0x200 /* BlockGuard events */ 30#define LOG_BG 0x00000200 /* BlockGuard events */
31#define LOG_MISC 0x400 /* Miscellaneous events */ 31#define LOG_MISC 0x00000400 /* Miscellaneous events */
32#define LOG_SLI 0x800 /* SLI events */ 32#define LOG_SLI 0x00000800 /* SLI events */
33#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ 33#define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */
34#define LOG_LIBDFC 0x2000 /* Libdfc events */ 34#define LOG_LIBDFC 0x00002000 /* Libdfc events */
35#define LOG_VPORT 0x4000 /* NPIV events */ 35#define LOG_VPORT 0x00004000 /* NPIV events */
 36#define LOG_ALL_MSG 0xffff /* LOG all messages */ 36#define LOG_SECURITY	0x00008000	/* Security events */
37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
38#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
37 39
38#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ 40#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
39 do { \ 41do { \
40 { if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \ 42 { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
41 dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ 43 dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
42 fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \ 44 fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \
43 } while (0) 45} while (0)
44 46
45#define lpfc_printf_log(phba, level, mask, fmt, arg...) \ 47#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
46 do { \ 48do { \
47 { if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \ 49 { uint32_t log_verbose = (phba)->pport ? \
50 (phba)->pport->cfg_log_verbose : \
51 (phba)->cfg_log_verbose; \
52 if (((mask) & log_verbose) || (level[1] <= '3')) \
48 dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ 53 dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
49 fmt, phba->brd_no, ##arg); } \ 54 fmt, phba->brd_no, ##arg); \
50 } while (0) 55 } \
56} while (0)
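With the verbose masks widened to 32 bits, call sites keep the same shape; a short usage sketch follows (the 99xx message numbers and text are illustrative only, and the masks are combined the same way existing driver call sites do):

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
			"9996 example: mailbox command failed on this vport\n");

	lpfc_printf_vlog(vport, KERN_INFO, LOG_EVENT,
			 "9995 example: CT/TEMP/DUMP event recorded\n");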
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 134fc7fc2127..3423571dd1b3 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28 28
29#include <scsi/scsi.h> 29#include <scsi/scsi.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -39,6 +41,44 @@
39#include "lpfc_compat.h" 41#include "lpfc_compat.h"
40 42
41/** 43/**
44 * lpfc_dump_static_vport - Dump HBA's static vport information.
45 * @phba: pointer to lpfc hba data structure.
46 * @pmb: pointer to the driver internal queue element for mailbox command.
47 * @offset: offset for dumping vport info.
48 *
49 * The dump mailbox command provides a method for the device driver to obtain
50 * various types of information from the HBA device.
51 *
52 * This routine prepares the mailbox command for dumping list of static
53 * vports to be created.
54 **/
55void
56lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
57 uint16_t offset)
58{
59 MAILBOX_t *mb;
60 void *ctx;
61
62 mb = &pmb->u.mb;
63 ctx = pmb->context2;
64
65 /* Setup to dump vport info region */
66 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
67 mb->mbxCommand = MBX_DUMP_MEMORY;
68 mb->un.varDmp.cv = 1;
69 mb->un.varDmp.type = DMP_NV_PARAMS;
70 mb->un.varDmp.entry_index = offset;
71 mb->un.varDmp.region_id = DMP_REGION_VPORT;
72 mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
73 mb->un.varDmp.co = 0;
74 mb->un.varDmp.resp_offset = 0;
75 pmb->context2 = ctx;
76 mb->mbxOwner = OWN_HOST;
77
78 return;
79}
80
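As a hedged sketch (the wrapper name below is invented here; the allocation and issue calls follow the driver's usual mailbox pattern), a caller could post the prepared static-vport dump command like this:

static int lpfc_example_issue_static_vport_dump(struct lpfc_hba *phba,
						uint16_t offset)
{
	LPFC_MBOXQ_t *pmb;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;

	/* Fill in the DUMP_MEMORY command for the static vport region */
	lpfc_dump_static_vport(phba, pmb, offset);

	if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) == MBX_NOT_FINISHED) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}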
81/**
42 * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory 82 * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory
43 * @phba: pointer to lpfc hba data structure. 83 * @phba: pointer to lpfc hba data structure.
44 * @pmb: pointer to the driver internal queue element for mailbox command. 84 * @pmb: pointer to the driver internal queue element for mailbox command.
@@ -58,7 +98,7 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
58 MAILBOX_t *mb; 98 MAILBOX_t *mb;
59 void *ctx; 99 void *ctx;
60 100
61 mb = &pmb->mb; 101 mb = &pmb->u.mb;
62 ctx = pmb->context2; 102 ctx = pmb->context2;
63 103
64 /* Setup to dump VPD region */ 104 /* Setup to dump VPD region */
@@ -90,7 +130,7 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
90 MAILBOX_t *mb; 130 MAILBOX_t *mb;
91 void *ctx; 131 void *ctx;
92 132
93 mb = &pmb->mb; 133 mb = &pmb->u.mb;
94 /* Save context so that we can restore after memset */ 134 /* Save context so that we can restore after memset */
95 ctx = pmb->context2; 135 ctx = pmb->context2;
96 136
@@ -125,7 +165,7 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
125{ 165{
126 MAILBOX_t *mb; 166 MAILBOX_t *mb;
127 167
128 mb = &pmb->mb; 168 mb = &pmb->u.mb;
129 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 169 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
130 mb->mbxCommand = MBX_READ_NV; 170 mb->mbxCommand = MBX_READ_NV;
131 mb->mbxOwner = OWN_HOST; 171 mb->mbxOwner = OWN_HOST;
@@ -151,7 +191,7 @@ lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
151{ 191{
152 MAILBOX_t *mb; 192 MAILBOX_t *mb;
153 193
154 mb = &pmb->mb; 194 mb = &pmb->u.mb;
155 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 195 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
156 mb->mbxCommand = MBX_ASYNCEVT_ENABLE; 196 mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
157 mb->un.varCfgAsyncEvent.ring = ring; 197 mb->un.varCfgAsyncEvent.ring = ring;
@@ -177,7 +217,7 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
177{ 217{
178 MAILBOX_t *mb; 218 MAILBOX_t *mb;
179 219
180 mb = &pmb->mb; 220 mb = &pmb->u.mb;
181 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 221 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
182 mb->mbxCommand = MBX_HEARTBEAT; 222 mb->mbxCommand = MBX_HEARTBEAT;
183 mb->mbxOwner = OWN_HOST; 223 mb->mbxOwner = OWN_HOST;
@@ -211,7 +251,7 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
211 struct lpfc_sli *psli; 251 struct lpfc_sli *psli;
212 252
213 psli = &phba->sli; 253 psli = &phba->sli;
214 mb = &pmb->mb; 254 mb = &pmb->u.mb;
215 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 255 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
216 256
217 INIT_LIST_HEAD(&mp->list); 257 INIT_LIST_HEAD(&mp->list);
@@ -248,7 +288,7 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
248{ 288{
249 MAILBOX_t *mb; 289 MAILBOX_t *mb;
250 290
251 mb = &pmb->mb; 291 mb = &pmb->u.mb;
252 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 292 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
253 293
254 mb->un.varClearLA.eventTag = phba->fc_eventTag; 294 mb->un.varClearLA.eventTag = phba->fc_eventTag;
@@ -275,7 +315,7 @@ void
275lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 315lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
276{ 316{
277 struct lpfc_vport *vport = phba->pport; 317 struct lpfc_vport *vport = phba->pport;
278 MAILBOX_t *mb = &pmb->mb; 318 MAILBOX_t *mb = &pmb->u.mb;
279 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 319 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
280 320
281 /* NEW_FEATURE 321 /* NEW_FEATURE
@@ -321,7 +361,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
321int 361int
322lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 362lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
323{ 363{
324 MAILBOX_t *mb = &pmb->mb; 364 MAILBOX_t *mb = &pmb->u.mb;
325 uint32_t attentionConditions[2]; 365 uint32_t attentionConditions[2];
326 366
327 /* Sanity check */ 367 /* Sanity check */
@@ -405,7 +445,7 @@ lpfc_init_link(struct lpfc_hba * phba,
405 struct lpfc_sli *psli; 445 struct lpfc_sli *psli;
406 MAILBOX_t *mb; 446 MAILBOX_t *mb;
407 447
408 mb = &pmb->mb; 448 mb = &pmb->u.mb;
409 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 449 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
410 450
411 psli = &phba->sli; 451 psli = &phba->sli;
@@ -492,7 +532,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
492 struct lpfc_sli *psli; 532 struct lpfc_sli *psli;
493 533
494 psli = &phba->sli; 534 psli = &phba->sli;
495 mb = &pmb->mb; 535 mb = &pmb->u.mb;
496 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 536 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
497 537
498 mb->mbxOwner = OWN_HOST; 538 mb->mbxOwner = OWN_HOST;
@@ -515,7 +555,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
515 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); 555 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
516 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); 556 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
517 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); 557 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
518 mb->un.varRdSparm.vpi = vpi; 558 mb->un.varRdSparm.vpi = vpi + phba->vpi_base;
519 559
520 /* save address for completion */ 560 /* save address for completion */
521 pmb->context1 = mp; 561 pmb->context1 = mp;
@@ -544,10 +584,12 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
544{ 584{
545 MAILBOX_t *mb; 585 MAILBOX_t *mb;
546 586
547 mb = &pmb->mb; 587 mb = &pmb->u.mb;
548 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 588 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
549 589
550 mb->un.varUnregDID.did = did; 590 mb->un.varUnregDID.did = did;
591 if (vpi != 0xffff)
592 vpi += phba->vpi_base;
551 mb->un.varUnregDID.vpi = vpi; 593 mb->un.varUnregDID.vpi = vpi;
552 594
553 mb->mbxCommand = MBX_UNREG_D_ID; 595 mb->mbxCommand = MBX_UNREG_D_ID;
@@ -573,7 +615,7 @@ lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
573{ 615{
574 MAILBOX_t *mb; 616 MAILBOX_t *mb;
575 617
576 mb = &pmb->mb; 618 mb = &pmb->u.mb;
577 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 619 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
578 620
579 mb->mbxCommand = MBX_READ_CONFIG; 621 mb->mbxCommand = MBX_READ_CONFIG;
@@ -598,7 +640,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
598{ 640{
599 MAILBOX_t *mb; 641 MAILBOX_t *mb;
600 642
601 mb = &pmb->mb; 643 mb = &pmb->u.mb;
602 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 644 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
603 645
604 mb->mbxCommand = MBX_READ_LNK_STAT; 646 mb->mbxCommand = MBX_READ_LNK_STAT;
@@ -607,7 +649,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
607} 649}
608 650
609/** 651/**
610 * lpfc_reg_login - Prepare a mailbox command for registering remote login 652 * lpfc_reg_rpi - Prepare a mailbox command for registering remote login
611 * @phba: pointer to lpfc hba data structure. 653 * @phba: pointer to lpfc hba data structure.
612 * @vpi: virtual N_Port identifier. 654 * @vpi: virtual N_Port identifier.
613 * @did: remote port identifier. 655 * @did: remote port identifier.
@@ -631,17 +673,23 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
631 * 1 - DMA memory allocation failed 673 * 1 - DMA memory allocation failed
632 **/ 674 **/
633int 675int
634lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, 676lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
635 uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag) 677 uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
636{ 678{
637 MAILBOX_t *mb = &pmb->mb; 679 MAILBOX_t *mb = &pmb->u.mb;
638 uint8_t *sparam; 680 uint8_t *sparam;
639 struct lpfc_dmabuf *mp; 681 struct lpfc_dmabuf *mp;
640 682
641 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 683 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
642 684
643 mb->un.varRegLogin.rpi = 0; 685 mb->un.varRegLogin.rpi = 0;
644 mb->un.varRegLogin.vpi = vpi; 686 if (phba->sli_rev == LPFC_SLI_REV4) {
687 mb->un.varRegLogin.rpi = lpfc_sli4_alloc_rpi(phba);
688 if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
689 return 1;
690 }
691
692 mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
645 mb->un.varRegLogin.did = did; 693 mb->un.varRegLogin.did = did;
646 mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */ 694 mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */
647 695
@@ -697,15 +745,16 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
697{ 745{
698 MAILBOX_t *mb; 746 MAILBOX_t *mb;
699 747
700 mb = &pmb->mb; 748 mb = &pmb->u.mb;
701 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 749 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
702 750
703 mb->un.varUnregLogin.rpi = (uint16_t) rpi; 751 mb->un.varUnregLogin.rpi = (uint16_t) rpi;
704 mb->un.varUnregLogin.rsvd1 = 0; 752 mb->un.varUnregLogin.rsvd1 = 0;
705 mb->un.varUnregLogin.vpi = vpi; 753 mb->un.varUnregLogin.vpi = vpi + phba->vpi_base;
706 754
707 mb->mbxCommand = MBX_UNREG_LOGIN; 755 mb->mbxCommand = MBX_UNREG_LOGIN;
708 mb->mbxOwner = OWN_HOST; 756 mb->mbxOwner = OWN_HOST;
757
709 return; 758 return;
710} 759}
711 760
@@ -725,15 +774,15 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
725 * This routine prepares the mailbox command for registering a virtual N_Port. 774 * This routine prepares the mailbox command for registering a virtual N_Port.
726 **/ 775 **/
727void 776void
728lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid, 777lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
729 LPFC_MBOXQ_t *pmb)
730{ 778{
731 MAILBOX_t *mb = &pmb->mb; 779 MAILBOX_t *mb = &pmb->u.mb;
732 780
733 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 781 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
734 782
735 mb->un.varRegVpi.vpi = vpi; 783 mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
736 mb->un.varRegVpi.sid = sid; 784 mb->un.varRegVpi.sid = vport->fc_myDID;
785 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
737 786
738 mb->mbxCommand = MBX_REG_VPI; 787 mb->mbxCommand = MBX_REG_VPI;
739 mb->mbxOwner = OWN_HOST; 788 mb->mbxOwner = OWN_HOST;
@@ -760,10 +809,10 @@ lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
760void 809void
761lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb) 810lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
762{ 811{
763 MAILBOX_t *mb = &pmb->mb; 812 MAILBOX_t *mb = &pmb->u.mb;
764 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 813 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
765 814
766 mb->un.varUnregVpi.vpi = vpi; 815 mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
767 816
768 mb->mbxCommand = MBX_UNREG_VPI; 817 mb->mbxCommand = MBX_UNREG_VPI;
769 mb->mbxOwner = OWN_HOST; 818 mb->mbxOwner = OWN_HOST;
@@ -852,7 +901,7 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
852void 901void
853lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 902lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
854{ 903{
855 MAILBOX_t *mb = &pmb->mb; 904 MAILBOX_t *mb = &pmb->u.mb;
856 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 905 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
857 mb->un.varRdRev.cv = 1; 906 mb->un.varRdRev.cv = 1;
858 mb->un.varRdRev.v3req = 1; /* Request SLI3 info */ 907 mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
@@ -945,7 +994,7 @@ lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
945 uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb) 994 uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
946{ 995{
947 int i; 996 int i;
948 MAILBOX_t *mb = &pmb->mb; 997 MAILBOX_t *mb = &pmb->u.mb;
949 struct config_hbq_var *hbqmb = &mb->un.varCfgHbq; 998 struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
950 999
951 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 1000 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -1020,7 +1069,7 @@ void
1020lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) 1069lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
1021{ 1070{
1022 int i; 1071 int i;
1023 MAILBOX_t *mb = &pmb->mb; 1072 MAILBOX_t *mb = &pmb->u.mb;
1024 struct lpfc_sli *psli; 1073 struct lpfc_sli *psli;
1025 struct lpfc_sli_ring *pring; 1074 struct lpfc_sli_ring *pring;
1026 1075
@@ -1075,7 +1124,7 @@ void
1075lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1124lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1076{ 1125{
1077 MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr; 1126 MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
1078 MAILBOX_t *mb = &pmb->mb; 1127 MAILBOX_t *mb = &pmb->u.mb;
1079 dma_addr_t pdma_addr; 1128 dma_addr_t pdma_addr;
1080 uint32_t bar_low, bar_high; 1129 uint32_t bar_low, bar_high;
1081 size_t offset; 1130 size_t offset;
@@ -1099,21 +1148,22 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1099 1148
1100 /* If HBA supports SLI=3 ask for it */ 1149 /* If HBA supports SLI=3 ask for it */
1101 1150
1102 if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) { 1151 if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
1103 if (phba->cfg_enable_bg) 1152 if (phba->cfg_enable_bg)
1104 mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */ 1153 mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
1154 mb->un.varCfgPort.cdss = 1; /* Configure Security */
1105 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ 1155 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
1106 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ 1156 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
1107 mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */ 1157 mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
1108 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); 1158 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
1109 if (phba->max_vpi && phba->cfg_enable_npiv && 1159 if (phba->max_vpi && phba->cfg_enable_npiv &&
1110 phba->vpd.sli3Feat.cmv) { 1160 phba->vpd.sli3Feat.cmv) {
1111 mb->un.varCfgPort.max_vpi = phba->max_vpi; 1161 mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
1112 mb->un.varCfgPort.cmv = 1; 1162 mb->un.varCfgPort.cmv = 1;
1113 } else 1163 } else
1114 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0; 1164 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
1115 } else 1165 } else
1116 phba->sli_rev = 2; 1166 phba->sli_rev = LPFC_SLI_REV2;
1117 mb->un.varCfgPort.sli_mode = phba->sli_rev; 1167 mb->un.varCfgPort.sli_mode = phba->sli_rev;
1118 1168
1119 /* Now setup pcb */ 1169 /* Now setup pcb */
@@ -1245,7 +1295,7 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1245void 1295void
1246lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 1296lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1247{ 1297{
1248 MAILBOX_t *mb = &pmb->mb; 1298 MAILBOX_t *mb = &pmb->u.mb;
1249 1299
1250 memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); 1300 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
1251 mb->mbxCommand = MBX_KILL_BOARD; 1301 mb->mbxCommand = MBX_KILL_BOARD;
@@ -1305,29 +1355,98 @@ lpfc_mbox_get(struct lpfc_hba * phba)
1305} 1355}
1306 1356
1307/** 1357/**
1358 * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list
1359 * @phba: pointer to lpfc hba data structure.
1360 * @mbq: pointer to the driver internal queue element for mailbox command.
1361 *
 1362 * This routine puts the completed mailbox command into the mailbox command
1363 * complete list. This is the unlocked version of the routine. The mailbox
1364 * complete list is used by the driver worker thread to process mailbox
1365 * complete callback functions outside the driver interrupt handler.
1366 **/
1367void
1368__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
1369{
1370 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
1371}
1372
1373/**
1308 * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list 1374 * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list
1309 * @phba: pointer to lpfc hba data structure. 1375 * @phba: pointer to lpfc hba data structure.
1310 * @mbq: pointer to the driver internal queue element for mailbox command. 1376 * @mbq: pointer to the driver internal queue element for mailbox command.
1311 * 1377 *
1312 * This routine put the completed mailbox command into the mailbox command 1378 * This routine put the completed mailbox command into the mailbox command
1313 * complete list. This routine is called from driver interrupt handler 1379 * complete list. This is the locked version of the routine. The mailbox
1314 * context.The mailbox complete list is used by the driver worker thread 1380 * complete list is used by the driver worker thread to process mailbox
1315 * to process mailbox complete callback functions outside the driver interrupt 1381 * complete callback functions outside the driver interrupt handler.
1316 * handler.
1317 **/ 1382 **/
1318void 1383void
1319lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) 1384lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
1320{ 1385{
1321 unsigned long iflag; 1386 unsigned long iflag;
1322 1387
1323 /* This function expects to be called from interrupt context */ 1388 /* This function expects to be called from interrupt context */
1324 spin_lock_irqsave(&phba->hbalock, iflag); 1389 spin_lock_irqsave(&phba->hbalock, iflag);
1325 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl); 1390 __lpfc_mbox_cmpl_put(phba, mbq);
1326 spin_unlock_irqrestore(&phba->hbalock, iflag); 1391 spin_unlock_irqrestore(&phba->hbalock, iflag);
1327 return; 1392 return;
1328} 1393}
1329 1394
1330/** 1395/**
 1396 * lpfc_mbox_cmd_check - Check the validity of a mailbox command
1397 * @phba: pointer to lpfc hba data structure.
1398 * @mboxq: pointer to the driver internal queue element for mailbox command.
1399 *
1400 * This routine is to check whether a mailbox command is valid to be issued.
 1401 * This check is performed by the mailbox issue API when a client is about
 1402 * to issue a mailbox command to the mailbox transport.
1403 *
1404 * Return 0 - pass the check, -ENODEV - fail the check
1405 **/
1406int
1407lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1408{
 1409 /* Mailbox commands that have a completion handler must also have a
1410 * vport specified.
1411 */
1412 if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
1413 mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
1414 if (!mboxq->vport) {
1415 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1416 "1814 Mbox x%x failed, no vport\n",
1417 mboxq->u.mb.mbxCommand);
1418 dump_stack();
1419 return -ENODEV;
1420 }
1421 }
1422 return 0;
1423}
1424
1425/**
1426 * lpfc_mbox_dev_check - Check the device state for issuing a mailbox command
1427 * @phba: pointer to lpfc hba data structure.
1428 *
1429 * This routine is to check whether the HBA device is ready for posting a
 1430 * mailbox command. It is used by the mailbox transport API at the time it
 1431 * is to post a mailbox command to the device.
1432 *
1433 * Return 0 - pass the check, -ENODEV - fail the check
1434 **/
1435int
1436lpfc_mbox_dev_check(struct lpfc_hba *phba)
1437{
1438 /* If the PCI channel is in offline state, do not issue mbox */
1439 if (unlikely(pci_channel_offline(phba->pcidev)))
1440 return -ENODEV;
1441
1442 /* If the HBA is in error state, do not issue mbox */
1443 if (phba->link_state == LPFC_HBA_ERROR)
1444 return -ENODEV;
1445
1446 return 0;
1447}
1448
1449/**
1331 * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value 1450 * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value
1332 * @phba: pointer to lpfc hba data structure. 1451 * @phba: pointer to lpfc hba data structure.
1333 * @cmd: mailbox command code. 1452 * @cmd: mailbox command code.
@@ -1350,6 +1469,478 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
1350 case MBX_WRITE_WWN: /* 0x98 */ 1469 case MBX_WRITE_WWN: /* 0x98 */
1351 case MBX_LOAD_EXP_ROM: /* 0x9C */ 1470 case MBX_LOAD_EXP_ROM: /* 0x9C */
1352 return LPFC_MBOX_TMO_FLASH_CMD; 1471 return LPFC_MBOX_TMO_FLASH_CMD;
1472 case MBX_SLI4_CONFIG: /* 0x9b */
1473 return LPFC_MBOX_SLI4_CONFIG_TMO;
1353 } 1474 }
1354 return LPFC_MBOX_TMO; 1475 return LPFC_MBOX_TMO;
1355} 1476}
1477
1478/**
1479 * lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command
1480 * @mbox: pointer to lpfc mbox command.
1481 * @sgentry: sge entry index.
1482 * @phyaddr: physical address for the sge
1483 * @length: Length of the sge.
1484 *
1485 * This routine sets up an entry in the non-embedded mailbox command at the sge
1486 * index location.
1487 **/
1488void
1489lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
1490 dma_addr_t phyaddr, uint32_t length)
1491{
1492 struct lpfc_mbx_nembed_cmd *nembed_sge;
1493
1494 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1495 &mbox->u.mqe.un.nembed_cmd;
1496 nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
1497 nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
1498 nembed_sge->sge[sgentry].length = length;
1499}
1500
1501/**
1502 * lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command
1503 * @mbox: pointer to lpfc mbox command.
1504 * @sgentry: sge entry index.
1505 *
1506 * This routine gets an entry from the non-embedded mailbox command at the sge
1507 * index location.
1508 **/
1509void
1510lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
1511 struct lpfc_mbx_sge *sge)
1512{
1513 struct lpfc_mbx_nembed_cmd *nembed_sge;
1514
1515 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1516 &mbox->u.mqe.un.nembed_cmd;
1517 sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
1518 sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
1519 sge->length = nembed_sge->sge[sgentry].length;
1520}
1521
1522/**
1523 * lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command
1524 * @phba: pointer to lpfc hba data structure.
1525 * @mbox: pointer to lpfc mbox command.
1526 *
1527 * This routine frees SLI4 specific mailbox command for sending IOCTL command.
1528 **/
1529void
1530lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1531{
1532 struct lpfc_mbx_sli4_config *sli4_cfg;
1533 struct lpfc_mbx_sge sge;
1534 dma_addr_t phyaddr;
1535 uint32_t sgecount, sgentry;
1536
1537 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1538
1539 /* For embedded mbox command, just free the mbox command */
1540 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1541 mempool_free(mbox, phba->mbox_mem_pool);
1542 return;
1543 }
1544
1545 /* For non-embedded mbox command, we need to free the pages first */
1546 sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
1547 /* There is nothing we can do if there is no sge address array */
1548 if (unlikely(!mbox->sge_array)) {
1549 mempool_free(mbox, phba->mbox_mem_pool);
1550 return;
1551 }
1552 /* Each non-embedded DMA memory was allocated in the length of a page */
1553 for (sgentry = 0; sgentry < sgecount; sgentry++) {
1554 lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
1555 phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
1556 dma_free_coherent(&phba->pcidev->dev, PAGE_SIZE,
1557 mbox->sge_array->addr[sgentry], phyaddr);
1558 }
1559 /* Free the sge address array memory */
1560 kfree(mbox->sge_array);
1561 /* Finally, free the mailbox command itself */
1562 mempool_free(mbox, phba->mbox_mem_pool);
1563}
1564
1565/**
1566 * lpfc_sli4_config - Initialize the SLI4 Config Mailbox command
1567 * @phba: pointer to lpfc hba data structure.
1568 * @mbox: pointer to lpfc mbox command.
1569 * @subsystem: The sli4 config sub mailbox subsystem.
1570 * @opcode: The sli4 config sub mailbox command opcode.
1571 * @length: Length of the sli4 config mailbox command.
1572 *
1573 * This routine sets up the header fields of SLI4 specific mailbox command
1574 * for sending IOCTL command.
1575 *
1576 * Return: the actual length of the mbox command allocated (mostly useful
 1577 * for non-embedded mailbox commands).
1578 **/
1579int
1580lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1581 uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
1582{
1583 struct lpfc_mbx_sli4_config *sli4_config;
1584 union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
1585 uint32_t alloc_len;
1586 uint32_t resid_len;
1587 uint32_t pagen, pcount;
1588 void *viraddr;
1589 dma_addr_t phyaddr;
1590
1591 /* Set up SLI4 mailbox command header fields */
1592 memset(mbox, 0, sizeof(*mbox));
1593 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);
1594
1595 /* Set up SLI4 ioctl command header fields */
1596 sli4_config = &mbox->u.mqe.un.sli4_config;
1597
1598 /* Setup for the embedded mbox command */
1599 if (emb) {
1600 /* Set up main header fields */
1601 bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
1602 sli4_config->header.cfg_mhdr.payload_length =
1603 LPFC_MBX_CMD_HDR_LENGTH + length;
1604 /* Set up sub-header fields following main header */
1605 bf_set(lpfc_mbox_hdr_opcode,
1606 &sli4_config->header.cfg_shdr.request, opcode);
1607 bf_set(lpfc_mbox_hdr_subsystem,
1608 &sli4_config->header.cfg_shdr.request, subsystem);
1609 sli4_config->header.cfg_shdr.request.request_length = length;
1610 return length;
1611 }
1612
 1613 /* Setup for the non-embedded mbox command */
1614 pcount = (PAGE_ALIGN(length))/PAGE_SIZE;
1615 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
1616 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
1617 /* Allocate record for keeping SGE virtual addresses */
1618 mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
1619 GFP_KERNEL);
1620 if (!mbox->sge_array)
1621 return 0;
1622
1623 for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
1624 /* The DMA memory is always allocated in the length of a
 1625 * page even though the last SGE might not fill up a whole
 1626 * page; this gives an a priori size of PAGE_SIZE for
 1627 * the later DMA memory free.
1628 */
1629 viraddr = dma_alloc_coherent(&phba->pcidev->dev, PAGE_SIZE,
1630 &phyaddr, GFP_KERNEL);
 1631 /* In case the allocation fails, proceed with whatever we have */
1632 if (!viraddr)
1633 break;
1634 memset(viraddr, 0, PAGE_SIZE);
1635 mbox->sge_array->addr[pagen] = viraddr;
1636 /* Keep the first page for later sub-header construction */
1637 if (pagen == 0)
1638 cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
1639 resid_len = length - alloc_len;
1640 if (resid_len > PAGE_SIZE) {
1641 lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
1642 PAGE_SIZE);
1643 alloc_len += PAGE_SIZE;
1644 } else {
1645 lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
1646 resid_len);
1647 alloc_len = length;
1648 }
1649 }
1650
1651 /* Set up main header fields in mailbox command */
1652 sli4_config->header.cfg_mhdr.payload_length = alloc_len;
1653 bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);
1654
1655 /* Set up sub-header fields into the first page */
1656 if (pagen > 0) {
1657 bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
1658 bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
1659 cfg_shdr->request.request_length =
1660 alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
1661 }
 1662 /* The sub-header is in DMA memory, which needs endian conversion */
1663 lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
1664 sizeof(union lpfc_sli4_cfg_shdr));
1665
1666 return alloc_len;
1667}
1668
1669/**
1670 * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
1671 * @phba: pointer to lpfc hba data structure.
1672 * @mbox: pointer to lpfc mbox command.
1673 *
1674 * This routine gets the opcode from a SLI4 specific mailbox command for
1675 * sending IOCTL command. If the mailbox command is not MBX_SLI4_CONFIG
1676 * (0x9B) or if the IOCTL sub-header is not present, opcode 0x0 shall be
1677 * returned.
1678 **/
1679uint8_t
1680lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1681{
1682 struct lpfc_mbx_sli4_config *sli4_cfg;
1683 union lpfc_sli4_cfg_shdr *cfg_shdr;
1684
1685 if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
1686 return 0;
1687 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1688
1689 /* For embedded mbox command, get opcode from embedded sub-header*/
1690 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1691 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
1692 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1693 }
1694
1695 /* For non-embedded mbox command, get opcode from first dma page */
1696 if (unlikely(!mbox->sge_array))
1697 return 0;
1698 cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
1699 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1700}
1701
1702/**
 1703 * lpfc_request_features - Configure SLI4 REQUEST_FEATURES mailbox
1704 * @mboxq: pointer to lpfc mbox command.
1705 *
1706 * This routine sets up the mailbox for an SLI4 REQUEST_FEATURES
1707 * mailbox command.
1708 **/
1709void
1710lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
1711{
1712 /* Set up SLI4 mailbox command header fields */
1713 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
1714 bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);
1715
1716 /* Set up host requested features. */
1717 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
1718
1719 if (phba->cfg_enable_fip)
1720 bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
1721 else
1722 bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 1);
1723
1724 /* Enable DIF (block guard) only if configured to do so. */
1725 if (phba->cfg_enable_bg)
1726 bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
1727
1728 /* Enable NPIV only if configured to do so. */
1729 if (phba->max_vpi && phba->cfg_enable_npiv)
1730 bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
1731
1732 return;
1733}
1734
1735/**
1736 * lpfc_init_vfi - Initialize the INIT_VFI mailbox command
1737 * @mbox: pointer to lpfc mbox command to initialize.
1738 * @vport: Vport associated with the VF.
1739 *
1740 * This routine initializes @mbox to all zeros and then fills in the mailbox
1741 * fields from @vport. INIT_VFI configures virtual fabrics identified by VFI
1742 * in the context of an FCF. The driver issues this command to setup a VFI
1743 * before issuing a FLOGI to login to the VSAN. The driver should also issue a
1744 * REG_VFI after a successful VSAN login.
1745 **/
1746void
1747lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
1748{
1749 struct lpfc_mbx_init_vfi *init_vfi;
1750
1751 memset(mbox, 0, sizeof(*mbox));
1752 init_vfi = &mbox->u.mqe.un.init_vfi;
1753 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
1754 bf_set(lpfc_init_vfi_vr, init_vfi, 1);
1755 bf_set(lpfc_init_vfi_vt, init_vfi, 1);
1756 bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
1757 bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
1758}
1759
1760/**
1761 * lpfc_reg_vfi - Initialize the REG_VFI mailbox command
1762 * @mbox: pointer to lpfc mbox command to initialize.
1763 * @vport: vport associated with the VF.
1764 * @phys: BDE DMA bus address used to send the service parameters to the HBA.
1765 *
1766 * This routine initializes @mbox to all zeros and then fills in the mailbox
1767 * fields from @vport, and uses @buf as a DMAable buffer to send the vport's
1768 * fc service parameters to the HBA for this VFI. REG_VFI configures virtual
1769 * fabrics identified by VFI in the context of an FCF.
1770 **/
1771void
1772lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
1773{
1774 struct lpfc_mbx_reg_vfi *reg_vfi;
1775
1776 memset(mbox, 0, sizeof(*mbox));
1777 reg_vfi = &mbox->u.mqe.un.reg_vfi;
1778 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
1779 bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
1780 bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
1781 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
1782 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
1783 reg_vfi->bde.addrHigh = putPaddrHigh(phys);
1784 reg_vfi->bde.addrLow = putPaddrLow(phys);
1785 reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
1786 reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1787 bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
1788}
1789
1790/**
1791 * lpfc_init_vpi - Initialize the INIT_VPI mailbox command
1792 * @mbox: pointer to lpfc mbox command to initialize.
1793 * @vpi: VPI to be initialized.
1794 *
1795 * The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the
1796 * command to activate a virtual N_Port. The HBA assigns a MAC address to use
1797 * with the virtual N Port. The SLI Host issues this command before issuing a
1798 * FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a
1799 * successful virtual NPort login.
1800 **/
1801void
1802lpfc_init_vpi(struct lpfcMboxq *mbox, uint16_t vpi)
1803{
1804 memset(mbox, 0, sizeof(*mbox));
1805 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
1806 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, vpi);
1807}
1808
1809/**
1810 * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command
1811 * @mbox: pointer to lpfc mbox command to initialize.
1812 * @vfi: VFI to be unregistered.
1813 *
1814 * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric
1815 * (logical NPort) into the inactive state. The SLI Host must have logged out
1816 * and unregistered all remote N_Ports to abort any activity on the virtual
1817 * fabric. The SLI Port posts the mailbox response after marking the virtual
1818 * fabric inactive.
1819 **/
1820void
1821lpfc_unreg_vfi(struct lpfcMboxq *mbox, uint16_t vfi)
1822{
1823 memset(mbox, 0, sizeof(*mbox));
1824 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
1825 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vfi);
1826}
1827
1828/**
 1829 * lpfc_dump_fcoe_param - Dump config region 23 to get FCoE parameters
 1830 * @phba: pointer to the hba structure.
1831 * @mbox: pointer to lpfc mbox command to initialize.
1832 *
 1833 * This function creates a SLI4 dump mailbox command to dump FCoE
1834 * parameters stored in region 23.
1835 **/
1836int
1837lpfc_dump_fcoe_param(struct lpfc_hba *phba,
1838 struct lpfcMboxq *mbox)
1839{
1840 struct lpfc_dmabuf *mp = NULL;
1841 MAILBOX_t *mb;
1842
1843 memset(mbox, 0, sizeof(*mbox));
1844 mb = &mbox->u.mb;
1845
1846 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1847 if (mp)
1848 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1849
1850 if (!mp || !mp->virt) {
1851 kfree(mp);
1852 /* dump_fcoe_param failed to allocate memory */
1853 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
1854 "2569 lpfc_dump_fcoe_param: memory"
1855 " allocation failed \n");
1856 return 1;
1857 }
1858
1859 memset(mp->virt, 0, LPFC_BPL_SIZE);
1860 INIT_LIST_HEAD(&mp->list);
1861
1862 /* save address for completion */
1863 mbox->context1 = (uint8_t *) mp;
1864
1865 mb->mbxCommand = MBX_DUMP_MEMORY;
1866 mb->un.varDmp.type = DMP_NV_PARAMS;
1867 mb->un.varDmp.region_id = DMP_REGION_FCOEPARAM;
1868 mb->un.varDmp.sli4_length = DMP_FCOEPARAM_RGN_SIZE;
1869 mb->un.varWords[3] = putPaddrLow(mp->phys);
1870 mb->un.varWords[4] = putPaddrHigh(mp->phys);
1871 return 0;
1872}
1873
1874/**
1875 * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command
1876 * @phba: pointer to the hba structure containing the FCF index and RQ ID.
1877 * @mbox: pointer to lpfc mbox command to initialize.
1878 *
1879 * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The
1880 * SLI Host uses the command to activate an FCF after it has acquired FCF
 1881 * information via a READ_FCF mailbox command. This mailbox command is also used
1882 * to indicate where received unsolicited frames from this FCF will be sent. By
1883 * default this routine will set up the FCF to forward all unsolicited frames
 1884 * to the RQ ID passed in the @phba. This can be overridden by the caller for
1885 * more complicated setups.
1886 **/
1887void
1888lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1889{
1890 struct lpfc_mbx_reg_fcfi *reg_fcfi;
1891
1892 memset(mbox, 0, sizeof(*mbox));
1893 reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
1894 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
1895 bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
1896 bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
1897 bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
1898 bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
1899 bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx);
1900 /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
1901 bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
1902 (~phba->fcf.addr_mode) & 0x3);
1903 if (phba->fcf.fcf_flag & FCF_VALID_VLAN) {
1904 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
1905 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id);
1906 }
1907}
1908
1909/**
1910 * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command
1911 * @mbox: pointer to lpfc mbox command to initialize.
1912 * @fcfi: FCFI to be unregistered.
1913 *
1914 * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs).
1915 * The SLI Host uses the command to inactivate an FCFI.
1916 **/
1917void
1918lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
1919{
1920 memset(mbox, 0, sizeof(*mbox));
1921 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
1922 bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
1923}
1924
1925/**
1926 * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command
1927 * @mbox: pointer to lpfc mbox command to initialize.
1928 * @ndlp: The nodelist structure that describes the RPI to resume.
1929 *
1930 * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a
1931 * link event.
1932 **/
1933void
1934lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
1935{
1936 struct lpfc_mbx_resume_rpi *resume_rpi;
1937
1938 memset(mbox, 0, sizeof(*mbox));
1939 resume_rpi = &mbox->u.mqe.un.resume_rpi;
1940 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
1941 bf_set(lpfc_resume_rpi_rpi, resume_rpi, ndlp->nlp_rpi);
1942 bf_set(lpfc_resume_rpi_vpi, resume_rpi,
1943 ndlp->vport->vpi + ndlp->vport->phba->vpi_base);
1944 bf_set(lpfc_resume_rpi_vfi, resume_rpi,
1945 ndlp->vport->vfi + ndlp->vport->phba->vfi_base);
1946}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 35a976733398..e198c917c13e 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28 28
29#include <scsi/scsi.h> 29#include <scsi/scsi.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -45,7 +47,7 @@
45 * @phba: HBA to allocate pools for 47 * @phba: HBA to allocate pools for
46 * 48 *
47 * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool, 49 * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
48 * lpfc_mbuf_pool, lpfc_hbq_pool. Creates and allocates kmalloc-backed mempools 50 * lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
49 * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask. 51 * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
50 * 52 *
51 * Notes: Not interrupt-safe. Must be called with no locks held. If any 53 * Notes: Not interrupt-safe. Must be called with no locks held. If any
@@ -56,19 +58,30 @@
56 * -ENOMEM on failure (if any memory allocations fail) 58 * -ENOMEM on failure (if any memory allocations fail)
57 **/ 59 **/
58int 60int
59lpfc_mem_alloc(struct lpfc_hba * phba) 61lpfc_mem_alloc(struct lpfc_hba *phba, int align)
60{ 62{
61 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 63 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
62 int longs; 64 int longs;
63 int i; 65 int i;
64 66
65 phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", 67 if (phba->sli_rev == LPFC_SLI_REV4)
66 phba->pcidev, phba->cfg_sg_dma_buf_size, 8, 0); 68 phba->lpfc_scsi_dma_buf_pool =
69 pci_pool_create("lpfc_scsi_dma_buf_pool",
70 phba->pcidev,
71 phba->cfg_sg_dma_buf_size,
72 phba->cfg_sg_dma_buf_size,
73 0);
74 else
75 phba->lpfc_scsi_dma_buf_pool =
76 pci_pool_create("lpfc_scsi_dma_buf_pool",
77 phba->pcidev, phba->cfg_sg_dma_buf_size,
78 align, 0);
67 if (!phba->lpfc_scsi_dma_buf_pool) 79 if (!phba->lpfc_scsi_dma_buf_pool)
68 goto fail; 80 goto fail;
69 81
70 phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev, 82 phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
71 LPFC_BPL_SIZE, 8,0); 83 LPFC_BPL_SIZE,
84 align, 0);
72 if (!phba->lpfc_mbuf_pool) 85 if (!phba->lpfc_mbuf_pool)
73 goto fail_free_dma_buf_pool; 86 goto fail_free_dma_buf_pool;
74 87
@@ -97,23 +110,31 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
97 sizeof(struct lpfc_nodelist)); 110 sizeof(struct lpfc_nodelist));
98 if (!phba->nlp_mem_pool) 111 if (!phba->nlp_mem_pool)
99 goto fail_free_mbox_pool; 112 goto fail_free_mbox_pool;
100 113 phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
101 phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",phba->pcidev, 114 phba->pcidev,
102 LPFC_BPL_SIZE, 8, 0); 115 LPFC_HDR_BUF_SIZE, align, 0);
103 if (!phba->lpfc_hbq_pool) 116 if (!phba->lpfc_hrb_pool)
104 goto fail_free_nlp_mem_pool; 117 goto fail_free_nlp_mem_pool;
118 phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
119 phba->pcidev,
120 LPFC_DATA_BUF_SIZE, align, 0);
121 if (!phba->lpfc_drb_pool)
122 goto fail_free_hbq_pool;
105 123
106 /* vpi zero is reserved for the physical port so add 1 to max */ 124 /* vpi zero is reserved for the physical port so add 1 to max */
107 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG; 125 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
108 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); 126 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
109 if (!phba->vpi_bmask) 127 if (!phba->vpi_bmask)
110 goto fail_free_hbq_pool; 128 goto fail_free_dbq_pool;
111 129
112 return 0; 130 return 0;
113 131
132 fail_free_dbq_pool:
133 pci_pool_destroy(phba->lpfc_drb_pool);
134 phba->lpfc_drb_pool = NULL;
114 fail_free_hbq_pool: 135 fail_free_hbq_pool:
115 lpfc_sli_hbqbuf_free_all(phba); 136 pci_pool_destroy(phba->lpfc_hrb_pool);
116 pci_pool_destroy(phba->lpfc_hbq_pool); 137 phba->lpfc_hrb_pool = NULL;
117 fail_free_nlp_mem_pool: 138 fail_free_nlp_mem_pool:
118 mempool_destroy(phba->nlp_mem_pool); 139 mempool_destroy(phba->nlp_mem_pool);
119 phba->nlp_mem_pool = NULL; 140 phba->nlp_mem_pool = NULL;
@@ -136,27 +157,73 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
136} 157}
137 158
138/** 159/**
139 * lpfc_mem_free - Frees all PCI and memory allocated by lpfc_mem_alloc 160 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
140 * @phba: HBA to free memory for 161 * @phba: HBA to free memory for
141 * 162 *
142 * Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, 163 * Description: Free the memory allocated by lpfc_mem_alloc routine. This
 143 * lpfc_hbq_pool. Frees kmalloc-backed mempools for LPFC_MBOXQ_t and 164 * routine is the counterpart of lpfc_mem_alloc.
144 * lpfc_nodelist. Also frees the VPI bitmask
145 * 165 *
146 * Returns: None 166 * Returns: None
147 **/ 167 **/
148void 168void
149lpfc_mem_free(struct lpfc_hba * phba) 169lpfc_mem_free(struct lpfc_hba *phba)
150{ 170{
151 struct lpfc_sli *psli = &phba->sli;
152 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
153 LPFC_MBOXQ_t *mbox, *next_mbox;
154 struct lpfc_dmabuf *mp;
155 int i; 171 int i;
172 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
156 173
174 /* Free VPI bitmask memory */
157 kfree(phba->vpi_bmask); 175 kfree(phba->vpi_bmask);
176
177 /* Free HBQ pools */
158 lpfc_sli_hbqbuf_free_all(phba); 178 lpfc_sli_hbqbuf_free_all(phba);
179 pci_pool_destroy(phba->lpfc_drb_pool);
180 phba->lpfc_drb_pool = NULL;
181 pci_pool_destroy(phba->lpfc_hrb_pool);
182 phba->lpfc_hrb_pool = NULL;
183
184 /* Free NLP memory pool */
185 mempool_destroy(phba->nlp_mem_pool);
186 phba->nlp_mem_pool = NULL;
187
188 /* Free mbox memory pool */
189 mempool_destroy(phba->mbox_mem_pool);
190 phba->mbox_mem_pool = NULL;
191
192 /* Free MBUF memory pool */
193 for (i = 0; i < pool->current_count; i++)
194 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
195 pool->elements[i].phys);
196 kfree(pool->elements);
197
198 pci_pool_destroy(phba->lpfc_mbuf_pool);
199 phba->lpfc_mbuf_pool = NULL;
159 200
201 /* Free DMA buffer memory pool */
202 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
203 phba->lpfc_scsi_dma_buf_pool = NULL;
204
205 return;
206}
207
208/**
209 * lpfc_mem_free_all - Frees all PCI and driver memory
210 * @phba: HBA to free memory for
211 *
 212 * Description: Free memory from PCI and driver memory pools, including
 213 * lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, and lpfc_hrb_pool. Frees
214 * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees
215 * the VPI bitmask.
216 *
217 * Returns: None
218 **/
219void
220lpfc_mem_free_all(struct lpfc_hba *phba)
221{
222 struct lpfc_sli *psli = &phba->sli;
223 LPFC_MBOXQ_t *mbox, *next_mbox;
224 struct lpfc_dmabuf *mp;
225
226 /* Free memory used in mailbox queue back to mailbox memory pool */
160 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) { 227 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
161 mp = (struct lpfc_dmabuf *) (mbox->context1); 228 mp = (struct lpfc_dmabuf *) (mbox->context1);
162 if (mp) { 229 if (mp) {
@@ -166,6 +233,7 @@ lpfc_mem_free(struct lpfc_hba * phba)
166 list_del(&mbox->list); 233 list_del(&mbox->list);
167 mempool_free(mbox, phba->mbox_mem_pool); 234 mempool_free(mbox, phba->mbox_mem_pool);
168 } 235 }
236 /* Free memory used in mailbox cmpl list back to mailbox memory pool */
169 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) { 237 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
170 mp = (struct lpfc_dmabuf *) (mbox->context1); 238 mp = (struct lpfc_dmabuf *) (mbox->context1);
171 if (mp) { 239 if (mp) {
@@ -175,8 +243,10 @@ lpfc_mem_free(struct lpfc_hba * phba)
175 list_del(&mbox->list); 243 list_del(&mbox->list);
176 mempool_free(mbox, phba->mbox_mem_pool); 244 mempool_free(mbox, phba->mbox_mem_pool);
177 } 245 }
178 246 /* Free the active mailbox command back to the mailbox memory pool */
247 spin_lock_irq(&phba->hbalock);
179 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 248 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
249 spin_unlock_irq(&phba->hbalock);
180 if (psli->mbox_active) { 250 if (psli->mbox_active) {
181 mbox = psli->mbox_active; 251 mbox = psli->mbox_active;
182 mp = (struct lpfc_dmabuf *) (mbox->context1); 252 mp = (struct lpfc_dmabuf *) (mbox->context1);
@@ -188,27 +258,14 @@ lpfc_mem_free(struct lpfc_hba * phba)
188 psli->mbox_active = NULL; 258 psli->mbox_active = NULL;
189 } 259 }
190 260
191 for (i = 0; i < pool->current_count; i++) 261 /* Free and destroy all the allocated memory pools */
192 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, 262 lpfc_mem_free(phba);
193 pool->elements[i].phys);
194 kfree(pool->elements);
195
196 pci_pool_destroy(phba->lpfc_hbq_pool);
197 mempool_destroy(phba->nlp_mem_pool);
198 mempool_destroy(phba->mbox_mem_pool);
199
200 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
201 pci_pool_destroy(phba->lpfc_mbuf_pool);
202
203 phba->lpfc_hbq_pool = NULL;
204 phba->nlp_mem_pool = NULL;
205 phba->mbox_mem_pool = NULL;
206 phba->lpfc_scsi_dma_buf_pool = NULL;
207 phba->lpfc_mbuf_pool = NULL;
208 263
209 /* Free the iocb lookup array */ 264 /* Free the iocb lookup array */
210 kfree(psli->iocbq_lookup); 265 kfree(psli->iocbq_lookup);
211 psli->iocbq_lookup = NULL; 266 psli->iocbq_lookup = NULL;
267
268 return;
212} 269}
213 270
214/** 271/**
@@ -305,7 +362,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
305 * lpfc_els_hbq_alloc - Allocate an HBQ buffer 362 * lpfc_els_hbq_alloc - Allocate an HBQ buffer
306 * @phba: HBA to allocate HBQ buffer for 363 * @phba: HBA to allocate HBQ buffer for
307 * 364 *
308 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI 365 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI
309 * pool along a non-DMA-mapped container for it. 366 * pool along a non-DMA-mapped container for it.
310 * 367 *
311 * Notes: Not interrupt-safe. Must be called with no locks held. 368 * Notes: Not interrupt-safe. Must be called with no locks held.
@@ -323,7 +380,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
323 if (!hbqbp) 380 if (!hbqbp)
324 return NULL; 381 return NULL;
325 382
326 hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL, 383 hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
327 &hbqbp->dbuf.phys); 384 &hbqbp->dbuf.phys);
328 if (!hbqbp->dbuf.virt) { 385 if (!hbqbp->dbuf.virt) {
329 kfree(hbqbp); 386 kfree(hbqbp);
@@ -334,7 +391,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
334} 391}
335 392
336/** 393/**
337 * lpfc_mem_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc 394 * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
338 * @phba: HBA buffer was allocated for 395 * @phba: HBA buffer was allocated for
339 * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc 396 * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
340 * 397 *
@@ -348,12 +405,73 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
348void 405void
349lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) 406lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
350{ 407{
351 pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); 408 pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
352 kfree(hbqbp); 409 kfree(hbqbp);
353 return; 410 return;
354} 411}
355 412
356/** 413/**
414 * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
415 * @phba: HBA to allocate a receive buffer for
416 *
417 * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
 418 * pool along with a non-DMA-mapped container for it.
419 *
420 * Notes: Not interrupt-safe. Must be called with no locks held.
421 *
422 * Returns:
423 * pointer to HBQ on success
424 * NULL on failure
425 **/
426struct hbq_dmabuf *
427lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
428{
429 struct hbq_dmabuf *dma_buf;
430
431 dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
432 if (!dma_buf)
433 return NULL;
434
435 dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
436 &dma_buf->hbuf.phys);
437 if (!dma_buf->hbuf.virt) {
438 kfree(dma_buf);
439 return NULL;
440 }
441 dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
442 &dma_buf->dbuf.phys);
443 if (!dma_buf->dbuf.virt) {
444 pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
445 dma_buf->hbuf.phys);
446 kfree(dma_buf);
447 return NULL;
448 }
449 dma_buf->size = LPFC_BPL_SIZE;
450 return dma_buf;
451}
452
453/**
454 * lpfc_sli4_rb_free - Frees a receive buffer
455 * @phba: HBA buffer was allocated for
 456 * @dmab: DMA buffer container returned by lpfc_sli4_rb_alloc
457 *
458 * Description: Frees both the container and the DMA-mapped buffers returned by
459 * lpfc_sli4_rb_alloc.
460 *
461 * Notes: Can be called with or without locks held.
462 *
463 * Returns: None
464 **/
465void
466lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
467{
468 pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
469 pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
470 kfree(dmab);
471 return;
472}
473
474/**
357 * lpfc_in_buf_free - Free a DMA buffer 475 * lpfc_in_buf_free - Free a DMA buffer
358 * @phba: HBA buffer is associated with 476 * @phba: HBA buffer is associated with
359 * @mp: Buffer to free 477 * @mp: Buffer to free
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 08cdc77af41c..3e74136f1ede 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
1 /******************************************************************* 1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
29#include <scsi/scsi_transport_fc.h> 29#include <scsi/scsi_transport_fc.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -361,7 +363,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
361 if (!mbox) 363 if (!mbox)
362 goto out; 364 goto out;
363 365
364 rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID, 366 rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
365 (uint8_t *) sp, mbox, 0); 367 (uint8_t *) sp, mbox, 0);
366 if (rc) { 368 if (rc) {
367 mempool_free(mbox, phba->mbox_mem_pool); 369 mempool_free(mbox, phba->mbox_mem_pool);
@@ -495,11 +497,19 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
495 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); 497 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
496 else 498 else
497 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 499 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
500 if ((ndlp->nlp_DID == Fabric_DID) &&
501 vport->port_type == LPFC_NPIV_PORT) {
502 lpfc_linkdown_port(vport);
503 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
504 spin_lock_irq(shost->host_lock);
505 ndlp->nlp_flag |= NLP_DELAY_TMO;
506 spin_unlock_irq(shost->host_lock);
498 507
499 if ((!(ndlp->nlp_type & NLP_FABRIC) && 508 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
500 ((ndlp->nlp_type & NLP_FCP_TARGET) || 509 } else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
501 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || 510 ((ndlp->nlp_type & NLP_FCP_TARGET) ||
502 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { 511 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
512 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
503 /* Only try to re-login if this is NOT a Fabric Node */ 513 /* Only try to re-login if this is NOT a Fabric Node */
504 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 514 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
505 spin_lock_irq(shost->host_lock); 515 spin_lock_irq(shost->host_lock);
@@ -567,7 +577,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
567{ 577{
568 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 578 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
569 579
570 if (!ndlp->nlp_rpi) { 580 if (!(ndlp->nlp_flag & NLP_RPI_VALID)) {
571 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 581 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
572 return 0; 582 return 0;
573 } 583 }
@@ -857,7 +867,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
857 867
858 lpfc_unreg_rpi(vport, ndlp); 868 lpfc_unreg_rpi(vport, ndlp);
859 869
860 if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID, 870 if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
861 (uint8_t *) sp, mbox, 0) == 0) { 871 (uint8_t *) sp, mbox, 0) == 0) {
862 switch (ndlp->nlp_DID) { 872 switch (ndlp->nlp_DID) {
863 case NameServer_DID: 873 case NameServer_DID:
@@ -1068,6 +1078,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1068 struct lpfc_iocbq *cmdiocb, *rspiocb; 1078 struct lpfc_iocbq *cmdiocb, *rspiocb;
1069 IOCB_t *irsp; 1079 IOCB_t *irsp;
1070 ADISC *ap; 1080 ADISC *ap;
1081 int rc;
1071 1082
1072 cmdiocb = (struct lpfc_iocbq *) arg; 1083 cmdiocb = (struct lpfc_iocbq *) arg;
1073 rspiocb = cmdiocb->context_un.rsp_iocb; 1084 rspiocb = cmdiocb->context_un.rsp_iocb;
@@ -1093,6 +1104,15 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1093 return ndlp->nlp_state; 1104 return ndlp->nlp_state;
1094 } 1105 }
1095 1106
1107 if (phba->sli_rev == LPFC_SLI_REV4) {
1108 rc = lpfc_sli4_resume_rpi(ndlp);
1109 if (rc) {
1110 /* Stay in state and retry. */
1111 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1112 return ndlp->nlp_state;
1113 }
1114 }
1115
1096 if (ndlp->nlp_type & NLP_FCP_TARGET) { 1116 if (ndlp->nlp_type & NLP_FCP_TARGET) {
1097 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1117 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1098 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); 1118 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
@@ -1100,6 +1120,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1100 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1120 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1101 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 1121 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1102 } 1122 }
1123
1103 return ndlp->nlp_state; 1124 return ndlp->nlp_state;
1104} 1125}
1105 1126
@@ -1190,7 +1211,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1190 1211
1191 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 1212 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1192 if ((mb = phba->sli.mbox_active)) { 1213 if ((mb = phba->sli.mbox_active)) {
1193 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1214 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1194 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1215 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1195 lpfc_nlp_put(ndlp); 1216 lpfc_nlp_put(ndlp);
1196 mb->context2 = NULL; 1217 mb->context2 = NULL;
@@ -1200,7 +1221,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1200 1221
1201 spin_lock_irq(&phba->hbalock); 1222 spin_lock_irq(&phba->hbalock);
1202 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 1223 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1203 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1224 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1204 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1225 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1205 mp = (struct lpfc_dmabuf *) (mb->context1); 1226 mp = (struct lpfc_dmabuf *) (mb->context1);
1206 if (mp) { 1227 if (mp) {
@@ -1251,7 +1272,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1251{ 1272{
1252 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1273 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1253 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; 1274 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1254 MAILBOX_t *mb = &pmb->mb; 1275 MAILBOX_t *mb = &pmb->u.mb;
1255 uint32_t did = mb->un.varWords[1]; 1276 uint32_t did = mb->un.varWords[1];
1256 1277
1257 if (mb->mbxStatus) { 1278 if (mb->mbxStatus) {
@@ -1283,6 +1304,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1283 } 1304 }
1284 1305
1285 ndlp->nlp_rpi = mb->un.varWords[0]; 1306 ndlp->nlp_rpi = mb->un.varWords[0];
1307 ndlp->nlp_flag |= NLP_RPI_VALID;
1286 1308
1287 /* Only if we are not a fabric nport do we issue PRLI */ 1309 /* Only if we are not a fabric nport do we issue PRLI */
1288 if (!(ndlp->nlp_type & NLP_FABRIC)) { 1310 if (!(ndlp->nlp_type & NLP_FABRIC)) {
@@ -1878,11 +1900,12 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
1878 void *arg, uint32_t evt) 1900 void *arg, uint32_t evt)
1879{ 1901{
1880 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; 1902 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1881 MAILBOX_t *mb = &pmb->mb; 1903 MAILBOX_t *mb = &pmb->u.mb;
1882 1904
1883 if (!mb->mbxStatus) 1905 if (!mb->mbxStatus) {
1884 ndlp->nlp_rpi = mb->un.varWords[0]; 1906 ndlp->nlp_rpi = mb->un.varWords[0];
1885 else { 1907 ndlp->nlp_flag |= NLP_RPI_VALID;
1908 } else {
1886 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { 1909 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
1887 lpfc_drop_node(vport, ndlp); 1910 lpfc_drop_node(vport, ndlp);
1888 return NLP_STE_FREED_NODE; 1911 return NLP_STE_FREED_NODE;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 167b66dd34c7..da59c4f0168f 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -31,8 +31,10 @@
31#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
32 32
33#include "lpfc_version.h" 33#include "lpfc_version.h"
34#include "lpfc_hw4.h"
34#include "lpfc_hw.h" 35#include "lpfc_hw.h"
35#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
36#include "lpfc_nl.h" 38#include "lpfc_nl.h"
37#include "lpfc_disc.h" 39#include "lpfc_disc.h"
38#include "lpfc_scsi.h" 40#include "lpfc_scsi.h"
@@ -57,6 +59,8 @@ static char *dif_op_str[] = {
57 "SCSI_PROT_READ_CONVERT", 59 "SCSI_PROT_READ_CONVERT",
58 "SCSI_PROT_WRITE_CONVERT" 60 "SCSI_PROT_WRITE_CONVERT"
59}; 61};
62static void
63lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
60 64
61static void 65static void
62lpfc_debug_save_data(struct scsi_cmnd *cmnd) 66lpfc_debug_save_data(struct scsi_cmnd *cmnd)
@@ -112,6 +116,27 @@ lpfc_debug_save_dif(struct scsi_cmnd *cmnd)
112} 116}
113 117
114/** 118/**
119 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
120 * @phba: Pointer to HBA object.
121 * @lpfc_cmd: lpfc scsi command object pointer.
122 *
123 * This function is called from the lpfc_prep_task_mgmt_cmd function to
124 * set the last bit in the response sge entry.
125 **/
126static void
127lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
128 struct lpfc_scsi_buf *lpfc_cmd)
129{
130 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
131 if (sgl) {
132 sgl += 1;
133 sgl->word2 = le32_to_cpu(sgl->word2);
134 bf_set(lpfc_sli4_sge_last, sgl, 1);
135 sgl->word2 = cpu_to_le32(sgl->word2);
136 }
137}
138
139/**
115 * lpfc_update_stats - Update statistical data for the command completion 140 * lpfc_update_stats - Update statistical data for the command completion
116 * @phba: Pointer to HBA object. 141 * @phba: Pointer to HBA object.
117 * @lpfc_cmd: lpfc scsi command object pointer. 142 * @lpfc_cmd: lpfc scsi command object pointer.
@@ -325,7 +350,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
325 350
326 vports = lpfc_create_vport_work_array(phba); 351 vports = lpfc_create_vport_work_array(phba);
327 if (vports != NULL) 352 if (vports != NULL)
328 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 353 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
329 shost = lpfc_shost_from_vport(vports[i]); 354 shost = lpfc_shost_from_vport(vports[i]);
330 shost_for_each_device(sdev, shost) { 355 shost_for_each_device(sdev, shost) {
331 new_queue_depth = 356 new_queue_depth =
@@ -379,7 +404,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
379 404
380 vports = lpfc_create_vport_work_array(phba); 405 vports = lpfc_create_vport_work_array(phba);
381 if (vports != NULL) 406 if (vports != NULL)
382 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 407 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
383 shost = lpfc_shost_from_vport(vports[i]); 408 shost = lpfc_shost_from_vport(vports[i]);
384 shost_for_each_device(sdev, shost) { 409 shost_for_each_device(sdev, shost) {
385 if (vports[i]->cfg_lun_queue_depth <= 410 if (vports[i]->cfg_lun_queue_depth <=
@@ -427,7 +452,7 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
427 452
428 vports = lpfc_create_vport_work_array(phba); 453 vports = lpfc_create_vport_work_array(phba);
429 if (vports != NULL) 454 if (vports != NULL)
430 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 455 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
431 shost = lpfc_shost_from_vport(vports[i]); 456 shost = lpfc_shost_from_vport(vports[i]);
432 shost_for_each_device(sdev, shost) { 457 shost_for_each_device(sdev, shost) {
433 rport = starget_to_rport(scsi_target(sdev)); 458 rport = starget_to_rport(scsi_target(sdev));
@@ -438,22 +463,23 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
438} 463}
439 464
440/** 465/**
441 * lpfc_new_scsi_buf - Scsi buffer allocator 466 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
442 * @vport: The virtual port for which this call being executed. 467 * @vport: The virtual port for which this call being executed.
468 * @num_to_allocate: The requested number of buffers to allocate.
443 * 469 *
444 * This routine allocates a scsi buffer, which contains all the necessary 470 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
445 * information needed to initiate a SCSI I/O. The non-DMAable buffer region 471 * the scsi buffer contains all the necessary information needed to initiate
446 * contains information to build the IOCB. The DMAable region contains 472 * a SCSI I/O. The non-DMAable buffer region contains information to build
447 * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to 473 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
448 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL 474 * and the initial BPL. In addition to allocating memory, the FCP CMND and
449 * and the BPL BDE is setup in the IOCB. 475 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
450 * 476 *
451 * Return codes: 477 * Return codes:
452 * NULL - Error 478 * int - number of scsi buffers that were allocated.
453 * Pointer to lpfc_scsi_buf data structure - Success 479 * 0 = failure, less than num_to_alloc is a partial failure.
454 **/ 480 **/
455static struct lpfc_scsi_buf * 481static int
456lpfc_new_scsi_buf(struct lpfc_vport *vport) 482lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
457{ 483{
458 struct lpfc_hba *phba = vport->phba; 484 struct lpfc_hba *phba = vport->phba;
459 struct lpfc_scsi_buf *psb; 485 struct lpfc_scsi_buf *psb;
@@ -463,107 +489,401 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
463 dma_addr_t pdma_phys_fcp_rsp; 489 dma_addr_t pdma_phys_fcp_rsp;
464 dma_addr_t pdma_phys_bpl; 490 dma_addr_t pdma_phys_bpl;
465 uint16_t iotag; 491 uint16_t iotag;
492 int bcnt;
466 493
467 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); 494 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
468 if (!psb) 495 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
469 return NULL; 496 if (!psb)
497 break;
498
499 /*
500 * Get memory from the pci pool to map the virt space to pci
501 * bus space for an I/O. The DMA buffer includes space for the
502 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
503 * necessary to support the sg_tablesize.
504 */
505 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
506 GFP_KERNEL, &psb->dma_handle);
507 if (!psb->data) {
508 kfree(psb);
509 break;
510 }
511
512 /* Initialize virtual ptrs to dma_buf region. */
513 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
514
515 /* Allocate iotag for psb->cur_iocbq. */
516 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
517 if (iotag == 0) {
518 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
519 psb->data, psb->dma_handle);
520 kfree(psb);
521 break;
522 }
523 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
524
525 psb->fcp_cmnd = psb->data;
526 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
527 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
528 sizeof(struct fcp_rsp);
529
530 /* Initialize local short-hand pointers. */
531 bpl = psb->fcp_bpl;
532 pdma_phys_fcp_cmd = psb->dma_handle;
533 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
534 pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
535 sizeof(struct fcp_rsp);
536
537 /*
538 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
539 * are sg list bdes. Initialize the first two and leave the
540 * rest for queuecommand.
541 */
542 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
543 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
544 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
545 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
546 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
547
548 /* Setup the physical region for the FCP RSP */
549 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
550 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
551 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
552 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
553 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
554
555 /*
556 * Since the IOCB for the FCP I/O is built into this
557 * lpfc_scsi_buf, initialize it with all known data now.
558 */
559 iocb = &psb->cur_iocbq.iocb;
560 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
561 if ((phba->sli_rev == 3) &&
562 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
563 /* fill in immediate fcp command BDE */
564 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
565 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
566 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
567 unsli3.fcp_ext.icd);
568 iocb->un.fcpi64.bdl.addrHigh = 0;
569 iocb->ulpBdeCount = 0;
570 iocb->ulpLe = 0;
 571 /* fill in response BDE */
572 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
573 BUFF_TYPE_BDE_64;
574 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
575 sizeof(struct fcp_rsp);
576 iocb->unsli3.fcp_ext.rbde.addrLow =
577 putPaddrLow(pdma_phys_fcp_rsp);
578 iocb->unsli3.fcp_ext.rbde.addrHigh =
579 putPaddrHigh(pdma_phys_fcp_rsp);
580 } else {
581 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
582 iocb->un.fcpi64.bdl.bdeSize =
583 (2 * sizeof(struct ulp_bde64));
584 iocb->un.fcpi64.bdl.addrLow =
585 putPaddrLow(pdma_phys_bpl);
586 iocb->un.fcpi64.bdl.addrHigh =
587 putPaddrHigh(pdma_phys_bpl);
588 iocb->ulpBdeCount = 1;
589 iocb->ulpLe = 1;
590 }
591 iocb->ulpClass = CLASS3;
592 psb->status = IOSTAT_SUCCESS;
593 /* Put it back into the SCSI buffer list */
594 lpfc_release_scsi_buf_s4(phba, psb);
470 595
471 /*
472 * Get memory from the pci pool to map the virt space to pci bus space
473 * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
474 * struct fcp_rsp and the number of bde's necessary to support the
475 * sg_tablesize.
476 */
477 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
478 &psb->dma_handle);
479 if (!psb->data) {
480 kfree(psb);
481 return NULL;
482 } 596 }
483 597
484 /* Initialize virtual ptrs to dma_buf region. */ 598 return bcnt;
485 memset(psb->data, 0, phba->cfg_sg_dma_buf_size); 599}
486 600
487 /* Allocate iotag for psb->cur_iocbq. */ 601/**
488 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 602 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
489 if (iotag == 0) { 603 * @phba: pointer to lpfc hba data structure.
490 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 604 * @axri: pointer to the fcp xri abort wcqe structure.
491 psb->data, psb->dma_handle); 605 *
492 kfree (psb); 606 * This routine is invoked by the worker thread to process a SLI4 fast-path
493 return NULL; 607 * FCP aborted xri.
608 **/
609void
610lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
611 struct sli4_wcqe_xri_aborted *axri)
612{
613 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
614 struct lpfc_scsi_buf *psb, *next_psb;
615 unsigned long iflag = 0;
616
617 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag);
618 list_for_each_entry_safe(psb, next_psb,
619 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
620 if (psb->cur_iocbq.sli4_xritag == xri) {
621 list_del(&psb->list);
622 psb->status = IOSTAT_SUCCESS;
623 spin_unlock_irqrestore(
624 &phba->sli4_hba.abts_scsi_buf_list_lock,
625 iflag);
626 lpfc_release_scsi_buf_s4(phba, psb);
627 return;
628 }
494 } 629 }
495 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; 630 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
631 iflag);
632}
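[Editor's illustration, not part of the patch] The routine above walks the aborted-buffer list under a lock, unlinks the entry whose XRI matches the WCQE, and releases it. A minimal stand-alone C analog of that match-and-unlink walk (all identifiers below are invented):

/* Userspace sketch of the aborted-XRI scan: find the entry whose tag
 * matches, unlink it (like list_del()), and hand it back to the caller. */
#include <stdio.h>
#include <stdint.h>

struct buf {
	uint16_t xritag;
	struct buf *next;
};

static struct buf *claim_aborted(struct buf **head, uint16_t xri)
{
	struct buf **pp, *b;

	for (pp = head; (b = *pp) != NULL; pp = &b->next) {
		if (b->xritag == xri) {
			*pp = b->next;	/* unlink the matching entry */
			b->next = NULL;
			return b;	/* caller releases it */
		}
	}
	return NULL;
}

int main(void)
{
	struct buf b2 = { 7, NULL }, b1 = { 3, &b2 }, *head = &b1;
	struct buf *hit = claim_aborted(&head, 7);

	printf("claimed xri %u\n", (unsigned)(hit ? hit->xritag : 0));
	return 0;
}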
496 633
497 psb->fcp_cmnd = psb->data; 634/**
 498 	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);            635 * lpfc_sli4_repost_scsi_sgl_list - Repost the Scsi buffers sgl pages as a block
499 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + 636 * @phba: pointer to lpfc hba data structure.
500 sizeof(struct fcp_rsp); 637 *
638 * This routine walks the list of scsi buffers that have been allocated and
 639 * reposts them to the HBA by using SGL block post. This is needed after a
640 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
641 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
642 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
643 *
644 * Returns: 0 = success, non-zero failure.
645 **/
646int
647lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
648{
649 struct lpfc_scsi_buf *psb;
650 int index, status, bcnt = 0, rcnt = 0, rc = 0;
651 LIST_HEAD(sblist);
652
653 for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
654 psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
655 if (psb) {
656 /* Remove from SCSI buffer list */
657 list_del(&psb->list);
658 /* Add it to a local SCSI buffer list */
659 list_add_tail(&psb->list, &sblist);
660 if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
661 bcnt = rcnt;
662 rcnt = 0;
663 }
664 } else
665 /* A hole present in the XRI array, need to skip */
666 bcnt = rcnt;
501 667
502 /* Initialize local short-hand pointers. */ 668 if (index == phba->sli4_hba.scsi_xri_cnt - 1)
503 bpl = psb->fcp_bpl; 669 /* End of XRI array for SCSI buffer, complete */
504 pdma_phys_fcp_cmd = psb->dma_handle; 670 bcnt = rcnt;
505 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
506 pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
507 sizeof(struct fcp_rsp);
508 671
 509 	/*                                                              672 		/* Continue until we collect up to a nembed page worth of sgls */
510 * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg 673 if (bcnt == 0)
511 * list bdes. Initialize the first two and leave the rest for 674 continue;
512 * queuecommand. 675 /* Now, post the SCSI buffer list sgls as a block */
513 */ 676 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
514 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd)); 677 /* Reset SCSI buffer count for next round of posting */
515 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); 678 bcnt = 0;
516 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); 679 while (!list_empty(&sblist)) {
517 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; 680 list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
518 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w); 681 list);
519 682 if (status) {
520 /* Setup the physical region for the FCP RSP */ 683 /* Put this back on the abort scsi list */
521 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); 684 psb->status = IOSTAT_LOCAL_REJECT;
522 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); 685 psb->result = IOERR_ABORT_REQUESTED;
523 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); 686 rc++;
524 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; 687 } else
525 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w); 688 psb->status = IOSTAT_SUCCESS;
689 /* Put it back into the SCSI buffer list */
690 lpfc_release_scsi_buf_s4(phba, psb);
691 }
692 }
693 return rc;
694}
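[Editor's illustration, not part of the patch] The repost loop above gathers SCSI buffers into blocks of at most LPFC_NEMBED_MBOX_SGL_CNT entries and posts each block with lpfc_sli4_post_scsi_sgl_block(), flushing early when it meets a hole in the XRI array or reaches the end. A stand-alone sketch of that batching pattern (BLOCK_MAX and post_block() are made-up stand-ins):

#include <stdio.h>

#define BLOCK_MAX 4	/* stand-in for LPFC_NEMBED_MBOX_SGL_CNT */

static void post_block(const int *blk, int cnt)
{
	printf("posting block of %d entries (first=%d)\n", cnt, blk[0]);
}

int main(void)
{
	int entries[] = { 10, 11, 12, 13, 14, 15, 16, 17, 18 };
	int n = sizeof(entries) / sizeof(entries[0]);
	int batch[BLOCK_MAX], cnt = 0;

	for (int i = 0; i < n; i++) {
		batch[cnt++] = entries[i];
		/* Flush when the block is full or the array ends, mirroring
		 * the "bcnt = rcnt" cases in the routine above. */
		if (cnt == BLOCK_MAX || i == n - 1) {
			post_block(batch, cnt);
			cnt = 0;
		}
	}
	return 0;
}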
526 695
527 /* 696/**
528 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf, 697 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
 529 	 * initialize it with all known data now.                       698 * @vport: The virtual port for which this call is being executed.
 530 	 */                                                             699 * @num_to_alloc: The requested number of buffers to allocate.
531 iocb = &psb->cur_iocbq.iocb; 700 *
532 iocb->un.fcpi64.bdl.ulpIoTag32 = 0; 701 * This routine allocates a scsi buffer for device with SLI-4 interface spec,
533 if ((phba->sli_rev == 3) && 702 * the scsi buffer contains all the necessary information needed to initiate
534 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { 703 * a SCSI I/O.
535 /* fill in immediate fcp command BDE */ 704 *
536 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; 705 * Return codes:
706 * int - number of scsi buffers that were allocated.
707 * 0 = failure, less than num_to_alloc is a partial failure.
708 **/
709static int
710lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
711{
712 struct lpfc_hba *phba = vport->phba;
713 struct lpfc_scsi_buf *psb;
714 struct sli4_sge *sgl;
715 IOCB_t *iocb;
716 dma_addr_t pdma_phys_fcp_cmd;
717 dma_addr_t pdma_phys_fcp_rsp;
718 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
719 uint16_t iotag, last_xritag = NO_XRI;
720 int status = 0, index;
721 int bcnt;
722 int non_sequential_xri = 0;
723 int rc = 0;
724 LIST_HEAD(sblist);
725
726 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
727 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
728 if (!psb)
729 break;
730
731 /*
732 * Get memory from the pci pool to map the virt space to pci bus
733 * space for an I/O. The DMA buffer includes space for the
734 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
735 * necessary to support the sg_tablesize.
736 */
737 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
738 GFP_KERNEL, &psb->dma_handle);
739 if (!psb->data) {
740 kfree(psb);
741 break;
742 }
743
744 /* Initialize virtual ptrs to dma_buf region. */
745 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
746
747 /* Allocate iotag for psb->cur_iocbq. */
748 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
749 if (iotag == 0) {
750 kfree(psb);
751 break;
752 }
753
754 psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
755 if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
756 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
757 psb->data, psb->dma_handle);
758 kfree(psb);
759 break;
760 }
761 if (last_xritag != NO_XRI
762 && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
763 non_sequential_xri = 1;
764 } else
765 list_add_tail(&psb->list, &sblist);
766 last_xritag = psb->cur_iocbq.sli4_xritag;
767
768 index = phba->sli4_hba.scsi_xri_cnt++;
769 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
770
771 psb->fcp_bpl = psb->data;
772 psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
773 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
774 psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
775 sizeof(struct fcp_cmnd));
776
777 /* Initialize local short-hand pointers. */
778 sgl = (struct sli4_sge *)psb->fcp_bpl;
779 pdma_phys_bpl = psb->dma_handle;
780 pdma_phys_fcp_cmd =
781 (psb->dma_handle + phba->cfg_sg_dma_buf_size)
782 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
783 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
784
785 /*
786 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
787 * are sg list bdes. Initialize the first two and leave the
788 * rest for queuecommand.
789 */
790 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
791 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
792 bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd));
793 bf_set(lpfc_sli4_sge_last, sgl, 0);
794 sgl->word2 = cpu_to_le32(sgl->word2);
795 sgl->word3 = cpu_to_le32(sgl->word3);
796 sgl++;
797
798 /* Setup the physical region for the FCP RSP */
799 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
800 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
801 bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp));
802 bf_set(lpfc_sli4_sge_last, sgl, 1);
803 sgl->word2 = cpu_to_le32(sgl->word2);
804 sgl->word3 = cpu_to_le32(sgl->word3);
805
806 /*
807 * Since the IOCB for the FCP I/O is built into this
808 * lpfc_scsi_buf, initialize it with all known data now.
809 */
810 iocb = &psb->cur_iocbq.iocb;
811 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
812 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
813 /* setting the BLP size to 2 * sizeof BDE may not be correct.
 814 		 * We are setting the bpl to point to our sgl. An sgl's
 815 		 * entries are 16 bytes each, while bpl entries are 12 bytes.
816 */
537 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); 817 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
538 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, 818 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
539 unsli3.fcp_ext.icd); 819 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
540 iocb->un.fcpi64.bdl.addrHigh = 0;
541 iocb->ulpBdeCount = 0;
542 iocb->ulpLe = 0;
543 /* fill in responce BDE */
544 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
545 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
546 sizeof(struct fcp_rsp);
547 iocb->unsli3.fcp_ext.rbde.addrLow =
548 putPaddrLow(pdma_phys_fcp_rsp);
549 iocb->unsli3.fcp_ext.rbde.addrHigh =
550 putPaddrHigh(pdma_phys_fcp_rsp);
551 } else {
552 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
553 iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
554 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
555 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
556 iocb->ulpBdeCount = 1; 820 iocb->ulpBdeCount = 1;
557 iocb->ulpLe = 1; 821 iocb->ulpLe = 1;
822 iocb->ulpClass = CLASS3;
823 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
824 pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
825 else
826 pdma_phys_bpl1 = 0;
827 psb->dma_phys_bpl = pdma_phys_bpl;
828 phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
829 if (non_sequential_xri) {
830 status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
831 pdma_phys_bpl1,
832 psb->cur_iocbq.sli4_xritag);
833 if (status) {
834 /* Put this back on the abort scsi list */
835 psb->status = IOSTAT_LOCAL_REJECT;
836 psb->result = IOERR_ABORT_REQUESTED;
837 rc++;
838 } else
839 psb->status = IOSTAT_SUCCESS;
840 /* Put it back into the SCSI buffer list */
841 lpfc_release_scsi_buf_s4(phba, psb);
842 break;
843 }
844 }
845 if (bcnt) {
846 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
847 /* Reset SCSI buffer count for next round of posting */
848 while (!list_empty(&sblist)) {
849 list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
850 list);
851 if (status) {
852 /* Put this back on the abort scsi list */
853 psb->status = IOSTAT_LOCAL_REJECT;
854 psb->result = IOERR_ABORT_REQUESTED;
855 rc++;
856 } else
857 psb->status = IOSTAT_SUCCESS;
858 /* Put it back into the SCSI buffer list */
859 lpfc_release_scsi_buf_s4(phba, psb);
860 }
558 } 861 }
559 iocb->ulpClass = CLASS3;
560 862
561 return psb; 863 return bcnt + non_sequential_xri - rc;
562} 864}
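[Editor's illustration, not part of the patch] Note that the SLI-4 allocator lays the DMA buffer out the opposite way from the SLI-3 path: the SGL sits at the start of the buffer while fcp_cmnd and fcp_rsp are packed at its end. A toy offset calculation of that layout, with invented sizes standing in for the real structure sizes and for cfg_sg_dma_buf_size:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t buf_size = 4096;	/* stand-in for phba->cfg_sg_dma_buf_size */
	size_t cmnd_sz  = 32;	/* stand-in for sizeof(struct fcp_cmnd)   */
	size_t rsp_sz   = 48;	/* stand-in for sizeof(struct fcp_rsp)    */

	size_t sgl_off  = 0;				/* psb->fcp_bpl  */
	size_t cmnd_off = buf_size - (cmnd_sz + rsp_sz);/* psb->fcp_cmnd */
	size_t rsp_off  = cmnd_off + cmnd_sz;		/* psb->fcp_rsp  */

	printf("sgl@%zu cmnd@%zu rsp@%zu (buf=%zu)\n",
	       sgl_off, cmnd_off, rsp_off, buf_size);
	return 0;
}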
563 865
564/** 866/**
 565 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list list of Hba    867 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
 566 * @phba: The Hba for which this call is being executed.                        868 * @vport: The virtual port for which this call is being executed.
                                                                                   869 * @num_to_alloc: The requested number of buffers to allocate.
870 *
871 * This routine wraps the actual SCSI buffer allocator function pointer from
872 * the lpfc_hba struct.
873 *
874 * Return codes:
875 * int - number of scsi buffers that were allocated.
876 * 0 = failure, less than num_to_alloc is a partial failure.
877 **/
878static inline int
879lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
880{
881 return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
882}
883
884/**
885 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
886 * @phba: The HBA for which this call is being executed.
567 * 887 *
568 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list 888 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
569 * and returns to caller. 889 * and returns to caller.
@@ -591,7 +911,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
591} 911}
592 912
593/** 913/**
594 * lpfc_release_scsi_buf - Return a scsi buffer back to hba's lpfc_scsi_buf_list 914 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
595 * @phba: The Hba for which this call is being executed. 915 * @phba: The Hba for which this call is being executed.
596 * @psb: The scsi buffer which is being released. 916 * @psb: The scsi buffer which is being released.
597 * 917 *
@@ -599,7 +919,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
599 * lpfc_scsi_buf_list list. 919 * lpfc_scsi_buf_list list.
600 **/ 920 **/
601static void 921static void
602lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) 922lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
603{ 923{
604 unsigned long iflag = 0; 924 unsigned long iflag = 0;
605 925
@@ -610,21 +930,69 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
610} 930}
611 931
612/** 932/**
 613 * lpfc_scsi_prep_dma_buf - Routine to do DMA mapping for scsi buffer           933 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list.
934 * @phba: The Hba for which this call is being executed.
935 * @psb: The scsi buffer which is being released.
936 *
937 * This routine releases @psb scsi buffer by adding it to tail of @phba
 938 * lpfc_scsi_buf_list list. For SLI4, XRIs are tied to the scsi buffer
939 * and cannot be reused for at least RA_TOV amount of time if it was
940 * aborted.
941 **/
942static void
943lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
944{
945 unsigned long iflag = 0;
946
947 if (psb->status == IOSTAT_LOCAL_REJECT
948 && psb->result == IOERR_ABORT_REQUESTED) {
949 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
950 iflag);
951 psb->pCmd = NULL;
952 list_add_tail(&psb->list,
953 &phba->sli4_hba.lpfc_abts_scsi_buf_list);
954 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
955 iflag);
956 } else {
957
958 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
959 psb->pCmd = NULL;
960 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
961 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
962 }
963}
964
965/**
 966 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list.
967 * @phba: The Hba for which this call is being executed.
968 * @psb: The scsi buffer which is being released.
969 *
970 * This routine releases @psb scsi buffer by adding it to tail of @phba
971 * lpfc_scsi_buf_list list.
972 **/
973static void
974lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
975{
976
977 phba->lpfc_release_scsi_buf(phba, psb);
978}
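[Editor's illustration, not part of the patch] lpfc_release_scsi_buf_s4() above parks aborted buffers on lpfc_abts_scsi_buf_list, because their XRI cannot be reused until the port finishes processing the abort, and returns every other buffer straight to the free list. A trivial stand-alone analog of that routing decision (status names invented):

#include <stdio.h>

enum { ST_SUCCESS, ST_LOCAL_REJECT };
enum { ERR_NONE, ERR_ABORT_REQUESTED };

static const char *release_target(int status, int result)
{
	if (status == ST_LOCAL_REJECT && result == ERR_ABORT_REQUESTED)
		return "abts_scsi_buf_list";	/* quarantined until the abort completes */
	return "lpfc_scsi_buf_list";		/* immediately reusable */
}

int main(void)
{
	printf("%s\n", release_target(ST_LOCAL_REJECT, ERR_ABORT_REQUESTED));
	printf("%s\n", release_target(ST_SUCCESS, ERR_NONE));
	return 0;
}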
979
980/**
981 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
614 * @phba: The Hba for which this call is being executed. 982 * @phba: The Hba for which this call is being executed.
615 * @lpfc_cmd: The scsi buffer which is going to be mapped. 983 * @lpfc_cmd: The scsi buffer which is going to be mapped.
616 * 984 *
617 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd 985 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
618 * field of @lpfc_cmd. This routine scans through sg elements and format the 986 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
 619 * bdea. This routine also initializes all                                      987 * through sg elements and formats the bdes. This routine also initializes all
620 * scsi command request buffer. 988 * IOCB fields which are dependent on scsi command request buffer.
621 * 989 *
622 * Return codes: 990 * Return codes:
623 * 1 - Error 991 * 1 - Error
624 * 0 - Success 992 * 0 - Success
625 **/ 993 **/
626static int 994static int
627lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 995lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
628{ 996{
629 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 997 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
630 struct scatterlist *sgel = NULL; 998 struct scatterlist *sgel = NULL;
@@ -827,8 +1195,8 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
827 * @reftag: out: ref tag (reference tag) 1195 * @reftag: out: ref tag (reference tag)
828 * 1196 *
829 * Description: 1197 * Description:
830 * Extract DIF paramters from the command if possible. Otherwise, 1198 * Extract DIF parameters from the command if possible. Otherwise,
831 * use default paratmers. 1199 * use default parameters.
832 * 1200 *
833 **/ 1201 **/
834static inline void 1202static inline void
@@ -1312,10 +1680,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1312 uint32_t bgstat = bgf->bgstat; 1680 uint32_t bgstat = bgf->bgstat;
1313 uint64_t failing_sector = 0; 1681 uint64_t failing_sector = 0;
1314 1682
1315 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx " 1683 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x "
1316 "bgstat=0x%x bghm=0x%x\n", 1684 "bgstat=0x%x bghm=0x%x\n",
1317 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), 1685 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
1318 cmd->request->nr_sectors, bgstat, bghm); 1686 blk_rq_sectors(cmd->request), bgstat, bghm);
1319 1687
1320 spin_lock(&_dump_buf_lock); 1688 spin_lock(&_dump_buf_lock);
1321 if (!_dump_buf_done) { 1689 if (!_dump_buf_done) {
@@ -1412,6 +1780,133 @@ out:
1412} 1780}
1413 1781
1414/** 1782/**
1783 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
1784 * @phba: The Hba for which this call is being executed.
1785 * @lpfc_cmd: The scsi buffer which is going to be mapped.
1786 *
1787 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
1788 * field of @lpfc_cmd for device with SLI-4 interface spec.
1789 *
1790 * Return codes:
1791 * 1 - Error
1792 * 0 - Success
1793 **/
1794static int
1795lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1796{
1797 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1798 struct scatterlist *sgel = NULL;
1799 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1800 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
1801 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1802 dma_addr_t physaddr;
1803 uint32_t num_bde = 0;
1804 uint32_t dma_len;
1805 uint32_t dma_offset = 0;
1806 int nseg;
1807
1808 /*
1809 * There are three possibilities here - use scatter-gather segment, use
1810 * the single mapping, or neither. Start the lpfc command prep by
1811 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
1812 * data bde entry.
1813 */
1814 if (scsi_sg_count(scsi_cmnd)) {
1815 /*
1816 * The driver stores the segment count returned from pci_map_sg
1817 * because this a count of dma-mappings used to map the use_sg
1818 * pages. They are not guaranteed to be the same for those
1819 * architectures that implement an IOMMU.
1820 */
1821
1822 nseg = scsi_dma_map(scsi_cmnd);
1823 if (unlikely(!nseg))
1824 return 1;
1825 sgl += 1;
1826 /* clear the last flag in the fcp_rsp map entry */
1827 sgl->word2 = le32_to_cpu(sgl->word2);
1828 bf_set(lpfc_sli4_sge_last, sgl, 0);
1829 sgl->word2 = cpu_to_le32(sgl->word2);
1830 sgl += 1;
1831
1832 lpfc_cmd->seg_cnt = nseg;
1833 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1834 printk(KERN_ERR "%s: Too many sg segments from "
1835 "dma_map_sg. Config %d, seg_cnt %d\n",
1836 __func__, phba->cfg_sg_seg_cnt,
1837 lpfc_cmd->seg_cnt);
1838 scsi_dma_unmap(scsi_cmnd);
1839 return 1;
1840 }
1841
1842 /*
1843 * The driver established a maximum scatter-gather segment count
1844 * during probe that limits the number of sg elements in any
1845 * single scsi command. Just run through the seg_cnt and format
1846 * the sge's.
1847 * When using SLI-3 the driver will try to fit all the BDEs into
1848 * the IOCB. If it can't then the BDEs get added to a BPL as it
1849 * does for SLI-2 mode.
1850 */
1851 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
1852 physaddr = sg_dma_address(sgel);
1853 dma_len = sg_dma_len(sgel);
1854 bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel));
1855 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
1856 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
1857 if ((num_bde + 1) == nseg)
1858 bf_set(lpfc_sli4_sge_last, sgl, 1);
1859 else
1860 bf_set(lpfc_sli4_sge_last, sgl, 0);
1861 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
1862 sgl->word2 = cpu_to_le32(sgl->word2);
1863 sgl->word3 = cpu_to_le32(sgl->word3);
1864 dma_offset += dma_len;
1865 sgl++;
1866 }
1867 } else {
1868 sgl += 1;
1869 /* clear the last flag in the fcp_rsp map entry */
1870 sgl->word2 = le32_to_cpu(sgl->word2);
1871 bf_set(lpfc_sli4_sge_last, sgl, 1);
1872 sgl->word2 = cpu_to_le32(sgl->word2);
1873 }
1874
1875 /*
1876 * Finish initializing those IOCB fields that are dependent on the
1877 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
 1878	 * explicitly reinitialized since all iocb memory
 1879	 * resources are reused.
1880 */
1881 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
1882
1883 /*
1884 * Due to difference in data length between DIF/non-DIF paths,
1885 * we need to set word 4 of IOCB here
1886 */
1887 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
1888 return 0;
1889}
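[Editor's illustration, not part of the patch] The SLI-4 prep routine above clears the "last" flag on the fcp_rsp SGE when data SGEs follow it, and then sets that flag only on the final data SGE of the scatter list. A toy loop showing the same last-entry marking over a plain array:

#include <stdio.h>

struct toy_sge {
	unsigned int len;
	unsigned int last;	/* stand-in for the lpfc_sli4_sge_last bit */
};

int main(void)
{
	struct toy_sge sge[3] = { { 512, 0 }, { 1024, 0 }, { 256, 0 } };
	int nseg = 3;

	for (int i = 0; i < nseg; i++)
		sge[i].last = (i + 1 == nseg);	/* set only on the final entry */

	for (int i = 0; i < nseg; i++)
		printf("sge[%d]: len=%u last=%u\n", i, sge[i].len, sge[i].last);
	return 0;
}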
1890
1891/**
1892 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
1893 * @phba: The Hba for which this call is being executed.
1894 * @lpfc_cmd: The scsi buffer which is going to be mapped.
1895 *
1896 * This routine wraps the actual DMA mapping function pointer from the
1897 * lpfc_hba struct.
1898 *
1899 * Return codes:
1900 * 1 - Error
1901 * 0 - Success
1902 **/
1903static inline int
1904lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1905{
1906 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
1907}
1908
1909/**
1415 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error 1910 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
1416 * @phba: Pointer to hba context object. 1911 * @phba: Pointer to hba context object.
1417 * @vport: Pointer to vport object. 1912 * @vport: Pointer to vport object.
@@ -1504,15 +1999,15 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
1504} 1999}
1505 2000
1506/** 2001/**
1507 * lpfc_scsi_unprep_dma_buf - Routine to un-map DMA mapping of scatter gather 2002 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
1508 * @phba: The Hba for which this call is being executed. 2003 * @phba: The HBA for which this call is being executed.
1509 * @psb: The scsi buffer which is going to be un-mapped. 2004 * @psb: The scsi buffer which is going to be un-mapped.
1510 * 2005 *
1511 * This routine does DMA un-mapping of scatter gather list of scsi command 2006 * This routine does DMA un-mapping of scatter gather list of scsi command
1512 * field of @lpfc_cmd. 2007 * field of @lpfc_cmd for device with SLI-3 interface spec.
1513 **/ 2008 **/
1514static void 2009static void
1515lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) 2010lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1516{ 2011{
1517 /* 2012 /*
1518 * There are only two special cases to consider. (1) the scsi command 2013 * There are only two special cases to consider. (1) the scsi command
@@ -1676,7 +2171,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1676 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine 2171 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
1677 * @phba: The Hba for which this call is being executed. 2172 * @phba: The Hba for which this call is being executed.
1678 * @pIocbIn: The command IOCBQ for the scsi cmnd. 2173 * @pIocbIn: The command IOCBQ for the scsi cmnd.
1679 * @pIocbOut: The response IOCBQ for the scsi cmnd . 2174 * @pIocbOut: The response IOCBQ for the scsi cmnd.
1680 * 2175 *
1681 * This routine assigns scsi command result by looking into response IOCB 2176 * This routine assigns scsi command result by looking into response IOCB
1682 * status field appropriately. This routine handles QUEUE FULL condition as 2177 * status field appropriately. This routine handles QUEUE FULL condition as
@@ -1957,13 +2452,13 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
1957} 2452}
1958 2453
1959/** 2454/**
 1960 * lpfc_scsi_prep_cmnd - Routine to convert scsi cmnd to FCP information unit  2455 * lpfc_scsi_prep_cmnd - Wrapper func to convert scsi cmnd to FCP info unit
1961 * @vport: The virtual port for which this call is being executed. 2456 * @vport: The virtual port for which this call is being executed.
1962 * @lpfc_cmd: The scsi command which needs to send. 2457 * @lpfc_cmd: The scsi command which needs to send.
1963 * @pnode: Pointer to lpfc_nodelist. 2458 * @pnode: Pointer to lpfc_nodelist.
1964 * 2459 *
1965 * This routine initializes fcp_cmnd and iocb data structure from scsi command 2460 * This routine initializes fcp_cmnd and iocb data structure from scsi command
1966 * to transfer. 2461 * to transfer for device with SLI3 interface spec.
1967 **/ 2462 **/
1968static void 2463static void
1969lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, 2464lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
@@ -2013,8 +2508,11 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2013 if (scsi_sg_count(scsi_cmnd)) { 2508 if (scsi_sg_count(scsi_cmnd)) {
2014 if (datadir == DMA_TO_DEVICE) { 2509 if (datadir == DMA_TO_DEVICE) {
2015 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 2510 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
2016 iocb_cmd->un.fcpi.fcpi_parm = 0; 2511 if (phba->sli_rev < LPFC_SLI_REV4) {
2017 iocb_cmd->ulpPU = 0; 2512 iocb_cmd->un.fcpi.fcpi_parm = 0;
2513 iocb_cmd->ulpPU = 0;
2514 } else
2515 iocb_cmd->ulpPU = PARM_READ_CHECK;
2018 fcp_cmnd->fcpCntl3 = WRITE_DATA; 2516 fcp_cmnd->fcpCntl3 = WRITE_DATA;
2019 phba->fc4OutputRequests++; 2517 phba->fc4OutputRequests++;
2020 } else { 2518 } else {
@@ -2051,13 +2549,14 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2051} 2549}
2052 2550
2053/** 2551/**
2054 * lpfc_scsi_prep_task_mgmt_cmnd - Convert scsi TM cmnd to FCP information unit 2552 * lpfc_scsi_prep_task_mgmt_cmnd - Convert SLI3 scsi TM cmd to FCP info unit
2055 * @vport: The virtual port for which this call is being executed. 2553 * @vport: The virtual port for which this call is being executed.
2056 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 2554 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2057 * @lun: Logical unit number. 2555 * @lun: Logical unit number.
2058 * @task_mgmt_cmd: SCSI task management command. 2556 * @task_mgmt_cmd: SCSI task management command.
2059 * 2557 *
2060 * This routine creates FCP information unit corresponding to @task_mgmt_cmd. 2558 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
2559 * for device with SLI-3 interface spec.
2061 * 2560 *
2062 * Return codes: 2561 * Return codes:
2063 * 0 - Error 2562 * 0 - Error
@@ -2106,14 +2605,56 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2106 * The driver will provide the timeout mechanism. 2605 * The driver will provide the timeout mechanism.
2107 */ 2606 */
2108 piocb->ulpTimeout = 0; 2607 piocb->ulpTimeout = 0;
2109 } else { 2608 } else
2110 piocb->ulpTimeout = lpfc_cmd->timeout; 2609 piocb->ulpTimeout = lpfc_cmd->timeout;
2111 } 2610
2611 if (vport->phba->sli_rev == LPFC_SLI_REV4)
2612 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
2112 2613
2113 return 1; 2614 return 1;
2114} 2615}
2115 2616
2116/** 2617/**
 2618 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
2619 * @phba: The hba struct for which this call is being executed.
2620 * @dev_grp: The HBA PCI-Device group number.
2621 *
2622 * This routine sets up the SCSI interface API function jump table in @phba
2623 * struct.
2624 * Returns: 0 - success, -ENODEV - failure.
2625 **/
2626int
2627lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
2628{
2629
2630 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
2631 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
2632 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2633
2634 switch (dev_grp) {
2635 case LPFC_PCI_DEV_LP:
2636 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
2637 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
2638 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
2639 break;
2640 case LPFC_PCI_DEV_OC:
2641 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
2642 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
2643 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
2644 break;
2645 default:
2646 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2647 "1418 Invalid HBA PCI-device group: 0x%x\n",
2648 dev_grp);
2649 return -ENODEV;
2650 break;
2651 }
2652 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2653 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
2654 return 0;
2655}
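[Editor's illustration, not part of the patch] The setup routine above fills a per-HBA jump table so that generic wrappers such as lpfc_scsi_prep_dma_buf() dispatch to the SLI-3 or SLI-4 variant chosen once at init time, keyed on the PCI device group. A stand-alone sketch of the same idea (all identifiers below are invented):

#include <stdio.h>

struct toy_hba {
	int (*prep_dma_buf)(struct toy_hba *);
};

static int prep_s3(struct toy_hba *hba) { (void)hba; puts("SLI-3 prep"); return 0; }
static int prep_s4(struct toy_hba *hba) { (void)hba; puts("SLI-4 prep"); return 0; }

enum dev_grp { DEV_LP, DEV_OC };

static int api_table_setup(struct toy_hba *hba, enum dev_grp grp)
{
	switch (grp) {
	case DEV_LP: hba->prep_dma_buf = prep_s3; break;	/* SLI-3 parts  */
	case DEV_OC: hba->prep_dma_buf = prep_s4; break;	/* SLI-4 parts  */
	default: return -1;					/* unknown group */
	}
	return 0;
}

int main(void)
{
	struct toy_hba hba;

	if (api_table_setup(&hba, DEV_OC) == 0)
		hba.prep_dma_buf(&hba);	/* generic caller, SLI-specific behavior */
	return 0;
}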
2656
2657/**
2117 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command 2658 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
2118 * @phba: The Hba for which this call is being executed. 2659 * @phba: The Hba for which this call is being executed.
2119 * @cmdiocbq: Pointer to lpfc_iocbq data structure. 2660 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
@@ -2135,73 +2676,6 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
2135} 2676}
2136 2677
2137/** 2678/**
2138 * lpfc_scsi_tgt_reset - Target reset handler
2139 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure
2140 * @vport: The virtual port for which this call is being executed.
2141 * @tgt_id: Target ID.
2142 * @lun: Lun number.
2143 * @rdata: Pointer to lpfc_rport_data.
2144 *
2145 * This routine issues a TARGET RESET iocb to reset a target with @tgt_id ID.
2146 *
2147 * Return Code:
2148 * 0x2003 - Error
2149 * 0x2002 - Success.
2150 **/
2151static int
2152lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
2153 unsigned tgt_id, unsigned int lun,
2154 struct lpfc_rport_data *rdata)
2155{
2156 struct lpfc_hba *phba = vport->phba;
2157 struct lpfc_iocbq *iocbq;
2158 struct lpfc_iocbq *iocbqrsp;
2159 int ret;
2160 int status;
2161
2162 if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
2163 return FAILED;
2164
2165 lpfc_cmd->rdata = rdata;
2166 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
2167 FCP_TARGET_RESET);
2168 if (!status)
2169 return FAILED;
2170
2171 iocbq = &lpfc_cmd->cur_iocbq;
2172 iocbqrsp = lpfc_sli_get_iocbq(phba);
2173
2174 if (!iocbqrsp)
2175 return FAILED;
2176
2177 /* Issue Target Reset to TGT <num> */
2178 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2179 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
2180 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
2181 status = lpfc_sli_issue_iocb_wait(phba,
2182 &phba->sli.ring[phba->sli.fcp_ring],
2183 iocbq, iocbqrsp, lpfc_cmd->timeout);
2184 if (status != IOCB_SUCCESS) {
2185 if (status == IOCB_TIMEDOUT) {
2186 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
2187 ret = TIMEOUT_ERROR;
2188 } else
2189 ret = FAILED;
2190 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2191 } else {
2192 ret = SUCCESS;
2193 lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
2194 lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
2195 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
2196 (lpfc_cmd->result & IOERR_DRVR_MASK))
2197 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2198 }
2199
2200 lpfc_sli_release_iocbq(phba, iocbqrsp);
2201 return ret;
2202}
2203
2204/**
2205 * lpfc_info - Info entry point of scsi_host_template data structure 2679 * lpfc_info - Info entry point of scsi_host_template data structure
2206 * @host: The scsi host for which this call is being executed. 2680 * @host: The scsi host for which this call is being executed.
2207 * 2681 *
@@ -2305,7 +2779,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2305 struct Scsi_Host *shost = cmnd->device->host; 2779 struct Scsi_Host *shost = cmnd->device->host;
2306 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2780 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2307 struct lpfc_hba *phba = vport->phba; 2781 struct lpfc_hba *phba = vport->phba;
2308 struct lpfc_sli *psli = &phba->sli;
2309 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 2782 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
2310 struct lpfc_nodelist *ndlp = rdata->pnode; 2783 struct lpfc_nodelist *ndlp = rdata->pnode;
2311 struct lpfc_scsi_buf *lpfc_cmd; 2784 struct lpfc_scsi_buf *lpfc_cmd;
@@ -2378,15 +2851,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2378 if (cmnd->cmnd[0] == READ_10) 2851 if (cmnd->cmnd[0] == READ_10)
2379 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2852 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2380 "9035 BLKGRD: READ @ sector %llu, " 2853 "9035 BLKGRD: READ @ sector %llu, "
2381 "count %lu\n", 2854 "count %u\n",
2382 (unsigned long long)scsi_get_lba(cmnd), 2855 (unsigned long long)scsi_get_lba(cmnd),
2383 cmnd->request->nr_sectors); 2856 blk_rq_sectors(cmnd->request));
2384 else if (cmnd->cmnd[0] == WRITE_10) 2857 else if (cmnd->cmnd[0] == WRITE_10)
2385 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2858 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2386 "9036 BLKGRD: WRITE @ sector %llu, " 2859 "9036 BLKGRD: WRITE @ sector %llu, "
2387 "count %lu cmd=%p\n", 2860 "count %u cmd=%p\n",
2388 (unsigned long long)scsi_get_lba(cmnd), 2861 (unsigned long long)scsi_get_lba(cmnd),
2389 cmnd->request->nr_sectors, 2862 blk_rq_sectors(cmnd->request),
2390 cmnd); 2863 cmnd);
2391 2864
2392 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 2865 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
@@ -2406,15 +2879,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2406 if (cmnd->cmnd[0] == READ_10) 2879 if (cmnd->cmnd[0] == READ_10)
2407 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2880 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2408 "9040 dbg: READ @ sector %llu, " 2881 "9040 dbg: READ @ sector %llu, "
2409 "count %lu\n", 2882 "count %u\n",
2410 (unsigned long long)scsi_get_lba(cmnd), 2883 (unsigned long long)scsi_get_lba(cmnd),
2411 cmnd->request->nr_sectors); 2884 blk_rq_sectors(cmnd->request));
2412 else if (cmnd->cmnd[0] == WRITE_10) 2885 else if (cmnd->cmnd[0] == WRITE_10)
2413 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2886 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2414 "9041 dbg: WRITE @ sector %llu, " 2887 "9041 dbg: WRITE @ sector %llu, "
2415 "count %lu cmd=%p\n", 2888 "count %u cmd=%p\n",
2416 (unsigned long long)scsi_get_lba(cmnd), 2889 (unsigned long long)scsi_get_lba(cmnd),
2417 cmnd->request->nr_sectors, cmnd); 2890 blk_rq_sectors(cmnd->request), cmnd);
2418 else 2891 else
2419 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2892 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2420 "9042 dbg: parser not implemented\n"); 2893 "9042 dbg: parser not implemented\n");
@@ -2427,7 +2900,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2427 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); 2900 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
2428 2901
2429 atomic_inc(&ndlp->cmd_pending); 2902 atomic_inc(&ndlp->cmd_pending);
2430 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], 2903 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
2431 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); 2904 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
2432 if (err) { 2905 if (err) {
2433 atomic_dec(&ndlp->cmd_pending); 2906 atomic_dec(&ndlp->cmd_pending);
@@ -2490,7 +2963,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2490 struct Scsi_Host *shost = cmnd->device->host; 2963 struct Scsi_Host *shost = cmnd->device->host;
2491 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2964 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2492 struct lpfc_hba *phba = vport->phba; 2965 struct lpfc_hba *phba = vport->phba;
2493 struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
2494 struct lpfc_iocbq *iocb; 2966 struct lpfc_iocbq *iocb;
2495 struct lpfc_iocbq *abtsiocb; 2967 struct lpfc_iocbq *abtsiocb;
2496 struct lpfc_scsi_buf *lpfc_cmd; 2968 struct lpfc_scsi_buf *lpfc_cmd;
@@ -2531,7 +3003,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2531 icmd = &abtsiocb->iocb; 3003 icmd = &abtsiocb->iocb;
2532 icmd->un.acxri.abortType = ABORT_TYPE_ABTS; 3004 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
2533 icmd->un.acxri.abortContextTag = cmd->ulpContext; 3005 icmd->un.acxri.abortContextTag = cmd->ulpContext;
2534 icmd->un.acxri.abortIoTag = cmd->ulpIoTag; 3006 if (phba->sli_rev == LPFC_SLI_REV4)
3007 icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
3008 else
3009 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
2535 3010
2536 icmd->ulpLe = 1; 3011 icmd->ulpLe = 1;
2537 icmd->ulpClass = cmd->ulpClass; 3012 icmd->ulpClass = cmd->ulpClass;
@@ -2542,7 +3017,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2542 3017
2543 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 3018 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
2544 abtsiocb->vport = vport; 3019 abtsiocb->vport = vport;
2545 if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) { 3020 if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
3021 IOCB_ERROR) {
2546 lpfc_sli_release_iocbq(phba, abtsiocb); 3022 lpfc_sli_release_iocbq(phba, abtsiocb);
2547 ret = FAILED; 3023 ret = FAILED;
2548 goto out; 3024 goto out;
@@ -2579,157 +3055,334 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2579 return ret; 3055 return ret;
2580} 3056}
2581 3057
3058static char *
3059lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
3060{
3061 switch (task_mgmt_cmd) {
3062 case FCP_ABORT_TASK_SET:
3063 return "ABORT_TASK_SET";
3064 case FCP_CLEAR_TASK_SET:
3065 return "FCP_CLEAR_TASK_SET";
3066 case FCP_BUS_RESET:
3067 return "FCP_BUS_RESET";
3068 case FCP_LUN_RESET:
3069 return "FCP_LUN_RESET";
3070 case FCP_TARGET_RESET:
3071 return "FCP_TARGET_RESET";
3072 case FCP_CLEAR_ACA:
3073 return "FCP_CLEAR_ACA";
3074 case FCP_TERMINATE_TASK:
3075 return "FCP_TERMINATE_TASK";
3076 default:
3077 return "unknown";
3078 }
3079}
3080
2582/** 3081/**
2583 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point 3082 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
2584 * @cmnd: Pointer to scsi_cmnd data structure. 3083 * @vport: The virtual port for which this call is being executed.
3084 * @rdata: Pointer to remote port local data
3085 * @tgt_id: Target ID of remote device.
3086 * @lun_id: Lun number for the TMF
3087 * @task_mgmt_cmd: type of TMF to send
2585 * 3088 *
2586 * This routine does a device reset by sending a TARGET_RESET task management 3089 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
2587 * command. 3090 * a remote port.
2588 * 3091 *
2589 * Return code : 3092 * Return Code:
2590 * 0x2003 - Error 3093 * 0x2003 - Error
2591 * 0x2002 - Success 3094 * 0x2002 - Success.
2592 **/ 3095 **/
2593static int 3096static int
2594lpfc_device_reset_handler(struct scsi_cmnd *cmnd) 3097lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
3098 unsigned tgt_id, unsigned int lun_id,
3099 uint8_t task_mgmt_cmd)
2595{ 3100{
2596 struct Scsi_Host *shost = cmnd->device->host;
2597 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2598 struct lpfc_hba *phba = vport->phba; 3101 struct lpfc_hba *phba = vport->phba;
2599 struct lpfc_scsi_buf *lpfc_cmd; 3102 struct lpfc_scsi_buf *lpfc_cmd;
2600 struct lpfc_iocbq *iocbq, *iocbqrsp; 3103 struct lpfc_iocbq *iocbq;
2601 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 3104 struct lpfc_iocbq *iocbqrsp;
2602 struct lpfc_nodelist *pnode = rdata->pnode; 3105 int ret;
2603 unsigned long later;
2604 int ret = SUCCESS;
2605 int status; 3106 int status;
2606 int cnt;
2607 struct lpfc_scsi_event_header scsi_event;
2608 3107
2609 lpfc_block_error_handler(cmnd); 3108 if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
2610 /*
2611 * If target is not in a MAPPED state, delay the reset until
2612 * target is rediscovered or devloss timeout expires.
2613 */
2614 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
2615 while (time_after(later, jiffies)) {
2616 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
2617 return FAILED;
2618 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
2619 break;
2620 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
2621 rdata = cmnd->device->hostdata;
2622 if (!rdata)
2623 break;
2624 pnode = rdata->pnode;
2625 }
2626
2627 scsi_event.event_type = FC_REG_SCSI_EVENT;
2628 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
2629 scsi_event.lun = 0;
2630 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
2631 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
2632
2633 fc_host_post_vendor_event(shost,
2634 fc_get_event_number(),
2635 sizeof(scsi_event),
2636 (char *)&scsi_event,
2637 LPFC_NL_VENDOR_ID);
2638
2639 if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
2640 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2641 "0721 LUN Reset rport "
2642 "failure: msec x%x rdata x%p\n",
2643 jiffies_to_msecs(jiffies - later), rdata);
2644 return FAILED; 3109 return FAILED;
2645 } 3110
2646 lpfc_cmd = lpfc_get_scsi_buf(phba); 3111 lpfc_cmd = lpfc_get_scsi_buf(phba);
2647 if (lpfc_cmd == NULL) 3112 if (lpfc_cmd == NULL)
2648 return FAILED; 3113 return FAILED;
2649 lpfc_cmd->timeout = 60; 3114 lpfc_cmd->timeout = 60;
2650 lpfc_cmd->rdata = rdata; 3115 lpfc_cmd->rdata = rdata;
2651 3116
2652 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, 3117 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
2653 cmnd->device->lun, 3118 task_mgmt_cmd);
2654 FCP_TARGET_RESET);
2655 if (!status) { 3119 if (!status) {
2656 lpfc_release_scsi_buf(phba, lpfc_cmd); 3120 lpfc_release_scsi_buf(phba, lpfc_cmd);
2657 return FAILED; 3121 return FAILED;
2658 } 3122 }
2659 iocbq = &lpfc_cmd->cur_iocbq;
2660 3123
2661 /* get a buffer for this IOCB command response */ 3124 iocbq = &lpfc_cmd->cur_iocbq;
2662 iocbqrsp = lpfc_sli_get_iocbq(phba); 3125 iocbqrsp = lpfc_sli_get_iocbq(phba);
2663 if (iocbqrsp == NULL) { 3126 if (iocbqrsp == NULL) {
2664 lpfc_release_scsi_buf(phba, lpfc_cmd); 3127 lpfc_release_scsi_buf(phba, lpfc_cmd);
2665 return FAILED; 3128 return FAILED;
2666 } 3129 }
3130
2667 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 3131 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2668 "0703 Issue target reset to TGT %d LUN %d " 3132 "0702 Issue %s to TGT %d LUN %d "
2669 "rpi x%x nlp_flag x%x\n", cmnd->device->id, 3133 "rpi x%x nlp_flag x%x\n",
2670 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); 3134 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
2671 status = lpfc_sli_issue_iocb_wait(phba, 3135 rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
2672 &phba->sli.ring[phba->sli.fcp_ring], 3136
3137 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
2673 iocbq, iocbqrsp, lpfc_cmd->timeout); 3138 iocbq, iocbqrsp, lpfc_cmd->timeout);
2674 if (status == IOCB_TIMEDOUT) { 3139 if (status != IOCB_SUCCESS) {
2675 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 3140 if (status == IOCB_TIMEDOUT) {
2676 ret = TIMEOUT_ERROR; 3141 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
2677 } else { 3142 ret = TIMEOUT_ERROR;
2678 if (status != IOCB_SUCCESS) 3143 } else
2679 ret = FAILED; 3144 ret = FAILED;
2680 lpfc_release_scsi_buf(phba, lpfc_cmd); 3145 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2681 } 3146 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2682 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3147 "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n",
2683 "0713 SCSI layer issued device reset (%d, %d) " 3148 lpfc_taskmgmt_name(task_mgmt_cmd),
2684 "return x%x status x%x result x%x\n", 3149 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
2685 cmnd->device->id, cmnd->device->lun, ret,
2686 iocbqrsp->iocb.ulpStatus,
2687 iocbqrsp->iocb.un.ulpWord[4]); 3150 iocbqrsp->iocb.un.ulpWord[4]);
3151 } else
3152 ret = SUCCESS;
3153
2688 lpfc_sli_release_iocbq(phba, iocbqrsp); 3154 lpfc_sli_release_iocbq(phba, iocbqrsp);
2689 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun, 3155
2690 LPFC_CTX_TGT); 3156 if (ret != TIMEOUT_ERROR)
3157 lpfc_release_scsi_buf(phba, lpfc_cmd);
3158
3159 return ret;
3160}
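[Editor's illustration, not part of the patch] lpfc_send_taskmgmt() above maps the synchronous IOCB outcome onto the SCSI error-handler return values, keeping the timeout case separate so the buffer is not released while the firmware may still own it. A toy version of that mapping (enum names invented):

#include <stdio.h>

enum iocb_rc { RC_SUCCESS, RC_ERROR, RC_TIMEDOUT };
enum eh_rc { EH_SUCCESS, EH_FAILED, EH_TIMEOUT };

static enum eh_rc tmf_result(enum iocb_rc rc)
{
	if (rc == RC_SUCCESS)
		return EH_SUCCESS;
	/* On timeout the completion is deferred; everything else is a failure. */
	return rc == RC_TIMEDOUT ? EH_TIMEOUT : EH_FAILED;
}

int main(void)
{
	printf("%d %d %d\n", tmf_result(RC_SUCCESS),
	       tmf_result(RC_TIMEDOUT), tmf_result(RC_ERROR));
	return 0;
}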
3161
3162/**
 3163 * lpfc_chk_tgt_mapped - Verify the scsi target (rport) is present and mapped
3164 * @vport: The virtual port to check on
3165 * @cmnd: Pointer to scsi_cmnd data structure.
3166 *
3167 * This routine delays until the scsi target (aka rport) for the
3168 * command exists (is present and logged in) or we declare it non-existent.
3169 *
3170 * Return code :
3171 * 0x2003 - Error
3172 * 0x2002 - Success
3173 **/
3174static int
3175lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
3176{
3177 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3178 struct lpfc_nodelist *pnode = rdata->pnode;
3179 unsigned long later;
3180
3181 /*
3182 * If target is not in a MAPPED state, delay until
3183 * target is rediscovered or devloss timeout expires.
3184 */
3185 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
3186 while (time_after(later, jiffies)) {
3187 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3188 return FAILED;
3189 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
3190 return SUCCESS;
3191 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
3192 rdata = cmnd->device->hostdata;
3193 if (!rdata)
3194 return FAILED;
3195 pnode = rdata->pnode;
3196 }
3197 if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
3198 (pnode->nlp_state != NLP_STE_MAPPED_NODE))
3199 return FAILED;
3200 return SUCCESS;
3201}
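[Editor's illustration, not part of the patch] lpfc_chk_tgt_mapped() above polls in 500 ms steps until the rport reaches NLP_STE_MAPPED_NODE or twice the devloss timeout expires. A stand-alone analog of that bounded wait, using a retry budget in place of jiffies:

#include <stdio.h>

enum state { UNMAPPED, MAPPED };

static enum state sample_state(int attempt)
{
	return attempt >= 3 ? MAPPED : UNMAPPED;	/* pretend it maps on try 3 */
}

static int wait_mapped(int max_attempts)
{
	for (int i = 0; i < max_attempts; i++) {
		if (sample_state(i) == MAPPED)
			return 0;	/* SUCCESS */
		/* the real code sleeps 500 ms here via
		 * schedule_timeout_uninterruptible() */
	}
	return -1;	/* FAILED: deadline expired */
}

int main(void)
{
	printf("wait_mapped -> %d\n", wait_mapped(10));
	return 0;
}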
3202
3203/**
 3204 * lpfc_reset_flush_io_context - Flush outstanding I/O after a reset TMF
3205 * @vport: The virtual port (scsi_host) for the flush context
 3206 * @tgt_id: If aborting by Target context - specifies the target id
3207 * @lun_id: If aborting by Lun context - specifies the lun id
3208 * @context: specifies the context level to flush at.
3209 *
3210 * After a reset condition via TMF, we need to flush orphaned i/o
3211 * contexts from the adapter. This routine aborts any contexts
3212 * outstanding, then waits for their completions. The wait is
3213 * bounded by devloss_tmo though.
3214 *
3215 * Return code :
3216 * 0x2003 - Error
3217 * 0x2002 - Success
3218 **/
3219static int
3220lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
3221 uint64_t lun_id, lpfc_ctx_cmd context)
3222{
3223 struct lpfc_hba *phba = vport->phba;
3224 unsigned long later;
3225 int cnt;
3226
3227 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
2691 if (cnt) 3228 if (cnt)
2692 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 3229 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
2693 cmnd->device->id, cmnd->device->lun, 3230 tgt_id, lun_id, context);
2694 LPFC_CTX_TGT);
2695 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; 3231 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
2696 while (time_after(later, jiffies) && cnt) { 3232 while (time_after(later, jiffies) && cnt) {
2697 schedule_timeout_uninterruptible(msecs_to_jiffies(20)); 3233 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
2698 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, 3234 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
2699 cmnd->device->lun, LPFC_CTX_TGT);
2700 } 3235 }
2701 if (cnt) { 3236 if (cnt) {
2702 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3237 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2703 "0719 device reset I/O flush failure: " 3238 "0724 I/O flush failure for context %s : cnt x%x\n",
2704 "cnt x%x\n", cnt); 3239 ((context == LPFC_CTX_LUN) ? "LUN" :
2705 ret = FAILED; 3240 ((context == LPFC_CTX_TGT) ? "TGT" :
3241 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
3242 cnt);
3243 return FAILED;
2706 } 3244 }
2707 return ret; 3245 return SUCCESS;
3246}
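[Editor's illustration, not part of the patch] lpfc_reset_flush_io_context() above counts and aborts outstanding I/O at LUN, target, or host scope before waiting for the count to drain. A toy filter showing how a single command matches (or falls outside) each flush context:

#include <stdio.h>

enum ctx { CTX_LUN, CTX_TGT, CTX_HOST };

struct io { unsigned int tgt, lun; };

static int in_context(const struct io *io, enum ctx c,
		      unsigned int tgt_id, unsigned int lun_id)
{
	switch (c) {
	case CTX_LUN:  return io->tgt == tgt_id && io->lun == lun_id;
	case CTX_TGT:  return io->tgt == tgt_id;
	case CTX_HOST: return 1;	/* everything on the host matches */
	}
	return 0;
}

int main(void)
{
	struct io io = { .tgt = 2, .lun = 5 };

	printf("LUN(2,5): %d  TGT(2): %d  HOST: %d\n",
	       in_context(&io, CTX_LUN, 2, 5),
	       in_context(&io, CTX_TGT, 2, 0),
	       in_context(&io, CTX_HOST, 0, 0));
	return 0;
}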
3247
3248/**
3249 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
3250 * @cmnd: Pointer to scsi_cmnd data structure.
3251 *
3252 * This routine does a device reset by sending a LUN_RESET task management
3253 * command.
3254 *
3255 * Return code :
3256 * 0x2003 - Error
3257 * 0x2002 - Success
3258 **/
3259static int
3260lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
3261{
3262 struct Scsi_Host *shost = cmnd->device->host;
3263 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3264 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3265 struct lpfc_nodelist *pnode = rdata->pnode;
3266 unsigned tgt_id = cmnd->device->id;
3267 unsigned int lun_id = cmnd->device->lun;
3268 struct lpfc_scsi_event_header scsi_event;
3269 int status;
3270
3271 lpfc_block_error_handler(cmnd);
3272
3273 status = lpfc_chk_tgt_mapped(vport, cmnd);
3274 if (status == FAILED) {
3275 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3276 "0721 Device Reset rport failure: rdata x%p\n", rdata);
3277 return FAILED;
3278 }
3279
3280 scsi_event.event_type = FC_REG_SCSI_EVENT;
3281 scsi_event.subcategory = LPFC_EVENT_LUNRESET;
3282 scsi_event.lun = lun_id;
3283 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
3284 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
3285
3286 fc_host_post_vendor_event(shost, fc_get_event_number(),
3287 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3288
3289 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
3290 FCP_LUN_RESET);
3291
3292 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3293 "0713 SCSI layer issued Device Reset (%d, %d) "
3294 "return x%x\n", tgt_id, lun_id, status);
3295
3296 /*
 3297 	 * We have to clean up i/o as they may be orphaned by the TMF;
3298 * or if the TMF failed, they may be in an indeterminate state.
3299 * So, continue on.
3300 * We will report success if all the i/o aborts successfully.
3301 */
3302 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
3303 LPFC_CTX_LUN);
3304 return status;
3305}
3306
3307/**
3308 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
3309 * @cmnd: Pointer to scsi_cmnd data structure.
3310 *
3311 * This routine does a target reset by sending a TARGET_RESET task management
3312 * command.
3313 *
3314 * Return code :
3315 * 0x2003 - Error
3316 * 0x2002 - Success
3317 **/
3318static int
3319lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
3320{
3321 struct Scsi_Host *shost = cmnd->device->host;
3322 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3323 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3324 struct lpfc_nodelist *pnode = rdata->pnode;
3325 unsigned tgt_id = cmnd->device->id;
3326 unsigned int lun_id = cmnd->device->lun;
3327 struct lpfc_scsi_event_header scsi_event;
3328 int status;
3329
3330 lpfc_block_error_handler(cmnd);
3331
3332 status = lpfc_chk_tgt_mapped(vport, cmnd);
3333 if (status == FAILED) {
3334 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3335 "0722 Target Reset rport failure: rdata x%p\n", rdata);
3336 return FAILED;
3337 }
3338
3339 scsi_event.event_type = FC_REG_SCSI_EVENT;
3340 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
3341 scsi_event.lun = 0;
3342 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
3343 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
3344
3345 fc_host_post_vendor_event(shost, fc_get_event_number(),
3346 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3347
3348 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
3349 FCP_TARGET_RESET);
3350
3351 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3352 "0723 SCSI layer issued Target Reset (%d, %d) "
3353 "return x%x\n", tgt_id, lun_id, status);
3354
3355 /*
3356 * We have to clean up i/o as : they may be orphaned by the TMF;
3357 * or if the TMF failed, they may be in an indeterminate state.
3358 * So, continue on.
3359 * We will report success if all the i/o aborts successfully.
3360 */
3361 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
3362 LPFC_CTX_TGT);
3363 return status;
2708} 3364}
2709 3365
2710/** 3366/**
2711 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point 3367 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
2712 * @cmnd: Pointer to scsi_cmnd data structure. 3368 * @cmnd: Pointer to scsi_cmnd data structure.
2713 * 3369 *
2714 * This routine does target reset to all target on @cmnd->device->host. 3370 * This routine does target reset to all targets on @cmnd->device->host.
3371 * This emulates Parallel SCSI Bus Reset Semantics.
2715 * 3372 *
2716 * Return Code: 3373 * Return code :
2717 * 0x2003 - Error 3374 * 0x2003 - Error
2718 * 0x2002 - Success 3375 * 0x2002 - Success
2719 **/ 3376 **/
2720static int 3377static int
2721lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) 3378lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
2722{ 3379{
2723 struct Scsi_Host *shost = cmnd->device->host; 3380 struct Scsi_Host *shost = cmnd->device->host;
2724 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3381 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2725 struct lpfc_hba *phba = vport->phba;
2726 struct lpfc_nodelist *ndlp = NULL; 3382 struct lpfc_nodelist *ndlp = NULL;
2727 int match;
2728 int ret = SUCCESS, status = SUCCESS, i;
2729 int cnt;
2730 struct lpfc_scsi_buf * lpfc_cmd;
2731 unsigned long later;
2732 struct lpfc_scsi_event_header scsi_event; 3383 struct lpfc_scsi_event_header scsi_event;
3384 int match;
3385 int ret = SUCCESS, status, i;
2733 3386
2734 scsi_event.event_type = FC_REG_SCSI_EVENT; 3387 scsi_event.event_type = FC_REG_SCSI_EVENT;
2735 scsi_event.subcategory = LPFC_EVENT_BUSRESET; 3388 scsi_event.subcategory = LPFC_EVENT_BUSRESET;
@@ -2737,13 +3390,11 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
2737 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name)); 3390 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
2738 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name)); 3391 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
2739 3392
2740 fc_host_post_vendor_event(shost, 3393 fc_host_post_vendor_event(shost, fc_get_event_number(),
2741 fc_get_event_number(), 3394 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
2742 sizeof(scsi_event),
2743 (char *)&scsi_event,
2744 LPFC_NL_VENDOR_ID);
2745 3395
2746 lpfc_block_error_handler(cmnd); 3396 lpfc_block_error_handler(cmnd);
3397
2747 /* 3398 /*
2748 * Since the driver manages a single bus device, reset all 3399 * Since the driver manages a single bus device, reset all
2749 * targets known to the driver. Should any target reset 3400 * targets known to the driver. Should any target reset
@@ -2766,16 +3417,11 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
2766 spin_unlock_irq(shost->host_lock); 3417 spin_unlock_irq(shost->host_lock);
2767 if (!match) 3418 if (!match)
2768 continue; 3419 continue;
2769 lpfc_cmd = lpfc_get_scsi_buf(phba); 3420
2770 if (lpfc_cmd) { 3421 status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
2771 lpfc_cmd->timeout = 60; 3422 i, 0, FCP_TARGET_RESET);
2772 status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i, 3423
2773 cmnd->device->lun, 3424 if (status != SUCCESS) {
2774 ndlp->rport->dd_data);
2775 if (status != TIMEOUT_ERROR)
2776 lpfc_release_scsi_buf(phba, lpfc_cmd);
2777 }
2778 if (!lpfc_cmd || status != SUCCESS) {
2779 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3425 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2780 "0700 Bus Reset on target %d failed\n", 3426 "0700 Bus Reset on target %d failed\n",
2781 i); 3427 i);
@@ -2783,25 +3429,16 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
2783 } 3429 }
2784 } 3430 }
2785 /* 3431 /*
 2786 	 * All outstanding txcmplq I/Os should have been aborted by            3432 	 * We have to clean up i/o as they may be orphaned by the TMFs
2787 * the targets. Unfortunately, some targets do not abide by 3433 * above; or if any of the TMFs failed, they may be in an
2788 * this forcing the driver to double check. 3434 * indeterminate state.
3435 * We will report success if all the i/o aborts successfully.
2789 */ 3436 */
2790 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST); 3437
2791 if (cnt) 3438 status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
2792 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 3439 if (status != SUCCESS)
2793 0, 0, LPFC_CTX_HOST);
2794 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
2795 while (time_after(later, jiffies) && cnt) {
2796 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
2797 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
2798 }
2799 if (cnt) {
2800 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2801 "0715 Bus Reset I/O flush failure: "
2802 "cnt x%x left x%x\n", cnt, i);
2803 ret = FAILED; 3440 ret = FAILED;
2804 } 3441
2805 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3442 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2806 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret); 3443 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
2807 return ret; 3444 return ret;
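Editor's note: the rewritten handler above reduces to a simple control flow — walk every target the driver knows, send a target-reset TMF to each, then flush whatever I/O the TMFs orphaned and report failure if either step failed. A minimal userspace sketch of that flow follows; lpfc_send_taskmgmt() and lpfc_reset_flush_io_context() are the real driver entry points, while send_tmf(), flush_outstanding_io() and the constants here are illustrative stand-ins only.

#include <stdbool.h>
#include <stdio.h>

#define MAX_TARGET 8
#define SUCCESS 0
#define FAILED  1

/* Illustrative stand-ins for lpfc_send_taskmgmt() and
 * lpfc_reset_flush_io_context(); both "succeed" in this sketch. */
static int send_tmf(int target) { (void)target; return SUCCESS; }
static int flush_outstanding_io(void) { return SUCCESS; }

static int bus_reset(bool target_present[MAX_TARGET])
{
	int ret = SUCCESS;
	int i;

	/* Reset every known target; remember any failure but keep going. */
	for (i = 0; i < MAX_TARGET; i++) {
		if (!target_present[i])
			continue;
		if (send_tmf(i) != SUCCESS) {
			fprintf(stderr, "Bus Reset on target %d failed\n", i);
			ret = FAILED;
		}
	}

	/* The TMFs may orphan I/O; only report success if the flush succeeds. */
	if (flush_outstanding_io() != SUCCESS)
		ret = FAILED;

	return ret;
}

int main(void)
{
	bool present[MAX_TARGET] = { true, false, true };

	return bus_reset(present) == SUCCESS ? 0 : 1;
}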
@@ -2825,11 +3462,10 @@ lpfc_slave_alloc(struct scsi_device *sdev)
2825{ 3462{
2826 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 3463 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
2827 struct lpfc_hba *phba = vport->phba; 3464 struct lpfc_hba *phba = vport->phba;
2828 struct lpfc_scsi_buf *scsi_buf = NULL;
2829 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 3465 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2830 uint32_t total = 0, i; 3466 uint32_t total = 0;
2831 uint32_t num_to_alloc = 0; 3467 uint32_t num_to_alloc = 0;
2832 unsigned long flags; 3468 int num_allocated = 0;
2833 3469
2834 if (!rport || fc_remote_port_chkready(rport)) 3470 if (!rport || fc_remote_port_chkready(rport))
2835 return -ENXIO; 3471 return -ENXIO;
@@ -2863,20 +3499,13 @@ lpfc_slave_alloc(struct scsi_device *sdev)
2863 (phba->cfg_hba_queue_depth - total)); 3499 (phba->cfg_hba_queue_depth - total));
2864 num_to_alloc = phba->cfg_hba_queue_depth - total; 3500 num_to_alloc = phba->cfg_hba_queue_depth - total;
2865 } 3501 }
2866 3502 num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
2867 for (i = 0; i < num_to_alloc; i++) { 3503 if (num_to_alloc != num_allocated) {
2868 scsi_buf = lpfc_new_scsi_buf(vport); 3504 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2869 if (!scsi_buf) { 3505 "0708 Allocation request of %d "
2870 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3506 "command buffers did not succeed. "
2871 "0706 Failed to allocate " 3507 "Allocated %d buffers.\n",
2872 "command buffer\n"); 3508 num_to_alloc, num_allocated);
2873 break;
2874 }
2875
2876 spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
2877 phba->total_scsi_bufs++;
2878 list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
2879 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
2880 } 3509 }
2881 return 0; 3510 return 0;
2882} 3511}
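Editor's note: the slave_alloc change above swaps a per-buffer allocation loop for one batched call that returns how many buffers were actually allocated, and the caller only logs when the count falls short. A hedged sketch of that contract, with alloc_buffers() standing in for lpfc_new_scsi_buf():

#include <stdio.h>

/* Stand-in for lpfc_new_scsi_buf(): tries to allocate 'wanted' buffers and
 * returns how many it actually managed to allocate. */
static int alloc_buffers(int wanted)
{
	return wanted; /* pretend every allocation succeeded */
}

static int slave_alloc_buffers(int num_to_alloc)
{
	int num_allocated = alloc_buffers(num_to_alloc);

	if (num_allocated != num_to_alloc)
		fprintf(stderr,
			"Allocation request of %d command buffers did not "
			"succeed. Allocated %d buffers.\n",
			num_to_alloc, num_allocated);

	/* A short allocation is not fatal; the device still attaches. */
	return 0;
}

int main(void)
{
	return slave_alloc_buffers(64);
}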
@@ -2942,7 +3571,8 @@ struct scsi_host_template lpfc_template = {
2942 .info = lpfc_info, 3571 .info = lpfc_info,
2943 .queuecommand = lpfc_queuecommand, 3572 .queuecommand = lpfc_queuecommand,
2944 .eh_abort_handler = lpfc_abort_handler, 3573 .eh_abort_handler = lpfc_abort_handler,
2945 .eh_device_reset_handler= lpfc_device_reset_handler, 3574 .eh_device_reset_handler = lpfc_device_reset_handler,
3575 .eh_target_reset_handler = lpfc_target_reset_handler,
2946 .eh_bus_reset_handler = lpfc_bus_reset_handler, 3576 .eh_bus_reset_handler = lpfc_bus_reset_handler,
2947 .slave_alloc = lpfc_slave_alloc, 3577 .slave_alloc = lpfc_slave_alloc,
2948 .slave_configure = lpfc_slave_configure, 3578 .slave_configure = lpfc_slave_configure,
@@ -2962,7 +3592,8 @@ struct scsi_host_template lpfc_vport_template = {
2962 .info = lpfc_info, 3592 .info = lpfc_info,
2963 .queuecommand = lpfc_queuecommand, 3593 .queuecommand = lpfc_queuecommand,
2964 .eh_abort_handler = lpfc_abort_handler, 3594 .eh_abort_handler = lpfc_abort_handler,
2965 .eh_device_reset_handler= lpfc_device_reset_handler, 3595 .eh_device_reset_handler = lpfc_device_reset_handler,
3596 .eh_target_reset_handler = lpfc_target_reset_handler,
2966 .eh_bus_reset_handler = lpfc_bus_reset_handler, 3597 .eh_bus_reset_handler = lpfc_bus_reset_handler,
2967 .slave_alloc = lpfc_slave_alloc, 3598 .slave_alloc = lpfc_slave_alloc,
2968 .slave_configure = lpfc_slave_configure, 3599 .slave_configure = lpfc_slave_configure,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index c7c440d5fa29..65dfc8bd5b49 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -140,6 +140,8 @@ struct lpfc_scsi_buf {
140 struct fcp_rsp *fcp_rsp; 140 struct fcp_rsp *fcp_rsp;
141 struct ulp_bde64 *fcp_bpl; 141 struct ulp_bde64 *fcp_bpl;
142 142
143 dma_addr_t dma_phys_bpl;
144
143 /* cur_iocbq has phys of the dma-able buffer. 145 /* cur_iocbq has phys of the dma-able buffer.
144 * Iotag is in here 146 * Iotag is in here
145 */ 147 */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index eb5c75c45ba4..acc43b061ba1 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -29,9 +29,12 @@
29#include <scsi/scsi_device.h> 29#include <scsi/scsi_device.h>
30#include <scsi/scsi_host.h> 30#include <scsi/scsi_host.h>
31#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
32#include <scsi/fc/fc_fs.h>
32 33
34#include "lpfc_hw4.h"
33#include "lpfc_hw.h" 35#include "lpfc_hw.h"
34#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
35#include "lpfc_nl.h" 38#include "lpfc_nl.h"
36#include "lpfc_disc.h" 39#include "lpfc_disc.h"
37#include "lpfc_scsi.h" 40#include "lpfc_scsi.h"
@@ -40,24 +43,7 @@
40#include "lpfc_logmsg.h" 43#include "lpfc_logmsg.h"
41#include "lpfc_compat.h" 44#include "lpfc_compat.h"
42#include "lpfc_debugfs.h" 45#include "lpfc_debugfs.h"
43 46#include "lpfc_vport.h"
44/*
45 * Define macro to log: Mailbox command x%x cannot issue Data
46 * This allows multiple uses of lpfc_msgBlk0311
47 * w/o perturbing log msg utility.
48 */
49#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
50 lpfc_printf_log(phba, \
51 KERN_INFO, \
52 LOG_MBOX | LOG_SLI, \
53 "(%d):0311 Mailbox command x%x cannot " \
54 "issue Data: x%x x%x x%x\n", \
55 pmbox->vport ? pmbox->vport->vpi : 0, \
56 pmbox->mb.mbxCommand, \
57 phba->pport->port_state, \
58 psli->sli_flag, \
59 flag)
60
61 47
62/* There are only four IOCB completion types. */ 48/* There are only four IOCB completion types. */
63typedef enum _lpfc_iocb_type { 49typedef enum _lpfc_iocb_type {
@@ -67,6 +53,350 @@ typedef enum _lpfc_iocb_type {
67 LPFC_ABORT_IOCB 53 LPFC_ABORT_IOCB
68} lpfc_iocb_type; 54} lpfc_iocb_type;
69 55
56
57/* Provide function prototypes local to this module. */
58static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
59 uint32_t);
60static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
61 uint8_t *, uint32_t *);
62
63static IOCB_t *
64lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
65{
66 return &iocbq->iocb;
67}
68
69/**
70 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
71 * @q: The Work Queue to operate on.
72 * @wqe: The work Queue Entry to put on the Work queue.
73 *
74 * This routine will copy the contents of @wqe to the next available entry on
75 * the @q. This function will then ring the Work Queue Doorbell to signal the
76 * HBA to start processing the Work Queue Entry. This function returns 0 if
77 * successful. If no entries are available on @q then this function will return
78 * -ENOMEM.
79 * The caller is expected to hold the hbalock when calling this routine.
80 **/
81static uint32_t
82lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
83{
84 union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
85 struct lpfc_register doorbell;
86 uint32_t host_index;
87
88 /* If the host has not yet processed the next entry then we are done */
89 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
90 return -ENOMEM;
91 /* set consumption flag every once in a while */
92 if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
93 bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);
94
95 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
96
97 /* Update the host index before invoking device */
98 host_index = q->host_index;
99 q->host_index = ((q->host_index + 1) % q->entry_count);
100
101 /* Ring Doorbell */
102 doorbell.word0 = 0;
103 bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
104 bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
105 bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
106 writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
107 readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
108
109 return 0;
110}
111
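Editor's note: lpfc_sli4_wq_put() above (and the mq_put/rq_put routines below) all use the same circular-index test — the ring is treated as full when advancing host_index would collide with hba_index, so one slot is always left empty to distinguish full from empty. A minimal userspace sketch of that arithmetic; struct ring, ring_post() and the entry count are illustrative, not driver structures.

#include <stdint.h>
#include <stdbool.h>

struct ring {
	uint32_t host_index;  /* next slot the host will write  */
	uint32_t hba_index;   /* next slot the HBA will consume */
	uint32_t entry_count; /* total slots in the ring        */
};

/* Same "full" test the put routines use: one slot stays unused so that
 * host_index == hba_index always means "empty", never "full". */
static bool ring_full(const struct ring *q)
{
	return ((q->host_index + 1) % q->entry_count) == q->hba_index;
}

static int ring_post(struct ring *q)
{
	if (ring_full(q))
		return -1;            /* -ENOMEM in the driver */
	/* ... copy the entry into slot q->host_index here ... */
	q->host_index = (q->host_index + 1) % q->entry_count;
	return 0;                     /* then ring the doorbell */
}

int main(void)
{
	struct ring q = { .host_index = 0, .hba_index = 0, .entry_count = 4 };

	while (ring_post(&q) == 0)
		;    /* posts 3 entries, then the 4-slot ring reports full */
	return 0;
}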
112/**
113 * lpfc_sli4_wq_release - Updates internal hba index for WQ
114 * @q: The Work Queue to operate on.
115 * @index: The index to advance the hba index to.
116 *
117 * This routine will update the HBA index of a queue to reflect consumption of
118 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
119 * an entry the host calls this function to update the queue's internal
120 * pointers. This routine returns the number of entries that were consumed by
121 * the HBA.
122 **/
123static uint32_t
124lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
125{
126 uint32_t released = 0;
127
128 if (q->hba_index == index)
129 return 0;
130 do {
131 q->hba_index = ((q->hba_index + 1) % q->entry_count);
132 released++;
133 } while (q->hba_index != index);
134 return released;
135}
136
137/**
138 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
139 * @q: The Mailbox Queue to operate on.
140 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
141 *
142 * This routine will copy the contents of @mqe to the next available entry on
143 * the @q. This function will then ring the Mailbox Queue Doorbell to signal the
144 * HBA to start processing the Mailbox Queue Entry. This function returns 0 if
145 * successful. If no entries are available on @q then this function will return
146 * -ENOMEM.
147 * The caller is expected to hold the hbalock when calling this routine.
148 **/
149static uint32_t
150lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
151{
152 struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
153 struct lpfc_register doorbell;
154 uint32_t host_index;
155
156 /* If the host has not yet processed the next entry then we are done */
157 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
158 return -ENOMEM;
159 lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
160 /* Save off the mailbox pointer for completion */
161 q->phba->mbox = (MAILBOX_t *)temp_mqe;
162
163 /* Update the host index before invoking device */
164 host_index = q->host_index;
165 q->host_index = ((q->host_index + 1) % q->entry_count);
166
167 /* Ring Doorbell */
168 doorbell.word0 = 0;
169 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
170 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
171 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
172 readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
173 return 0;
174}
175
176/**
177 * lpfc_sli4_mq_release - Updates internal hba index for MQ
178 * @q: The Mailbox Queue to operate on.
179 *
180 * This routine will update the HBA index of a queue to reflect consumption of
181 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
182 * an entry the host calls this function to update the queue's internal
183 * pointers. This routine returns the number of entries that were consumed by
184 * the HBA.
185 **/
186static uint32_t
187lpfc_sli4_mq_release(struct lpfc_queue *q)
188{
189 /* Clear the mailbox pointer for completion */
190 q->phba->mbox = NULL;
191 q->hba_index = ((q->hba_index + 1) % q->entry_count);
192 return 1;
193}
194
195/**
196 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
197 * @q: The Event Queue to get the first valid EQE from
198 *
199 * This routine will get the first valid Event Queue Entry from @q, update
200 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
201 * the Queue (no more work to do), or the Queue is full of EQEs that have been
202 * processed but not popped back to the HBA, then this routine will return NULL.
203 **/
204static struct lpfc_eqe *
205lpfc_sli4_eq_get(struct lpfc_queue *q)
206{
207 struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
208
209 /* If the next EQE is not valid then we are done */
210 if (!bf_get(lpfc_eqe_valid, eqe))
211 return NULL;
212 /* If the host has not yet processed the next entry then we are done */
213 if (((q->hba_index + 1) % q->entry_count) == q->host_index)
214 return NULL;
215
216 q->hba_index = ((q->hba_index + 1) % q->entry_count);
217 return eqe;
218}
219
220/**
221 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
222 * @q: The Event Queue that the host has completed processing for.
223 * @arm: Indicates whether the host wants to arm this EQ.
224 *
225 * This routine will mark all Event Queue Entries on @q, from the last
226 * known completed entry to the last entry that was processed, as completed
227 * by clearing the valid bit for each completion queue entry. Then it will
228 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
229 * The internal host index in the @q will be updated by this routine to indicate
230 * that the host has finished processing the entries. The @arm parameter
231 * indicates that the queue should be rearmed when ringing the doorbell.
232 *
233 * This function will return the number of EQEs that were popped.
234 **/
235uint32_t
236lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
237{
238 uint32_t released = 0;
239 struct lpfc_eqe *temp_eqe;
240 struct lpfc_register doorbell;
241
242 /* while there are valid entries */
243 while (q->hba_index != q->host_index) {
244 temp_eqe = q->qe[q->host_index].eqe;
245 bf_set(lpfc_eqe_valid, temp_eqe, 0);
246 released++;
247 q->host_index = ((q->host_index + 1) % q->entry_count);
248 }
249 if (unlikely(released == 0 && !arm))
250 return 0;
251
252 /* ring doorbell for number popped */
253 doorbell.word0 = 0;
254 if (arm) {
255 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
256 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
257 }
258 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
259 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
260 bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
261 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
262 return released;
263}
264
265/**
266 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
267 * @q: The Completion Queue to get the first valid CQE from
268 *
269 * This routine will get the first valid Completion Queue Entry from @q, update
270 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
271 * the Queue (no more work to do), or the Queue is full of CQEs that have been
272 * processed but not popped back to the HBA, then this routine will return NULL.
273 **/
274static struct lpfc_cqe *
275lpfc_sli4_cq_get(struct lpfc_queue *q)
276{
277 struct lpfc_cqe *cqe;
278
279 /* If the next CQE is not valid then we are done */
280 if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
281 return NULL;
282 /* If the host has not yet processed the next entry then we are done */
283 if (((q->hba_index + 1) % q->entry_count) == q->host_index)
284 return NULL;
285
286 cqe = q->qe[q->hba_index].cqe;
287 q->hba_index = ((q->hba_index + 1) % q->entry_count);
288 return cqe;
289}
290
291/**
292 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
293 * @q: The Completion Queue that the host has completed processing for.
294 * @arm: Indicates whether the host wants to arm this CQ.
295 *
296 * This routine will mark all Completion queue entries on @q, from the last
297 * known completed entry to the last entry that was processed, as completed
298 * by clearing the valid bit for each completion queue entry. Then it will
299 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
300 * The internal host index in the @q will be updated by this routine to indicate
301 * that the host has finished processing the entries. The @arm parameter
302 * indicates that the queue should be rearmed when ringing the doorbell.
303 *
304 * This function will return the number of CQEs that were released.
305 **/
306uint32_t
307lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
308{
309 uint32_t released = 0;
310 struct lpfc_cqe *temp_qe;
311 struct lpfc_register doorbell;
312
313 /* while there are valid entries */
314 while (q->hba_index != q->host_index) {
315 temp_qe = q->qe[q->host_index].cqe;
316 bf_set(lpfc_cqe_valid, temp_qe, 0);
317 released++;
318 q->host_index = ((q->host_index + 1) % q->entry_count);
319 }
320 if (unlikely(released == 0 && !arm))
321 return 0;
322
323 /* ring doorbell for number popped */
324 doorbell.word0 = 0;
325 if (arm)
326 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
327 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
328 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
329 bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
330 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
331 return released;
332}
333
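Editor's note: lpfc_sli4_eq_release() and lpfc_sli4_cq_release() above share one pattern — walk host_index forward to hba_index, clearing the valid bit on each entry and counting them, then write a single doorbell carrying the count and, optionally, the re-arm bit. A minimal sketch of that release walk; struct cq, cq_release() and the fixed-size valid[] array are illustrative stand-ins, not the driver's queue layout.

#include <stdint.h>
#include <stdbool.h>

struct cq {
	uint32_t host_index;
	uint32_t hba_index;
	uint32_t entry_count;
	bool valid[256];      /* one "valid" flag per entry, sized for the sketch */
};

/* Mirrors the release routines: clear the valid flag on every entry the host
 * has finished with, advance host_index up to hba_index, and return how many
 * entries should be reported in the doorbell write. */
static uint32_t cq_release(struct cq *q, bool arm)
{
	uint32_t released = 0;

	while (q->hba_index != q->host_index) {
		q->valid[q->host_index] = false;
		released++;
		q->host_index = (q->host_index + 1) % q->entry_count;
	}
	if (released == 0 && !arm)
		return 0;      /* nothing to tell the hardware */
	/* ... build a doorbell word with 'released' and the arm flag here ... */
	return released;
}

int main(void)
{
	struct cq q = { .host_index = 0, .hba_index = 3, .entry_count = 8 };

	return cq_release(&q, true) == 3 ? 0 : 1;
}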
334/**
335 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
336 * @hq: The Header Receive Queue to operate on.
337 * @hrqe: The Receive Queue Entry to put on the Receive queue.
338 *
339 * This routine will copy the contents of @hrqe to the next available entry on
340 * the @hq. This function will then ring the Receive Queue Doorbell to signal the
341 * HBA to start processing the Receive Queue Entry. This function returns the
342 * index that the rqe was copied to if successful. If no entries are available
343 * on @hq then this function will return -EBUSY.
344 * The caller is expected to hold the hbalock when calling this routine.
345 **/
346static int
347lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
348 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
349{
350 struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
351 struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
352 struct lpfc_register doorbell;
353 int put_index = hq->host_index;
354
355 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
356 return -EINVAL;
357 if (hq->host_index != dq->host_index)
358 return -EINVAL;
359 /* If the host has not yet processed the next entry then we are done */
360 if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
361 return -EBUSY;
362 lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
363 lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
364
365 /* Update the host index to point to the next slot */
366 hq->host_index = ((hq->host_index + 1) % hq->entry_count);
367 dq->host_index = ((dq->host_index + 1) % dq->entry_count);
368
369 /* Ring The Header Receive Queue Doorbell */
370 if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
371 doorbell.word0 = 0;
372 bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
373 LPFC_RQ_POST_BATCH);
374 bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
375 writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
376 }
377 return put_index;
378}
379
380/**
381 * lpfc_sli4_rq_release - Updates internal hba index for RQ
382 * @hq: The Header Receive Queue to operate on.
383 *
384 * This routine will update the HBA index of a queue to reflect consumption of
385 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
386 * consumed an entry the host calls this function to update the queue's
387 * internal pointers. This routine returns the number of entries that were
388 * consumed by the HBA.
389 **/
390static uint32_t
391lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
392{
393 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
394 return 0;
395 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
396 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
397 return 1;
398}
399
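Editor's note: the receive queues above come in header/data pairs that must advance in lockstep — lpfc_sli4_rq_put() rejects mismatched indexes, and the doorbell is only rung once per LPFC_RQ_POST_BATCH postings. A short sketch of that pairing; struct rq, rq_pair_put() and RQ_POST_BATCH are illustrative names, not the driver's definitions.

#include <stdint.h>

#define RQ_POST_BATCH 8 /* stands in for LPFC_RQ_POST_BATCH */

struct rq { uint32_t host_index, hba_index, entry_count; };

/* Header and data receive queues advance together; posting fails if they
 * ever get out of step, and the doorbell is only rung once per batch. */
static int rq_pair_put(struct rq *hq, struct rq *dq)
{
	int put_index = hq->host_index;

	if (hq->host_index != dq->host_index)
		return -1;                                   /* -EINVAL */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -2;                                   /* -EBUSY  */

	hq->host_index = (hq->host_index + 1) % hq->entry_count;
	dq->host_index = (dq->host_index + 1) % dq->entry_count;

	if (!(hq->host_index % RQ_POST_BATCH)) {
		/* ... ring the header RQ doorbell for RQ_POST_BATCH entries ... */
	}
	return put_index;
}

int main(void)
{
	struct rq hq = { 0, 0, 16 }, dq = { 0, 0, 16 };

	return rq_pair_put(&hq, &dq) == 0 ? 0 : 1;   /* first post lands at index 0 */
}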
70/** 400/**
71 * lpfc_cmd_iocb - Get next command iocb entry in the ring 401 * lpfc_cmd_iocb - Get next command iocb entry in the ring
72 * @phba: Pointer to HBA context object. 402 * @phba: Pointer to HBA context object.
@@ -121,6 +451,76 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
121} 451}
122 452
123/** 453/**
454 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
455 * @phba: Pointer to HBA context object.
456 * @xritag: XRI value.
457 *
458 * This function clears the sglq pointer from the array of active
459 * sglq's. The xritag that is passed in is used to index into the
460 * array. Before the xritag can be used it needs to be adjusted
461 * by subtracting the xribase.
462 *
463 * Returns sglq pointer = success, NULL = Failure.
464 **/
465static struct lpfc_sglq *
466__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
467{
468 uint16_t adj_xri;
469 struct lpfc_sglq *sglq;
470 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
471 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
472 return NULL;
473 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
474 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
475 return sglq;
476}
477
478/**
479 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
480 * @phba: Pointer to HBA context object.
481 * @xritag: XRI value.
482 *
483 * This function returns the sglq pointer from the array of active
484 * sglq's. The xritag that is passed in is used to index into the
485 * array. Before the xritag can be used it needs to be adjusted
486 * by subtracting the xribase.
487 *
488 * Returns sglq pointer = success, NULL = Failure.
489 **/
490static struct lpfc_sglq *
491__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
492{
493 uint16_t adj_xri;
494 struct lpfc_sglq *sglq;
495 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
496 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
497 return NULL;
498 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
499 return sglq;
500}
501
502/**
503 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
504 * @phba: Pointer to HBA context object.
505 *
506 * This function is called with hbalock held. It gets a new driver
507 * sglq object from the sglq list. If the list is not empty it
508 * returns a pointer to the newly allocated sglq object, else it
509 * returns NULL.
510 **/
511static struct lpfc_sglq *
512__lpfc_sli_get_sglq(struct lpfc_hba *phba)
513{
514 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
515 struct lpfc_sglq *sglq = NULL;
516 uint16_t adj_xri;
517 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
518 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
519 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
520 return sglq;
521}
522
523/**
124 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 524 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
125 * @phba: Pointer to HBA context object. 525 * @phba: Pointer to HBA context object.
126 * 526 *
@@ -142,7 +542,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
142} 542}
143 543
144/** 544/**
145 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool 545 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
146 * @phba: Pointer to HBA context object. 546 * @phba: Pointer to HBA context object.
147 * @iocbq: Pointer to driver iocb object. 547 * @iocbq: Pointer to driver iocb object.
148 * 548 *
@@ -150,9 +550,62 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
150 * iocb object to the iocb pool. The iotag in the iocb object 550 * iocb object to the iocb pool. The iotag in the iocb object
151 * does not change for each use of the iocb object. This function 551 * does not change for each use of the iocb object. This function
152 * clears all other fields of the iocb object when it is freed. 552 * clears all other fields of the iocb object when it is freed.
553 * The sglq structure that holds the xritag and the phys and virtual
554 * mappings for the scatter gather list is retrieved from the
555 * active array of sglq. The get of the sglq pointer also clears
556 * the entry in the array. If the status of the IO indicates that
557 * this IO was aborted then the sglq entry is put on the
558 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
559 * IO has good status or fails for any other reason then the sglq
560 * entry is added to the free list (lpfc_sgl_list).
153 **/ 561 **/
154static void 562static void
155__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 563__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
564{
565 struct lpfc_sglq *sglq;
566 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
567 unsigned long iflag;
568
569 if (iocbq->sli4_xritag == NO_XRI)
570 sglq = NULL;
571 else
572 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
573 if (sglq) {
574 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
575 || ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
576 && (iocbq->iocb.un.ulpWord[4]
577 == IOERR_SLI_ABORTED))) {
578 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
579 iflag);
580 list_add(&sglq->list,
581 &phba->sli4_hba.lpfc_abts_els_sgl_list);
582 spin_unlock_irqrestore(
583 &phba->sli4_hba.abts_sgl_list_lock, iflag);
584 } else
585 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
586 }
587
588
589 /*
590 * Clean all volatile data fields, preserve iotag and node struct.
591 */
592 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
593 iocbq->sli4_xritag = NO_XRI;
594 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
595}
596
597/**
598 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
599 * @phba: Pointer to HBA context object.
600 * @iocbq: Pointer to driver iocb object.
601 *
602 * This function is called with hbalock held to release driver
603 * iocb object to the iocb pool. The iotag in the iocb object
604 * does not change for each use of the iocb object. This function
605 * clears all other fields of the iocb object when it is freed.
606 **/
607static void
608__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
156{ 609{
157 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 610 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
158 611
@@ -160,10 +613,27 @@ __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
160 * Clean all volatile data fields, preserve iotag and node struct. 613 * Clean all volatile data fields, preserve iotag and node struct.
161 */ 614 */
162 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 615 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
616 iocbq->sli4_xritag = NO_XRI;
163 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 617 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
164} 618}
165 619
166/** 620/**
621 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
622 * @phba: Pointer to HBA context object.
623 * @iocbq: Pointer to driver iocb object.
624 *
625 * This function is called with hbalock held to release driver
626 * iocb object to the iocb pool. The iotag in the iocb object
627 * does not change for each use of the iocb object. This function
628 * clears all other fields of the iocb object when it is freed.
629 **/
630static void
631__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
632{
633 phba->__lpfc_sli_release_iocbq(phba, iocbq);
634}
635
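Editor's note: the __lpfc_sli_release_iocbq() wrapper above shows how the SLI-3/SLI-4 split is handled throughout this patch — per-revision implementations are stored behind function pointers in the hba structure and the common entry point simply dispatches through them. The sketch below illustrates that dispatch idea only; the structure, function names and the setup step are hypothetical, not the driver's own initialization code.

#include <stdio.h>

struct hba;
typedef void (*release_fn)(struct hba *hba, int iotag);

struct hba {
	int sli_rev;
	release_fn release_iocbq;   /* filled in once the SLI revision is known */
};

static void release_iocbq_s3(struct hba *hba, int iotag)
{
	(void)hba;
	printf("SLI-3 release of iotag %d\n", iotag);
}

static void release_iocbq_s4(struct hba *hba, int iotag)
{
	(void)hba;
	printf("SLI-4 release of iotag %d (also recycles the sglq)\n", iotag);
}

/* The common entry point never branches on the revision itself;
 * it simply calls through the pointer installed at setup time. */
static void release_iocbq(struct hba *hba, int iotag)
{
	hba->release_iocbq(hba, iotag);
}

int main(void)
{
	struct hba hba = { .sli_rev = 4 };

	/* Hypothetical setup step: pick the implementation once, up front. */
	hba.release_iocbq = (hba.sli_rev == 4) ? release_iocbq_s4
					       : release_iocbq_s3;
	release_iocbq(&hba, 42);
	return 0;
}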
636/**
167 * lpfc_sli_release_iocbq - Release iocb to the iocb pool 637 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
168 * @phba: Pointer to HBA context object. 638 * @phba: Pointer to HBA context object.
169 * @iocbq: Pointer to driver iocb object. 639 * @iocbq: Pointer to driver iocb object.
@@ -281,6 +751,14 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
281 case CMD_GEN_REQUEST64_CR: 751 case CMD_GEN_REQUEST64_CR:
282 case CMD_GEN_REQUEST64_CX: 752 case CMD_GEN_REQUEST64_CX:
283 case CMD_XMIT_ELS_RSP64_CX: 753 case CMD_XMIT_ELS_RSP64_CX:
754 case DSSCMD_IWRITE64_CR:
755 case DSSCMD_IWRITE64_CX:
756 case DSSCMD_IREAD64_CR:
757 case DSSCMD_IREAD64_CX:
758 case DSSCMD_INVALIDATE_DEK:
759 case DSSCMD_SET_KEK:
760 case DSSCMD_GET_KEK_ID:
761 case DSSCMD_GEN_XFER:
284 type = LPFC_SOL_IOCB; 762 type = LPFC_SOL_IOCB;
285 break; 763 break;
286 case CMD_ABORT_XRI_CN: 764 case CMD_ABORT_XRI_CN:
@@ -348,7 +826,7 @@ lpfc_sli_ring_map(struct lpfc_hba *phba)
348 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 826 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
349 if (!pmb) 827 if (!pmb)
350 return -ENOMEM; 828 return -ENOMEM;
351 pmbox = &pmb->mb; 829 pmbox = &pmb->u.mb;
352 phba->link_state = LPFC_INIT_MBX_CMDS; 830 phba->link_state = LPFC_INIT_MBX_CMDS;
353 for (i = 0; i < psli->num_rings; i++) { 831 for (i = 0; i < psli->num_rings; i++) {
354 lpfc_config_ring(phba, i, pmb); 832 lpfc_config_ring(phba, i, pmb);
@@ -779,8 +1257,8 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
779 phba->hbqs[i].buffer_count = 0; 1257 phba->hbqs[i].buffer_count = 0;
780 } 1258 }
781 /* Return all HBQ buffers that are in flight */ 1259 /* Return all HBQ buffers that are in flight */
782 list_for_each_entry_safe(dmabuf, next_dmabuf, 1260 list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
783 &phba->hbqbuf_in_list, list) { 1261 list) {
784 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1262 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
785 list_del(&hbq_buf->dbuf.list); 1263 list_del(&hbq_buf->dbuf.list);
786 if (hbq_buf->tag == -1) { 1264 if (hbq_buf->tag == -1) {
@@ -814,10 +1292,28 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
814 * pointer to the hbq entry if it successfully post the buffer 1292 * pointer to the hbq entry if it successfully post the buffer
815 * else it will return NULL. 1293 * else it will return NULL.
816 **/ 1294 **/
817static struct lpfc_hbq_entry * 1295static int
818lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 1296lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
819 struct hbq_dmabuf *hbq_buf) 1297 struct hbq_dmabuf *hbq_buf)
820{ 1298{
1299 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1300}
1301
1302/**
1303 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1304 * @phba: Pointer to HBA context object.
1305 * @hbqno: HBQ number.
1306 * @hbq_buf: Pointer to HBQ buffer.
1307 *
1308 * This function is called with the hbalock held to post an hbq buffer to the
1309 * firmware. If the function finds an empty slot in the HBQ, it will post the
1310 * buffer and place it on the hbq_buffer_list. The function will return zero if
1311 * it successfully posts the buffer, else it will return an error.
1312 **/
1313static int
1314lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1315 struct hbq_dmabuf *hbq_buf)
1316{
821 struct lpfc_hbq_entry *hbqe; 1317 struct lpfc_hbq_entry *hbqe;
822 dma_addr_t physaddr = hbq_buf->dbuf.phys; 1318 dma_addr_t physaddr = hbq_buf->dbuf.phys;
823 1319
@@ -838,8 +1334,40 @@ lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
838 /* flush */ 1334 /* flush */
839 readl(phba->hbq_put + hbqno); 1335 readl(phba->hbq_put + hbqno);
840 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 1336 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
841 } 1337 return 0;
842 return hbqe; 1338 } else
1339 return -ENOMEM;
1340}
1341
1342/**
1343 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1344 * @phba: Pointer to HBA context object.
1345 * @hbqno: HBQ number.
1346 * @hbq_buf: Pointer to HBQ buffer.
1347 *
1348 * This function is called with the hbalock held to post an RQE to the SLI4
1349 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1350 * the hbq_buffer_list and return zero, otherwise it will return an error.
1351 **/
1352static int
1353lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1354 struct hbq_dmabuf *hbq_buf)
1355{
1356 int rc;
1357 struct lpfc_rqe hrqe;
1358 struct lpfc_rqe drqe;
1359
1360 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1361 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1362 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1363 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
1364 rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
1365 &hrqe, &drqe);
1366 if (rc < 0)
1367 return rc;
1368 hbq_buf->tag = rc;
1369 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1370 return 0;
843} 1371}
844 1372
845/* HBQ for ELS and CT traffic. */ 1373/* HBQ for ELS and CT traffic. */
@@ -914,7 +1442,7 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
914 dbuf.list); 1442 dbuf.list);
915 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 1443 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
916 (hbqno << 16)); 1444 (hbqno << 16));
917 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1445 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
918 phba->hbqs[hbqno].buffer_count++; 1446 phba->hbqs[hbqno].buffer_count++;
919 posted++; 1447 posted++;
920 } else 1448 } else
@@ -965,6 +1493,25 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
965} 1493}
966 1494
967/** 1495/**
1496 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
1497 * @phba: Pointer to HBA context object.
1498 * @hbqno: HBQ number.
1499 *
1500 * This function removes the first hbq buffer on an hbq list and returns a
1501 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
1502 **/
1503static struct hbq_dmabuf *
1504lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1505{
1506 struct lpfc_dmabuf *d_buf;
1507
1508 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1509 if (!d_buf)
1510 return NULL;
1511 return container_of(d_buf, struct hbq_dmabuf, dbuf);
1512}
1513
1514/**
968 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 1515 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
969 * @phba: Pointer to HBA context object. 1516 * @phba: Pointer to HBA context object.
970 * @tag: Tag of the hbq buffer. 1517 * @tag: Tag of the hbq buffer.
@@ -985,12 +1532,15 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
985 if (hbqno >= LPFC_MAX_HBQS) 1532 if (hbqno >= LPFC_MAX_HBQS)
986 return NULL; 1533 return NULL;
987 1534
1535 spin_lock_irq(&phba->hbalock);
988 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 1536 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
989 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 1537 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
990 if (hbq_buf->tag == tag) { 1538 if (hbq_buf->tag == tag) {
1539 spin_unlock_irq(&phba->hbalock);
991 return hbq_buf; 1540 return hbq_buf;
992 } 1541 }
993 } 1542 }
1543 spin_unlock_irq(&phba->hbalock);
994 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 1544 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
995 "1803 Bad hbq tag. Data: x%x x%x\n", 1545 "1803 Bad hbq tag. Data: x%x x%x\n",
996 tag, phba->hbqs[tag >> 16].buffer_count); 1546 tag, phba->hbqs[tag >> 16].buffer_count);
@@ -1013,9 +1563,8 @@ lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
1013 1563
1014 if (hbq_buffer) { 1564 if (hbq_buffer) {
1015 hbqno = hbq_buffer->tag >> 16; 1565 hbqno = hbq_buffer->tag >> 16;
1016 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1566 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
1017 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1567 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1018 }
1019 } 1568 }
1020} 1569}
1021 1570
@@ -1086,6 +1635,15 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1086 case MBX_HEARTBEAT: 1635 case MBX_HEARTBEAT:
1087 case MBX_PORT_CAPABILITIES: 1636 case MBX_PORT_CAPABILITIES:
1088 case MBX_PORT_IOV_CONTROL: 1637 case MBX_PORT_IOV_CONTROL:
1638 case MBX_SLI4_CONFIG:
1639 case MBX_SLI4_REQ_FTRS:
1640 case MBX_REG_FCFI:
1641 case MBX_UNREG_FCFI:
1642 case MBX_REG_VFI:
1643 case MBX_UNREG_VFI:
1644 case MBX_INIT_VPI:
1645 case MBX_INIT_VFI:
1646 case MBX_RESUME_RPI:
1089 ret = mbxCommand; 1647 ret = mbxCommand;
1090 break; 1648 break;
1091 default: 1649 default:
@@ -1106,7 +1664,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1106 * will wake up thread waiting on the wait queue pointed by context1 1664 * will wake up thread waiting on the wait queue pointed by context1
1107 * of the mailbox. 1665 * of the mailbox.
1108 **/ 1666 **/
1109static void 1667void
1110lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 1668lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
1111{ 1669{
1112 wait_queue_head_t *pdone_q; 1670 wait_queue_head_t *pdone_q;
@@ -1140,7 +1698,7 @@ void
1140lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1698lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1141{ 1699{
1142 struct lpfc_dmabuf *mp; 1700 struct lpfc_dmabuf *mp;
1143 uint16_t rpi; 1701 uint16_t rpi, vpi;
1144 int rc; 1702 int rc;
1145 1703
1146 mp = (struct lpfc_dmabuf *) (pmb->context1); 1704 mp = (struct lpfc_dmabuf *) (pmb->context1);
@@ -1150,24 +1708,30 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1150 kfree(mp); 1708 kfree(mp);
1151 } 1709 }
1152 1710
1711 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
1712 (phba->sli_rev == LPFC_SLI_REV4))
1713 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
1714
1153 /* 1715 /*
1154 * If a REG_LOGIN succeeded after node is destroyed or node 1716 * If a REG_LOGIN succeeded after node is destroyed or node
1155 * is in re-discovery driver need to cleanup the RPI. 1717 * is in re-discovery driver need to cleanup the RPI.
1156 */ 1718 */
1157 if (!(phba->pport->load_flag & FC_UNLOADING) && 1719 if (!(phba->pport->load_flag & FC_UNLOADING) &&
1158 pmb->mb.mbxCommand == MBX_REG_LOGIN64 && 1720 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
1159 !pmb->mb.mbxStatus) { 1721 !pmb->u.mb.mbxStatus) {
1160 1722 rpi = pmb->u.mb.un.varWords[0];
1161 rpi = pmb->mb.un.varWords[0]; 1723 vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
1162 lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb); 1724 lpfc_unreg_login(phba, vpi, rpi, pmb);
1163 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1725 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1164 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1726 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1165 if (rc != MBX_NOT_FINISHED) 1727 if (rc != MBX_NOT_FINISHED)
1166 return; 1728 return;
1167 } 1729 }
1168 1730
1169 mempool_free(pmb, phba->mbox_mem_pool); 1731 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
1170 return; 1732 lpfc_sli4_mbox_cmd_free(phba, pmb);
1733 else
1734 mempool_free(pmb, phba->mbox_mem_pool);
1171} 1735}
1172 1736
1173/** 1737/**
@@ -1204,7 +1768,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1204 if (pmb == NULL) 1768 if (pmb == NULL)
1205 break; 1769 break;
1206 1770
1207 pmbox = &pmb->mb; 1771 pmbox = &pmb->u.mb;
1208 1772
1209 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 1773 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
1210 if (pmb->vport) { 1774 if (pmb->vport) {
@@ -1233,9 +1797,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1233 /* Unknown mailbox command compl */ 1797 /* Unknown mailbox command compl */
1234 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 1798 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
1235 "(%d):0323 Unknown Mailbox command " 1799 "(%d):0323 Unknown Mailbox command "
1236 "%x Cmpl\n", 1800 "x%x (x%x) Cmpl\n",
1237 pmb->vport ? pmb->vport->vpi : 0, 1801 pmb->vport ? pmb->vport->vpi : 0,
1238 pmbox->mbxCommand); 1802 pmbox->mbxCommand,
1803 lpfc_sli4_mbox_opcode_get(phba, pmb));
1239 phba->link_state = LPFC_HBA_ERROR; 1804 phba->link_state = LPFC_HBA_ERROR;
1240 phba->work_hs = HS_FFER3; 1805 phba->work_hs = HS_FFER3;
1241 lpfc_handle_eratt(phba); 1806 lpfc_handle_eratt(phba);
@@ -1250,29 +1815,29 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1250 LOG_MBOX | LOG_SLI, 1815 LOG_MBOX | LOG_SLI,
1251 "(%d):0305 Mbox cmd cmpl " 1816 "(%d):0305 Mbox cmd cmpl "
1252 "error - RETRYing Data: x%x " 1817 "error - RETRYing Data: x%x "
1253 "x%x x%x x%x\n", 1818 "(x%x) x%x x%x x%x\n",
1254 pmb->vport ? pmb->vport->vpi :0, 1819 pmb->vport ? pmb->vport->vpi :0,
1255 pmbox->mbxCommand, 1820 pmbox->mbxCommand,
1821 lpfc_sli4_mbox_opcode_get(phba,
1822 pmb),
1256 pmbox->mbxStatus, 1823 pmbox->mbxStatus,
1257 pmbox->un.varWords[0], 1824 pmbox->un.varWords[0],
1258 pmb->vport->port_state); 1825 pmb->vport->port_state);
1259 pmbox->mbxStatus = 0; 1826 pmbox->mbxStatus = 0;
1260 pmbox->mbxOwner = OWN_HOST; 1827 pmbox->mbxOwner = OWN_HOST;
1261 spin_lock_irq(&phba->hbalock);
1262 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1263 spin_unlock_irq(&phba->hbalock);
1264 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1828 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1265 if (rc == MBX_SUCCESS) 1829 if (rc != MBX_NOT_FINISHED)
1266 continue; 1830 continue;
1267 } 1831 }
1268 } 1832 }
1269 1833
1270 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 1834 /* Mailbox cmd <cmd> Cmpl <cmpl> */
1271 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 1835 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
1272 "(%d):0307 Mailbox cmd x%x Cmpl x%p " 1836 "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
1273 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 1837 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
1274 pmb->vport ? pmb->vport->vpi : 0, 1838 pmb->vport ? pmb->vport->vpi : 0,
1275 pmbox->mbxCommand, 1839 pmbox->mbxCommand,
1840 lpfc_sli4_mbox_opcode_get(phba, pmb),
1276 pmb->mbox_cmpl, 1841 pmb->mbox_cmpl,
1277 *((uint32_t *) pmbox), 1842 *((uint32_t *) pmbox),
1278 pmbox->un.varWords[0], 1843 pmbox->un.varWords[0],
@@ -1317,6 +1882,45 @@ lpfc_sli_get_buff(struct lpfc_hba *phba,
1317 return &hbq_entry->dbuf; 1882 return &hbq_entry->dbuf;
1318} 1883}
1319 1884
1885/**
1886 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
1887 * @phba: Pointer to HBA context object.
1888 * @pring: Pointer to driver SLI ring object.
1889 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
1890 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
1891 * @fch_type: the type for the first frame of the sequence.
1892 *
1893 * This function is called with no lock held. This function uses the r_ctl and
1894 * type of the received sequence to find the correct callback function to call
1895 * to process the sequence.
1896 **/
1897static int
1898lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1899 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
1900 uint32_t fch_type)
1901{
1902 int i;
1903
1904 /* unSolicited Responses */
1905 if (pring->prt[0].profile) {
1906 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1907 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1908 saveq);
1909 return 1;
1910 }
1911 /* We must search, based on rctl / type
1912 for the right routine */
1913 for (i = 0; i < pring->num_mask; i++) {
1914 if ((pring->prt[i].rctl == fch_r_ctl) &&
1915 (pring->prt[i].type == fch_type)) {
1916 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1917 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1918 (phba, pring, saveq);
1919 return 1;
1920 }
1921 }
1922 return 0;
1923}
1320 1924
1321/** 1925/**
1322 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 1926 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
@@ -1339,7 +1943,7 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1339 IOCB_t * irsp; 1943 IOCB_t * irsp;
1340 WORD5 * w5p; 1944 WORD5 * w5p;
1341 uint32_t Rctl, Type; 1945 uint32_t Rctl, Type;
1342 uint32_t match, i; 1946 uint32_t match;
1343 struct lpfc_iocbq *iocbq; 1947 struct lpfc_iocbq *iocbq;
1344 struct lpfc_dmabuf *dmzbuf; 1948 struct lpfc_dmabuf *dmzbuf;
1345 1949
@@ -1482,35 +2086,12 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1482 } 2086 }
1483 } 2087 }
1484 2088
1485 /* unSolicited Responses */ 2089 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
1486 if (pring->prt[0].profile) {
1487 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1488 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1489 saveq);
1490 match = 1;
1491 } else {
1492 /* We must search, based on rctl / type
1493 for the right routine */
1494 for (i = 0; i < pring->num_mask; i++) {
1495 if ((pring->prt[i].rctl == Rctl)
1496 && (pring->prt[i].type == Type)) {
1497 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1498 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1499 (phba, pring, saveq);
1500 match = 1;
1501 break;
1502 }
1503 }
1504 }
1505 if (match == 0) {
1506 /* Unexpected Rctl / Type received */
1507 /* Ring <ringno> handler: unexpected
1508 Rctl <Rctl> Type <Type> received */
1509 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2090 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1510 "0313 Ring %d handler: unexpected Rctl x%x " 2091 "0313 Ring %d handler: unexpected Rctl x%x "
1511 "Type x%x received\n", 2092 "Type x%x received\n",
1512 pring->ringno, Rctl, Type); 2093 pring->ringno, Rctl, Type);
1513 } 2094
1514 return 1; 2095 return 1;
1515} 2096}
1516 2097
@@ -1552,6 +2133,37 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
1552} 2133}
1553 2134
1554/** 2135/**
2136 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2137 * @phba: Pointer to HBA context object.
2138 * @pring: Pointer to driver SLI ring object.
2139 * @iotag: IOCB tag.
2140 *
2141 * This function looks up the iocb_lookup table to get the command iocb
2142 * corresponding to the given iotag. This function is called with the
2143 * hbalock held.
2144 * This function returns the command iocb object if it finds the command
2145 * iocb else returns NULL.
2146 **/
2147static struct lpfc_iocbq *
2148lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2149 struct lpfc_sli_ring *pring, uint16_t iotag)
2150{
2151 struct lpfc_iocbq *cmd_iocb;
2152
2153 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2154 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2155 list_del_init(&cmd_iocb->list);
2156 pring->txcmplq_cnt--;
2157 return cmd_iocb;
2158 }
2159
2160 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2161 "0372 iotag x%x is out off range: max iotag (x%x)\n",
2162 iotag, phba->sli.last_iotag);
2163 return NULL;
2164}
2165
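Editor's note: lpfc_sli_iocbq_lookup_by_tag() above is a bounds check followed by a direct array index keyed by the iotag — anything out of range is logged as an error rather than searched for, and a hit is also unlinked from the txcmplq in the real routine. A minimal sketch of that lookup shape; struct sli, struct iocb and the fixed array size are illustrative only.

#include <stddef.h>
#include <stdint.h>

struct iocb { int busy; };

struct sli {
	uint16_t last_iotag;
	struct iocb *lookup[1024];  /* iotag -> command iocb, sized for the sketch */
};

/* Same shape as lpfc_sli_iocbq_lookup_by_tag(): a bounds check followed by a
 * direct array index; the driver additionally unlinks the hit from the
 * txcmplq and decrements the per-ring count. */
static struct iocb *lookup_by_tag(struct sli *sli, uint16_t iotag)
{
	if (iotag != 0 && iotag <= sli->last_iotag)
		return sli->lookup[iotag];
	return NULL;  /* the driver logs "iotag out of range" here */
}

int main(void)
{
	static struct iocb pending = { .busy = 1 };
	struct sli sli = { .last_iotag = 100 };

	sli.lookup[42] = &pending;
	return lookup_by_tag(&sli, 42) == &pending ? 0 : 1;
}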
2166/**
1555 * lpfc_sli_process_sol_iocb - process solicited iocb completion 2167 * lpfc_sli_process_sol_iocb - process solicited iocb completion
1556 * @phba: Pointer to HBA context object. 2168 * @phba: Pointer to HBA context object.
1557 * @pring: Pointer to driver SLI ring object. 2169 * @pring: Pointer to driver SLI ring object.
@@ -1954,7 +2566,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1954 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2566 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1955 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 2567 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1956 spin_unlock_irqrestore(&phba->hbalock, iflag); 2568 spin_unlock_irqrestore(&phba->hbalock, iflag);
1957 lpfc_rampdown_queue_depth(phba); 2569 phba->lpfc_rampdown_queue_depth(phba);
1958 spin_lock_irqsave(&phba->hbalock, iflag); 2570 spin_lock_irqsave(&phba->hbalock, iflag);
1959 } 2571 }
1960 2572
@@ -2068,39 +2680,215 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2068} 2680}
2069 2681
2070/** 2682/**
2071 * lpfc_sli_handle_slow_ring_event - Handle ring events for non-FCP rings 2683 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
2684 * @phba: Pointer to HBA context object.
2685 * @pring: Pointer to driver SLI ring object.
2686 * @rspiocbp: Pointer to driver response IOCB object.
2687 *
2688 * This function is called from the worker thread when there is a slow-path
2689 * response IOCB to process. This function chains all the response iocbs until
2690 * seeing the iocb with the LE bit set. The function will call
2691 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
2692 * completion of a command iocb. The function will call the
2693 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
2694 * The function frees the resources or calls the completion handler if this
2695 * iocb is an abort completion. The function returns NULL when the response
2696 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
2697 * this function shall chain the iocb on to the iocb_continueq and return the
2698 * response iocb passed in.
2699 **/
2700static struct lpfc_iocbq *
2701lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2702 struct lpfc_iocbq *rspiocbp)
2703{
2704 struct lpfc_iocbq *saveq;
2705 struct lpfc_iocbq *cmdiocbp;
2706 struct lpfc_iocbq *next_iocb;
2707 IOCB_t *irsp = NULL;
2708 uint32_t free_saveq;
2709 uint8_t iocb_cmd_type;
2710 lpfc_iocb_type type;
2711 unsigned long iflag;
2712 int rc;
2713
2714 spin_lock_irqsave(&phba->hbalock, iflag);
2715 /* First add the response iocb to the continueq list */
2716 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
2717 pring->iocb_continueq_cnt++;
2718
2719 /* Now, determine whether the list is completed for processing */
2720 irsp = &rspiocbp->iocb;
2721 if (irsp->ulpLe) {
2722 /*
2723 * By default, the driver expects to free all resources
2724 * associated with this iocb completion.
2725 */
2726 free_saveq = 1;
2727 saveq = list_get_first(&pring->iocb_continueq,
2728 struct lpfc_iocbq, list);
2729 irsp = &(saveq->iocb);
2730 list_del_init(&pring->iocb_continueq);
2731 pring->iocb_continueq_cnt = 0;
2732
2733 pring->stats.iocb_rsp++;
2734
2735 /*
2736 * If resource errors reported from HBA, reduce
2737 * queuedepths of the SCSI device.
2738 */
2739 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2740 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2741 spin_unlock_irqrestore(&phba->hbalock, iflag);
2742 phba->lpfc_rampdown_queue_depth(phba);
2743 spin_lock_irqsave(&phba->hbalock, iflag);
2744 }
2745
2746 if (irsp->ulpStatus) {
2747 /* Rsp ring <ringno> error: IOCB */
2748 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2749 "0328 Rsp Ring %d error: "
2750 "IOCB Data: "
2751 "x%x x%x x%x x%x "
2752 "x%x x%x x%x x%x "
2753 "x%x x%x x%x x%x "
2754 "x%x x%x x%x x%x\n",
2755 pring->ringno,
2756 irsp->un.ulpWord[0],
2757 irsp->un.ulpWord[1],
2758 irsp->un.ulpWord[2],
2759 irsp->un.ulpWord[3],
2760 irsp->un.ulpWord[4],
2761 irsp->un.ulpWord[5],
2762 *(((uint32_t *) irsp) + 6),
2763 *(((uint32_t *) irsp) + 7),
2764 *(((uint32_t *) irsp) + 8),
2765 *(((uint32_t *) irsp) + 9),
2766 *(((uint32_t *) irsp) + 10),
2767 *(((uint32_t *) irsp) + 11),
2768 *(((uint32_t *) irsp) + 12),
2769 *(((uint32_t *) irsp) + 13),
2770 *(((uint32_t *) irsp) + 14),
2771 *(((uint32_t *) irsp) + 15));
2772 }
2773
2774 /*
2775 * Fetch the IOCB command type and call the correct completion
2776 * routine. Solicited and Unsolicited IOCBs on the ELS ring
2777 * get freed back to the lpfc_iocb_list by the discovery
2778 * kernel thread.
2779 */
2780 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
2781 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
2782 switch (type) {
2783 case LPFC_SOL_IOCB:
2784 spin_unlock_irqrestore(&phba->hbalock, iflag);
2785 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
2786 spin_lock_irqsave(&phba->hbalock, iflag);
2787 break;
2788
2789 case LPFC_UNSOL_IOCB:
2790 spin_unlock_irqrestore(&phba->hbalock, iflag);
2791 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
2792 spin_lock_irqsave(&phba->hbalock, iflag);
2793 if (!rc)
2794 free_saveq = 0;
2795 break;
2796
2797 case LPFC_ABORT_IOCB:
2798 cmdiocbp = NULL;
2799 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
2800 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
2801 saveq);
2802 if (cmdiocbp) {
2803 /* Call the specified completion routine */
2804 if (cmdiocbp->iocb_cmpl) {
2805 spin_unlock_irqrestore(&phba->hbalock,
2806 iflag);
2807 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
2808 saveq);
2809 spin_lock_irqsave(&phba->hbalock,
2810 iflag);
2811 } else
2812 __lpfc_sli_release_iocbq(phba,
2813 cmdiocbp);
2814 }
2815 break;
2816
2817 case LPFC_UNKNOWN_IOCB:
2818 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2819 char adaptermsg[LPFC_MAX_ADPTMSG];
2820 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2821 memcpy(&adaptermsg[0], (uint8_t *)irsp,
2822 MAX_MSG_DATA);
2823 dev_warn(&((phba->pcidev)->dev),
2824 "lpfc%d: %s\n",
2825 phba->brd_no, adaptermsg);
2826 } else {
2827 /* Unknown IOCB command */
2828 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2829 "0335 Unknown IOCB "
2830 "command Data: x%x "
2831 "x%x x%x x%x\n",
2832 irsp->ulpCommand,
2833 irsp->ulpStatus,
2834 irsp->ulpIoTag,
2835 irsp->ulpContext);
2836 }
2837 break;
2838 }
2839
2840 if (free_saveq) {
2841 list_for_each_entry_safe(rspiocbp, next_iocb,
2842 &saveq->list, list) {
2843 list_del(&rspiocbp->list);
2844 __lpfc_sli_release_iocbq(phba, rspiocbp);
2845 }
2846 __lpfc_sli_release_iocbq(phba, saveq);
2847 }
2848 rspiocbp = NULL;
2849 }
2850 spin_unlock_irqrestore(&phba->hbalock, iflag);
2851 return rspiocbp;
2852}
2853
2854/**
2855 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
2072 * @phba: Pointer to HBA context object. 2856 * @phba: Pointer to HBA context object.
2073 * @pring: Pointer to driver SLI ring object. 2857 * @pring: Pointer to driver SLI ring object.
2074 * @mask: Host attention register mask for this ring. 2858 * @mask: Host attention register mask for this ring.
2075 * 2859 *
2076 * This function is called from the worker thread when there is a ring 2860 * This routine wraps the actual slow_ring event process routine from the
2077 * event for non-fcp rings. The caller does not hold any lock . 2861 * API jump table function pointer from the lpfc_hba struct.
2078 * The function processes each response iocb in the response ring until it
2079 * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
2080 * LE bit set. The function will call lpfc_sli_process_sol_iocb function if the
2081 * response iocb indicates a completion of a command iocb. The function
2082 * will call lpfc_sli_process_unsol_iocb function if this is an unsolicited
2083 * iocb. The function frees the resources or calls the completion handler if
2084 * this iocb is an abort completion. The function returns 0 when the allocated
2085 * iocbs are not freed, otherwise returns 1.
2086 **/ 2862 **/
2087int 2863void
2088lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 2864lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2089 struct lpfc_sli_ring *pring, uint32_t mask) 2865 struct lpfc_sli_ring *pring, uint32_t mask)
2090{ 2866{
2867 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
2868}
2869
2870/**
2871 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
2872 * @phba: Pointer to HBA context object.
2873 * @pring: Pointer to driver SLI ring object.
2874 * @mask: Host attention register mask for this ring.
2875 *
2876 * This function is called from the worker thread when there is a ring event
2877 * for non-fcp rings. The caller does not hold any lock. The function will
2878 * remove each response iocb in the response ring and calls the handle
2879 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
2880 **/
2881static void
2882lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
2883 struct lpfc_sli_ring *pring, uint32_t mask)
2884{
2091 struct lpfc_pgp *pgp; 2885 struct lpfc_pgp *pgp;
2092 IOCB_t *entry; 2886 IOCB_t *entry;
2093 IOCB_t *irsp = NULL; 2887 IOCB_t *irsp = NULL;
2094 struct lpfc_iocbq *rspiocbp = NULL; 2888 struct lpfc_iocbq *rspiocbp = NULL;
2095 struct lpfc_iocbq *next_iocb;
2096 struct lpfc_iocbq *cmdiocbp;
2097 struct lpfc_iocbq *saveq;
2098 uint8_t iocb_cmd_type;
2099 lpfc_iocb_type type;
2100 uint32_t status, free_saveq;
2101 uint32_t portRspPut, portRspMax; 2889 uint32_t portRspPut, portRspMax;
2102 int rc = 1;
2103 unsigned long iflag; 2890 unsigned long iflag;
2891 uint32_t status;
2104 2892
2105 pgp = &phba->port_gp[pring->ringno]; 2893 pgp = &phba->port_gp[pring->ringno];
2106 spin_lock_irqsave(&phba->hbalock, iflag); 2894 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -2128,7 +2916,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2128 phba->work_hs = HS_FFER3; 2916 phba->work_hs = HS_FFER3;
2129 lpfc_handle_eratt(phba); 2917 lpfc_handle_eratt(phba);
2130 2918
2131 return 1; 2919 return;
2132 } 2920 }
2133 2921
2134 rmb(); 2922 rmb();
@@ -2173,138 +2961,10 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2173 2961
2174 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 2962 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
2175 2963
2176 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 2964 spin_unlock_irqrestore(&phba->hbalock, iflag);
2177 2965 /* Handle the response IOCB */
2178 pring->iocb_continueq_cnt++; 2966 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
2179 if (irsp->ulpLe) { 2967 spin_lock_irqsave(&phba->hbalock, iflag);
2180 /*
2181 * By default, the driver expects to free all resources
2182 * associated with this iocb completion.
2183 */
2184 free_saveq = 1;
2185 saveq = list_get_first(&pring->iocb_continueq,
2186 struct lpfc_iocbq, list);
2187 irsp = &(saveq->iocb);
2188 list_del_init(&pring->iocb_continueq);
2189 pring->iocb_continueq_cnt = 0;
2190
2191 pring->stats.iocb_rsp++;
2192
2193 /*
2194 * If resource errors reported from HBA, reduce
2195 * queuedepths of the SCSI device.
2196 */
2197 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2198 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2199 spin_unlock_irqrestore(&phba->hbalock, iflag);
2200 lpfc_rampdown_queue_depth(phba);
2201 spin_lock_irqsave(&phba->hbalock, iflag);
2202 }
2203
2204 if (irsp->ulpStatus) {
2205 /* Rsp ring <ringno> error: IOCB */
2206 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2207 "0328 Rsp Ring %d error: "
2208 "IOCB Data: "
2209 "x%x x%x x%x x%x "
2210 "x%x x%x x%x x%x "
2211 "x%x x%x x%x x%x "
2212 "x%x x%x x%x x%x\n",
2213 pring->ringno,
2214 irsp->un.ulpWord[0],
2215 irsp->un.ulpWord[1],
2216 irsp->un.ulpWord[2],
2217 irsp->un.ulpWord[3],
2218 irsp->un.ulpWord[4],
2219 irsp->un.ulpWord[5],
2220 *(((uint32_t *) irsp) + 6),
2221 *(((uint32_t *) irsp) + 7),
2222 *(((uint32_t *) irsp) + 8),
2223 *(((uint32_t *) irsp) + 9),
2224 *(((uint32_t *) irsp) + 10),
2225 *(((uint32_t *) irsp) + 11),
2226 *(((uint32_t *) irsp) + 12),
2227 *(((uint32_t *) irsp) + 13),
2228 *(((uint32_t *) irsp) + 14),
2229 *(((uint32_t *) irsp) + 15));
2230 }
2231
2232 /*
2233 * Fetch the IOCB command type and call the correct
2234 * completion routine. Solicited and Unsolicited
2235 * IOCBs on the ELS ring get freed back to the
2236 * lpfc_iocb_list by the discovery kernel thread.
2237 */
2238 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
2239 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
2240 if (type == LPFC_SOL_IOCB) {
2241 spin_unlock_irqrestore(&phba->hbalock, iflag);
2242 rc = lpfc_sli_process_sol_iocb(phba, pring,
2243 saveq);
2244 spin_lock_irqsave(&phba->hbalock, iflag);
2245 } else if (type == LPFC_UNSOL_IOCB) {
2246 spin_unlock_irqrestore(&phba->hbalock, iflag);
2247 rc = lpfc_sli_process_unsol_iocb(phba, pring,
2248 saveq);
2249 spin_lock_irqsave(&phba->hbalock, iflag);
2250 if (!rc)
2251 free_saveq = 0;
2252 } else if (type == LPFC_ABORT_IOCB) {
2253 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
2254 ((cmdiocbp =
2255 lpfc_sli_iocbq_lookup(phba, pring,
2256 saveq)))) {
2257 /* Call the specified completion
2258 routine */
2259 if (cmdiocbp->iocb_cmpl) {
2260 spin_unlock_irqrestore(
2261 &phba->hbalock,
2262 iflag);
2263 (cmdiocbp->iocb_cmpl) (phba,
2264 cmdiocbp, saveq);
2265 spin_lock_irqsave(
2266 &phba->hbalock,
2267 iflag);
2268 } else
2269 __lpfc_sli_release_iocbq(phba,
2270 cmdiocbp);
2271 }
2272 } else if (type == LPFC_UNKNOWN_IOCB) {
2273 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2274
2275 char adaptermsg[LPFC_MAX_ADPTMSG];
2276
2277 memset(adaptermsg, 0,
2278 LPFC_MAX_ADPTMSG);
2279 memcpy(&adaptermsg[0], (uint8_t *) irsp,
2280 MAX_MSG_DATA);
2281 dev_warn(&((phba->pcidev)->dev),
2282 "lpfc%d: %s\n",
2283 phba->brd_no, adaptermsg);
2284 } else {
2285 /* Unknown IOCB command */
2286 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2287 "0335 Unknown IOCB "
2288 "command Data: x%x "
2289 "x%x x%x x%x\n",
2290 irsp->ulpCommand,
2291 irsp->ulpStatus,
2292 irsp->ulpIoTag,
2293 irsp->ulpContext);
2294 }
2295 }
2296
2297 if (free_saveq) {
2298 list_for_each_entry_safe(rspiocbp, next_iocb,
2299 &saveq->list, list) {
2300 list_del(&rspiocbp->list);
2301 __lpfc_sli_release_iocbq(phba,
2302 rspiocbp);
2303 }
2304 __lpfc_sli_release_iocbq(phba, saveq);
2305 }
2306 rspiocbp = NULL;
2307 }
2308 2968
2309 /* 2969 /*
2310 * If the port response put pointer has not been updated, sync 2970 * If the port response put pointer has not been updated, sync
@@ -2338,7 +2998,37 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2338 } 2998 }
2339 2999
2340 spin_unlock_irqrestore(&phba->hbalock, iflag); 3000 spin_unlock_irqrestore(&phba->hbalock, iflag);
2341 return rc; 3001 return;
3002}
3003
3004/**
3005 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3006 * @phba: Pointer to HBA context object.
3007 * @pring: Pointer to driver SLI ring object.
3008 * @mask: Host attention register mask for this ring.
3009 *
3010 * This function is called from the worker thread when there is a pending
3011 * ELS response iocb on the driver internal slow-path response iocb worker
3012 * queue. The caller does not hold any lock. The function removes each
3013 * response iocb from the response worker queue and calls the handle
3014 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3015 **/
3016static void
3017lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3018 struct lpfc_sli_ring *pring, uint32_t mask)
3019{
3020 struct lpfc_iocbq *irspiocbq;
3021 unsigned long iflag;
3022
3023 while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
3024 /* Get the response iocb from the head of work queue */
3025 spin_lock_irqsave(&phba->hbalock, iflag);
3026 list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
3027 irspiocbq, struct lpfc_iocbq, list);
3028 spin_unlock_irqrestore(&phba->hbalock, iflag);
3029 /* Process the response iocb */
3030 lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
3031 }
2342} 3032}
2343 3033
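The s4 handler above drains the slow-path work queue by detaching one entry at a time under the lock and processing it with the lock dropped. Below is a self-contained sketch of that drain pattern; names are illustrative only, not lpfc API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Model of the drain pattern: detach one entry under the lock, then
 * process it with the lock dropped. */
struct work_item {
	int payload;
	struct work_item *next;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct work_item *queue_head;

static void process(struct work_item *item)
{
	/* Runs without the lock held, like lpfc_sli_sp_handle_rspiocb. */
	printf("processing %d\n", item->payload);
	free(item);
}

static void drain_queue(void)
{
	for (;;) {
		struct work_item *item;

		pthread_mutex_lock(&queue_lock);
		item = queue_head;
		if (item)
			queue_head = item->next;  /* detach the head entry */
		pthread_mutex_unlock(&queue_lock);

		if (!item)
			break;                    /* queue is empty, done */
		process(item);                    /* handle it unlocked */
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct work_item *item = malloc(sizeof(*item));

		item->payload = i;
		item->next = queue_head;
		queue_head = item;
	}
	drain_queue();
	return 0;
}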
2344/** 3034/**
@@ -2420,7 +3110,7 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
2420} 3110}
2421 3111
2422/** 3112/**
2423 * lpfc_sli_brdready - Check for host status bits 3113 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
2424 * @phba: Pointer to HBA context object. 3114 * @phba: Pointer to HBA context object.
2425 * @mask: Bit mask to be checked. 3115 * @mask: Bit mask to be checked.
2426 * 3116 *
@@ -2432,8 +3122,8 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
2432 * function returns 1 when HBA fail to restart otherwise returns 3122 * function returns 1 when HBA fail to restart otherwise returns
2433 * zero. 3123 * zero.
2434 **/ 3124 **/
2435int 3125static int
2436lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 3126lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
2437{ 3127{
2438 uint32_t status; 3128 uint32_t status;
2439 int i = 0; 3129 int i = 0;
@@ -2477,6 +3167,56 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
2477 return retval; 3167 return retval;
2478} 3168}
2479 3169
3170/**
3171 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3172 * @phba: Pointer to HBA context object.
3173 * @mask: Bit mask to be checked.
3174 *
3175 * This function checks the host status register to see if the HBA is
3176 * ready. This function will wait in a loop for the HBA to become ready.
3177 * If the HBA is not ready, the function will reset the HBA PCI
3178 * function again. The function returns 1 when the HBA fails to become
3179 * ready, otherwise it returns zero.
3180 **/
3181static int
3182lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3183{
3184 uint32_t status;
3185 int retval = 0;
3186
3187 /* Read the HBA Host Status Register */
3188 status = lpfc_sli4_post_status_check(phba);
3189
3190 if (status) {
3191 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3192 lpfc_sli_brdrestart(phba);
3193 status = lpfc_sli4_post_status_check(phba);
3194 }
3195
3196 /* Check to see if any errors occurred during init */
3197 if (status) {
3198 phba->link_state = LPFC_HBA_ERROR;
3199 retval = 1;
3200 } else
3201 phba->sli4_hba.intr_enable = 0;
3202
3203 return retval;
3204}
3205
3206/**
3207 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3208 * @phba: Pointer to HBA context object.
3209 * @mask: Bit mask to be checked.
3210 *
3211 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
3212 * from the API jump table function pointer from the lpfc_hba struct.
3213 **/
3214int
3215lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3216{
3217 return phba->lpfc_sli_brdready(phba, mask);
3218}
3219
2480#define BARRIER_TEST_PATTERN (0xdeadbeef) 3220#define BARRIER_TEST_PATTERN (0xdeadbeef)
2481 3221
2482/** 3222/**
@@ -2532,7 +3272,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
2532 mdelay(1); 3272 mdelay(1);
2533 3273
2534 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { 3274 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
2535 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE || 3275 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
2536 phba->pport->stopped) 3276 phba->pport->stopped)
2537 goto restore_hc; 3277 goto restore_hc;
2538 else 3278 else
@@ -2613,7 +3353,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2613 return 1; 3353 return 1;
2614 } 3354 }
2615 3355
2616 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 3356 spin_lock_irq(&phba->hbalock);
3357 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3358 spin_unlock_irq(&phba->hbalock);
2617 3359
2618 mempool_free(pmb, phba->mbox_mem_pool); 3360 mempool_free(pmb, phba->mbox_mem_pool);
2619 3361
@@ -2636,10 +3378,10 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2636 } 3378 }
2637 spin_lock_irq(&phba->hbalock); 3379 spin_lock_irq(&phba->hbalock);
2638 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3380 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3381 psli->mbox_active = NULL;
2639 phba->link_flag &= ~LS_IGNORE_ERATT; 3382 phba->link_flag &= ~LS_IGNORE_ERATT;
2640 spin_unlock_irq(&phba->hbalock); 3383 spin_unlock_irq(&phba->hbalock);
2641 3384
2642 psli->mbox_active = NULL;
2643 lpfc_hba_down_post(phba); 3385 lpfc_hba_down_post(phba);
2644 phba->link_state = LPFC_HBA_ERROR; 3386 phba->link_state = LPFC_HBA_ERROR;
2645 3387
@@ -2647,7 +3389,7 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2647} 3389}
2648 3390
2649/** 3391/**
2650 * lpfc_sli_brdreset - Reset the HBA 3392 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
2651 * @phba: Pointer to HBA context object. 3393 * @phba: Pointer to HBA context object.
2652 * 3394 *
2653 * This function resets the HBA by writing HC_INITFF to the control 3395 * This function resets the HBA by writing HC_INITFF to the control
@@ -2683,7 +3425,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2683 (cfg_value & 3425 (cfg_value &
2684 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3426 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
2685 3427
2686 psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); 3428 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
3429
2687 /* Now toggle INITFF bit in the Host Control Register */ 3430 /* Now toggle INITFF bit in the Host Control Register */
2688 writel(HC_INITFF, phba->HCregaddr); 3431 writel(HC_INITFF, phba->HCregaddr);
2689 mdelay(1); 3432 mdelay(1);
@@ -2710,7 +3453,66 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2710} 3453}
2711 3454
2712/** 3455/**
2713 * lpfc_sli_brdrestart - Restart the HBA 3456 * lpfc_sli4_brdreset - Reset a sli-4 HBA
3457 * @phba: Pointer to HBA context object.
3458 *
3459 * This function resets a SLI4 HBA. This function disables PCI layer parity
3460 * checking while it resets the device. The caller is not required to hold
3461 * any locks.
3462 *
3463 * This function returns 0 always.
3464 **/
3465int
3466lpfc_sli4_brdreset(struct lpfc_hba *phba)
3467{
3468 struct lpfc_sli *psli = &phba->sli;
3469 uint16_t cfg_value;
3470 uint8_t qindx;
3471
3472 /* Reset HBA */
3473 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3474 "0295 Reset HBA Data: x%x x%x\n",
3475 phba->pport->port_state, psli->sli_flag);
3476
3477 /* perform board reset */
3478 phba->fc_eventTag = 0;
3479 phba->pport->fc_myDID = 0;
3480 phba->pport->fc_prevDID = 0;
3481
3482 /* Turn off parity checking and serr during the physical reset */
3483 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3484 pci_write_config_word(phba->pcidev, PCI_COMMAND,
3485 (cfg_value &
3486 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3487
3488 spin_lock_irq(&phba->hbalock);
3489 psli->sli_flag &= ~(LPFC_PROCESS_LA);
3490 phba->fcf.fcf_flag = 0;
3491 /* Clean up the child queue list for the CQs */
3492 list_del_init(&phba->sli4_hba.mbx_wq->list);
3493 list_del_init(&phba->sli4_hba.els_wq->list);
3494 list_del_init(&phba->sli4_hba.hdr_rq->list);
3495 list_del_init(&phba->sli4_hba.dat_rq->list);
3496 list_del_init(&phba->sli4_hba.mbx_cq->list);
3497 list_del_init(&phba->sli4_hba.els_cq->list);
3498 list_del_init(&phba->sli4_hba.rxq_cq->list);
3499 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
3500 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
3501 for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
3502 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
3503 spin_unlock_irq(&phba->hbalock);
3504
3505 /* Now physically reset the device */
3506 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3507 "0389 Performing PCI function reset!\n");
3508 /* Perform FCoE PCI function reset */
3509 lpfc_pci_function_reset(phba);
3510
3511 return 0;
3512}
3513
3514/**
3515 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
2714 * @phba: Pointer to HBA context object. 3516 * @phba: Pointer to HBA context object.
2715 * 3517 *
2716 * This function is called in the SLI initialization code path to 3518 * This function is called in the SLI initialization code path to
@@ -2722,8 +3524,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2722 * The function does not guarantee completion of MBX_RESTART mailbox 3524 * The function does not guarantee completion of MBX_RESTART mailbox
2723 * command before the return of this function. 3525 * command before the return of this function.
2724 **/ 3526 **/
2725int 3527static int
2726lpfc_sli_brdrestart(struct lpfc_hba *phba) 3528lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
2727{ 3529{
2728 MAILBOX_t *mb; 3530 MAILBOX_t *mb;
2729 struct lpfc_sli *psli; 3531 struct lpfc_sli *psli;
@@ -2762,7 +3564,7 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2762 lpfc_sli_brdreset(phba); 3564 lpfc_sli_brdreset(phba);
2763 phba->pport->stopped = 0; 3565 phba->pport->stopped = 0;
2764 phba->link_state = LPFC_INIT_START; 3566 phba->link_state = LPFC_INIT_START;
2765 3567 phba->hba_flag = 0;
2766 spin_unlock_irq(&phba->hbalock); 3568 spin_unlock_irq(&phba->hbalock);
2767 3569
2768 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 3570 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
@@ -2777,6 +3579,55 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2777} 3579}
2778 3580
2779/** 3581/**
3582 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
3583 * @phba: Pointer to HBA context object.
3584 *
3585 * This function is called in the SLI initialization code path to restart
3586 * a SLI4 HBA. The caller is not required to hold any lock.
3587 * At the end of the function, it calls lpfc_hba_down_post function to
3588 * free any pending commands.
3589 **/
3590static int
3591lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
3592{
3593 struct lpfc_sli *psli = &phba->sli;
3594
3595
3596 /* Restart HBA */
3597 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3598 "0296 Restart HBA Data: x%x x%x\n",
3599 phba->pport->port_state, psli->sli_flag);
3600
3601 lpfc_sli4_brdreset(phba);
3602
3603 spin_lock_irq(&phba->hbalock);
3604 phba->pport->stopped = 0;
3605 phba->link_state = LPFC_INIT_START;
3606 phba->hba_flag = 0;
3607 spin_unlock_irq(&phba->hbalock);
3608
3609 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
3610 psli->stats_start = get_seconds();
3611
3612 lpfc_hba_down_post(phba);
3613
3614 return 0;
3615}
3616
3617/**
3618 * lpfc_sli_brdrestart - Wrapper func for restarting hba
3619 * @phba: Pointer to HBA context object.
3620 *
3621 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
3622 * API jump table function pointer from the lpfc_hba struct.
3623**/
3624int
3625lpfc_sli_brdrestart(struct lpfc_hba *phba)
3626{
3627 return phba->lpfc_sli_brdrestart(phba);
3628}
3629
3630/**
2780 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart 3631 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
2781 * @phba: Pointer to HBA context object. 3632 * @phba: Pointer to HBA context object.
2782 * 3633 *
@@ -2940,7 +3791,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2940 if (!pmb) 3791 if (!pmb)
2941 return -ENOMEM; 3792 return -ENOMEM;
2942 3793
2943 pmbox = &pmb->mb; 3794 pmbox = &pmb->u.mb;
2944 3795
2945 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 3796 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
2946 phba->link_state = LPFC_INIT_MBX_CMDS; 3797 phba->link_state = LPFC_INIT_MBX_CMDS;
@@ -2984,6 +3835,26 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2984} 3835}
2985 3836
2986/** 3837/**
3838 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
3839 * @phba: Pointer to HBA context object.
3840 *
3841 * This function is called during the SLI initialization to configure
3842 * all the HBQs and post buffers to the HBQ. The caller is not
3843 * required to hold any locks. This function will return zero if successful,
3844 * otherwise it will return a negative error code.
3845 **/
3846static int
3847lpfc_sli4_rb_setup(struct lpfc_hba *phba)
3848{
3849 phba->hbq_in_use = 1;
3850 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
3851 phba->hbq_count = 1;
3852 /* Initially populate or replenish the HBQs */
3853 lpfc_sli_hbqbuf_init_hbqs(phba, 0);
3854 return 0;
3855}
3856
3857/**
2987 * lpfc_sli_config_port - Issue config port mailbox command 3858 * lpfc_sli_config_port - Issue config port mailbox command
2988 * @phba: Pointer to HBA context object. 3859 * @phba: Pointer to HBA context object.
2989 * @sli_mode: sli mode - 2/3 3860 * @sli_mode: sli mode - 2/3
@@ -3047,33 +3918,43 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3047 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3918 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3048 "0442 Adapter failed to init, mbxCmd x%x " 3919 "0442 Adapter failed to init, mbxCmd x%x "
3049 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 3920 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
3050 pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0); 3921 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
3051 spin_lock_irq(&phba->hbalock); 3922 spin_lock_irq(&phba->hbalock);
3052 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE; 3923 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
3053 spin_unlock_irq(&phba->hbalock); 3924 spin_unlock_irq(&phba->hbalock);
3054 rc = -ENXIO; 3925 rc = -ENXIO;
3055 } else 3926 } else {
3927 /* Allow asynchronous mailbox command to go through */
3928 spin_lock_irq(&phba->hbalock);
3929 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
3930 spin_unlock_irq(&phba->hbalock);
3056 done = 1; 3931 done = 1;
3932 }
3057 } 3933 }
3058 if (!done) { 3934 if (!done) {
3059 rc = -EINVAL; 3935 rc = -EINVAL;
3060 goto do_prep_failed; 3936 goto do_prep_failed;
3061 } 3937 }
3062 if (pmb->mb.un.varCfgPort.sli_mode == 3) { 3938 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
3063 if (!pmb->mb.un.varCfgPort.cMA) { 3939 if (!pmb->u.mb.un.varCfgPort.cMA) {
3064 rc = -ENXIO; 3940 rc = -ENXIO;
3065 goto do_prep_failed; 3941 goto do_prep_failed;
3066 } 3942 }
3067 if (phba->max_vpi && pmb->mb.un.varCfgPort.gmv) { 3943 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
3068 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 3944 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3069 phba->max_vpi = pmb->mb.un.varCfgPort.max_vpi; 3945 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
3946 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
3947 phba->max_vpi : phba->max_vports;
3948
3070 } else 3949 } else
3071 phba->max_vpi = 0; 3950 phba->max_vpi = 0;
3072 if (pmb->mb.un.varCfgPort.gerbm) 3951 if (pmb->u.mb.un.varCfgPort.gdss)
3952 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
3953 if (pmb->u.mb.un.varCfgPort.gerbm)
3073 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 3954 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
3074 if (pmb->mb.un.varCfgPort.gcrp) 3955 if (pmb->u.mb.un.varCfgPort.gcrp)
3075 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 3956 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
3076 if (pmb->mb.un.varCfgPort.ginb) { 3957 if (pmb->u.mb.un.varCfgPort.ginb) {
3077 phba->sli3_options |= LPFC_SLI3_INB_ENABLED; 3958 phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
3078 phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get; 3959 phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
3079 phba->port_gp = phba->mbox->us.s3_inb_pgp.port; 3960 phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
@@ -3089,7 +3970,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3089 } 3970 }
3090 3971
3091 if (phba->cfg_enable_bg) { 3972 if (phba->cfg_enable_bg) {
3092 if (pmb->mb.un.varCfgPort.gbg) 3973 if (pmb->u.mb.un.varCfgPort.gbg)
3093 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 3974 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
3094 else 3975 else
3095 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3976 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -3184,8 +4065,9 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
3184 if (rc) 4065 if (rc)
3185 goto lpfc_sli_hba_setup_error; 4066 goto lpfc_sli_hba_setup_error;
3186 } 4067 }
3187 4068 spin_lock_irq(&phba->hbalock);
3188 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4069 phba->sli.sli_flag |= LPFC_PROCESS_LA;
4070 spin_unlock_irq(&phba->hbalock);
3189 4071
3190 rc = lpfc_config_port_post(phba); 4072 rc = lpfc_config_port_post(phba);
3191 if (rc) 4073 if (rc)
@@ -3200,6 +4082,493 @@ lpfc_sli_hba_setup_error:
3200 return rc; 4082 return rc;
3201} 4083}
3202 4084
4085/**
4086 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4087 * @phba: Pointer to HBA context object.
4088 * @mboxq: mailbox pointer.
4089 * This function issues a dump mailbox command to read config region
4090 * 23, parses the records in the region, and populates the driver
4091 * data structure.
4092 **/
4093static int
4094lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
4095 LPFC_MBOXQ_t *mboxq)
4096{
4097 struct lpfc_dmabuf *mp;
4098 struct lpfc_mqe *mqe;
4099 uint32_t data_length;
4100 int rc;
4101
4102 /* Program the default value of vlan_id and fc_map */
4103 phba->valid_vlan = 0;
4104 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4105 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4106 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4107
4108 mqe = &mboxq->u.mqe;
4109 if (lpfc_dump_fcoe_param(phba, mboxq))
4110 return -ENOMEM;
4111
4112 mp = (struct lpfc_dmabuf *) mboxq->context1;
4113 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4114
4115 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4116 "(%d):2571 Mailbox cmd x%x Status x%x "
4117 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4118 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4119 "CQ: x%x x%x x%x x%x\n",
4120 mboxq->vport ? mboxq->vport->vpi : 0,
4121 bf_get(lpfc_mqe_command, mqe),
4122 bf_get(lpfc_mqe_status, mqe),
4123 mqe->un.mb_words[0], mqe->un.mb_words[1],
4124 mqe->un.mb_words[2], mqe->un.mb_words[3],
4125 mqe->un.mb_words[4], mqe->un.mb_words[5],
4126 mqe->un.mb_words[6], mqe->un.mb_words[7],
4127 mqe->un.mb_words[8], mqe->un.mb_words[9],
4128 mqe->un.mb_words[10], mqe->un.mb_words[11],
4129 mqe->un.mb_words[12], mqe->un.mb_words[13],
4130 mqe->un.mb_words[14], mqe->un.mb_words[15],
4131 mqe->un.mb_words[16], mqe->un.mb_words[50],
4132 mboxq->mcqe.word0,
4133 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4134 mboxq->mcqe.trailer);
4135
4136 if (rc) {
4137 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4138 kfree(mp);
4139 return -EIO;
4140 }
4141 data_length = mqe->un.mb_words[5];
4142 if (data_length > DMP_FCOEPARAM_RGN_SIZE) {
4143 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4144 kfree(mp);
4145 return -EIO;
4146 }
4147
4148 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4149 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4150 kfree(mp);
4151 return 0;
4152}
4153
4154/**
4155 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4156 * @phba: pointer to lpfc hba data structure.
4157 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4158 * @vpd: pointer to the memory to hold resulting port vpd data.
4159 * @vpd_size: On input, the number of bytes allocated to @vpd.
4160 * On output, the number of data bytes in @vpd.
4161 *
4162 * This routine executes a READ_REV SLI4 mailbox command. In
4163 * addition, this routine gets the port vpd data.
4164 *
4165 * Return codes
4166 * 0 - successful
4167 * ENOMEM - could not allocate memory.
4168 **/
4169static int
4170lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4171 uint8_t *vpd, uint32_t *vpd_size)
4172{
4173 int rc = 0;
4174 uint32_t dma_size;
4175 struct lpfc_dmabuf *dmabuf;
4176 struct lpfc_mqe *mqe;
4177
4178 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4179 if (!dmabuf)
4180 return -ENOMEM;
4181
4182 /*
4183 * Get a DMA buffer for the vpd data resulting from the READ_REV
4184 * mailbox command.
4185 */
4186 dma_size = *vpd_size;
4187 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4188 dma_size,
4189 &dmabuf->phys,
4190 GFP_KERNEL);
4191 if (!dmabuf->virt) {
4192 kfree(dmabuf);
4193 return -ENOMEM;
4194 }
4195 memset(dmabuf->virt, 0, dma_size);
4196
4197 /*
4198 * The SLI4 implementation of READ_REV conflicts at word1,
4199 * bits 31:16 and SLI4 adds vpd functionality not present
4200 * in SLI3. This code corrects the conflicts.
4201 */
4202 lpfc_read_rev(phba, mboxq);
4203 mqe = &mboxq->u.mqe;
4204 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4205 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4206 mqe->un.read_rev.word1 &= 0x0000FFFF;
4207 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4208 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4209
4210 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4211 if (rc) {
4212 dma_free_coherent(&phba->pcidev->dev, dma_size,
4213 dmabuf->virt, dmabuf->phys);
4214 return -EIO;
4215 }
4216
4217 /*
4218 * The available vpd length cannot be bigger than the
4219 * DMA buffer passed to the port. Catch the less than
4220 * case and update the caller's size.
4221 */
4222 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4223 *vpd_size = mqe->un.read_rev.avail_vpd_len;
4224
4225 lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size);
4226 dma_free_coherent(&phba->pcidev->dev, dma_size,
4227 dmabuf->virt, dmabuf->phys);
4228 kfree(dmabuf);
4229 return 0;
4230}
4231
4232/**
4233 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
4234 * @phba: pointer to lpfc hba data structure.
4235 *
4236 * This routine is called to explicitly arm the SLI4 device's completion and
4237 * event queues
4238 **/
4239static void
4240lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4241{
4242 uint8_t fcp_eqidx;
4243
4244 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4245 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4246 lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
4247 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4248 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4249 LPFC_QUEUE_REARM);
4250 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
4251 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4252 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
4253 LPFC_QUEUE_REARM);
4254}
4255
4256/**
4257 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
4258 * @phba: Pointer to HBA context object.
4259 *
4260 * This function is the main SLI4 device initialization PCI function. This
4261 * function is called by the HBA initialization code, HBA reset code and
4262 * HBA error attention handler code. Caller is not required to hold any
4263 * locks.
4264 **/
4265int
4266lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4267{
4268 int rc;
4269 LPFC_MBOXQ_t *mboxq;
4270 struct lpfc_mqe *mqe;
4271 uint8_t *vpd;
4272 uint32_t vpd_size;
4273 uint32_t ftr_rsp = 0;
4274 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
4275 struct lpfc_vport *vport = phba->pport;
4276 struct lpfc_dmabuf *mp;
4277
4278 /* Perform a PCI function reset to start from clean */
4279 rc = lpfc_pci_function_reset(phba);
4280 if (unlikely(rc))
4281 return -ENODEV;
4282
4283 /* Check the HBA Host Status Register for readiness */
4284 rc = lpfc_sli4_post_status_check(phba);
4285 if (unlikely(rc))
4286 return -ENODEV;
4287 else {
4288 spin_lock_irq(&phba->hbalock);
4289 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
4290 spin_unlock_irq(&phba->hbalock);
4291 }
4292
4293 /*
4294 * Allocate a single mailbox container for initializing the
4295 * port.
4296 */
4297 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4298 if (!mboxq)
4299 return -ENOMEM;
4300
4301 /*
4302 * Continue initialization with default values even if driver failed
4303 * to read FCoE param config regions
4304 */
4305 if (lpfc_sli4_read_fcoe_params(phba, mboxq))
4306 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4307 "2570 Failed to read FCoE parameters \n");
4308
4309 /* Issue READ_REV to collect vpd and FW information. */
4310 vpd_size = PAGE_SIZE;
4311 vpd = kzalloc(vpd_size, GFP_KERNEL);
4312 if (!vpd) {
4313 rc = -ENOMEM;
4314 goto out_free_mbox;
4315 }
4316
4317 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
4318 if (unlikely(rc))
4319 goto out_free_vpd;
4320
4321 mqe = &mboxq->u.mqe;
4322 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
4323 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
4324 phba->hba_flag |= HBA_FCOE_SUPPORT;
4325 if (phba->sli_rev != LPFC_SLI_REV4 ||
4326 !(phba->hba_flag & HBA_FCOE_SUPPORT)) {
4327 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4328 "0376 READ_REV Error. SLI Level %d "
4329 "FCoE enabled %d\n",
4330 phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT);
4331 rc = -EIO;
4332 goto out_free_vpd;
4333 }
4334 /*
4335 * Evaluate the read rev and vpd data. Populate the driver
4336 * state with the results. If this routine fails, the failure
4337 * is not fatal as the driver will use generic values.
4338 */
4339 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
4340 if (unlikely(!rc)) {
4341 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4342 "0377 Error %d parsing vpd. "
4343 "Using defaults.\n", rc);
4344 rc = 0;
4345 }
4346
4347 /* Save information as VPD data */
4348 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
4349 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
4350 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
4351 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
4352 &mqe->un.read_rev);
4353 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
4354 &mqe->un.read_rev);
4355 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
4356 &mqe->un.read_rev);
4357 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
4358 &mqe->un.read_rev);
4359 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
4360 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
4361 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
4362 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
4363 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
4364 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
4365 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4366 "(%d):0380 READ_REV Status x%x "
4367 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
4368 mboxq->vport ? mboxq->vport->vpi : 0,
4369 bf_get(lpfc_mqe_status, mqe),
4370 phba->vpd.rev.opFwName,
4371 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
4372 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
4373
4374 /*
4375 * Discover the port's supported feature set and match it against the
4376 * hosts requests.
4377 */
4378 lpfc_request_features(phba, mboxq);
4379 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4380 if (unlikely(rc)) {
4381 rc = -EIO;
4382 goto out_free_vpd;
4383 }
4384
4385 /*
4386 * The port must support FCP initiator mode as this is the
4387 * only mode running in the host.
4388 */
4389 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
4390 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4391 "0378 No support for fcpi mode.\n");
4392 ftr_rsp++;
4393 }
4394
4395 /*
4396 * If the port cannot support the host's requested features
4397 * then turn off the global config parameters to disable the
4398 * feature in the driver. This is not a fatal error.
4399 */
4400 if ((phba->cfg_enable_bg) &&
4401 !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
4402 ftr_rsp++;
4403
4404 if (phba->max_vpi && phba->cfg_enable_npiv &&
4405 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
4406 ftr_rsp++;
4407
4408 if (ftr_rsp) {
4409 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4410 "0379 Feature Mismatch Data: x%08x %08x "
4411 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
4412 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
4413 phba->cfg_enable_npiv, phba->max_vpi);
4414 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
4415 phba->cfg_enable_bg = 0;
4416 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
4417 phba->cfg_enable_npiv = 0;
4418 }
4419
4420 /* These SLI3 features are assumed in SLI4 */
4421 spin_lock_irq(&phba->hbalock);
4422 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
4423 spin_unlock_irq(&phba->hbalock);
4424
4425 /* Read the port's service parameters. */
4426 lpfc_read_sparam(phba, mboxq, vport->vpi);
4427 mboxq->vport = vport;
4428 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4429 mp = (struct lpfc_dmabuf *) mboxq->context1;
4430 if (rc == MBX_SUCCESS) {
4431 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
4432 rc = 0;
4433 }
4434
4435 /*
4436 * This memory was allocated by the lpfc_read_sparam routine. Release
4437 * it to the mbuf pool.
4438 */
4439 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4440 kfree(mp);
4441 mboxq->context1 = NULL;
4442 if (unlikely(rc)) {
4443 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4444 "0382 READ_SPARAM command failed "
4445 "status %d, mbxStatus x%x\n",
4446 rc, bf_get(lpfc_mqe_status, mqe));
4447 phba->link_state = LPFC_HBA_ERROR;
4448 rc = -EIO;
4449 goto out_free_vpd;
4450 }
4451
4452 if (phba->cfg_soft_wwnn)
4453 u64_to_wwn(phba->cfg_soft_wwnn,
4454 vport->fc_sparam.nodeName.u.wwn);
4455 if (phba->cfg_soft_wwpn)
4456 u64_to_wwn(phba->cfg_soft_wwpn,
4457 vport->fc_sparam.portName.u.wwn);
4458 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
4459 sizeof(struct lpfc_name));
4460 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
4461 sizeof(struct lpfc_name));
4462
4463 /* Update the fc_host data structures with new wwn. */
4464 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4465 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4466
4467 /* Register SGL pool to the device using non-embedded mailbox command */
4468 rc = lpfc_sli4_post_sgl_list(phba);
4469 if (unlikely(rc)) {
4470 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4471 "0582 Error %d during sgl post operation", rc);
4472 rc = -ENODEV;
4473 goto out_free_vpd;
4474 }
4475
4476 /* Register SCSI SGL pool to the device */
4477 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
4478 if (unlikely(rc)) {
4479 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4480			"0383 Error %d during scsi sgl post operation",
4481 rc);
4482 /* Some Scsi buffers were moved to the abort scsi list */
4483 /* A pci function reset will repost them */
4484 rc = -ENODEV;
4485 goto out_free_vpd;
4486 }
4487
4488 /* Post the rpi header region to the device. */
4489 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
4490 if (unlikely(rc)) {
4491 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4492 "0393 Error %d during rpi post operation\n",
4493 rc);
4494 rc = -ENODEV;
4495 goto out_free_vpd;
4496 }
4497 if (phba->cfg_enable_fip)
4498 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 1);
4499 else
4500 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
4501
4502 /* Set up all the queues to the device */
4503 rc = lpfc_sli4_queue_setup(phba);
4504 if (unlikely(rc)) {
4505 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4506 "0381 Error %d during queue setup.\n ", rc);
4507 goto out_stop_timers;
4508 }
4509
4510 /* Arm the CQs and then EQs on device */
4511 lpfc_sli4_arm_cqeq_intr(phba);
4512
4513 /* Indicate device interrupt mode */
4514 phba->sli4_hba.intr_enable = 1;
4515
4516 /* Allow asynchronous mailbox command to go through */
4517 spin_lock_irq(&phba->hbalock);
4518 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4519 spin_unlock_irq(&phba->hbalock);
4520
4521 /* Post receive buffers to the device */
4522 lpfc_sli4_rb_setup(phba);
4523
4524 /* Start the ELS watchdog timer */
4525 /*
4526 * The driver for SLI4 is not yet ready to process timeouts
4527 * or interrupts. Once it is, the comment bars can be removed.
4528 */
4529 /* mod_timer(&vport->els_tmofunc,
4530 * jiffies + HZ * (phba->fc_ratov*2)); */
4531
4532 /* Start heart beat timer */
4533 mod_timer(&phba->hb_tmofunc,
4534 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
4535 phba->hb_outstanding = 0;
4536 phba->last_completion_time = jiffies;
4537
4538 /* Start error attention (ERATT) polling timer */
4539 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
4540
4541 /*
4542 * The port is ready, set the host's link state to LINK_DOWN
4543 * in preparation for link interrupts.
4544 */
4545 lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed);
4546 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4547 lpfc_set_loopback_flag(phba);
4548 /* Change driver state to LPFC_LINK_DOWN right before init link */
4549 spin_lock_irq(&phba->hbalock);
4550 phba->link_state = LPFC_LINK_DOWN;
4551 spin_unlock_irq(&phba->hbalock);
4552 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
4553 if (unlikely(rc != MBX_NOT_FINISHED)) {
4554 kfree(vpd);
4555 return 0;
4556 } else
4557 rc = -EIO;
4558
4559 /* Unset all the queues set up in this routine when error out */
4560 if (rc)
4561 lpfc_sli4_queue_unset(phba);
4562
4563out_stop_timers:
4564 if (rc)
4565 lpfc_stop_hba_timers(phba);
4566out_free_vpd:
4567 kfree(vpd);
4568out_free_mbox:
4569 mempool_free(mboxq, phba->mbox_mem_pool);
4570 return rc;
4571}
3203 4572
3204/** 4573/**
3205 * lpfc_mbox_timeout - Timeout call back function for mbox timer 4574 * lpfc_mbox_timeout - Timeout call back function for mbox timer
@@ -3244,7 +4613,7 @@ void
3244lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 4613lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3245{ 4614{
3246 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 4615 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
3247 MAILBOX_t *mb = &pmbox->mb; 4616 MAILBOX_t *mb = &pmbox->u.mb;
3248 struct lpfc_sli *psli = &phba->sli; 4617 struct lpfc_sli *psli = &phba->sli;
3249 struct lpfc_sli_ring *pring; 4618 struct lpfc_sli_ring *pring;
3250 4619
@@ -3281,7 +4650,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3281 spin_unlock_irq(&phba->pport->work_port_lock); 4650 spin_unlock_irq(&phba->pport->work_port_lock);
3282 spin_lock_irq(&phba->hbalock); 4651 spin_lock_irq(&phba->hbalock);
3283 phba->link_state = LPFC_LINK_UNKNOWN; 4652 phba->link_state = LPFC_LINK_UNKNOWN;
3284 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 4653 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3285 spin_unlock_irq(&phba->hbalock); 4654 spin_unlock_irq(&phba->hbalock);
3286 4655
3287 pring = &psli->ring[psli->fcp_ring]; 4656 pring = &psli->ring[psli->fcp_ring];
@@ -3289,32 +4658,20 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3289 4658
3290 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4659 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3291 "0345 Resetting board due to mailbox timeout\n"); 4660 "0345 Resetting board due to mailbox timeout\n");
3292 /* 4661
3293 * lpfc_offline calls lpfc_sli_hba_down which will clean up 4662 /* Reset the HBA device */
3294 * on oustanding mailbox commands. 4663 lpfc_reset_hba(phba);
3295 */
3296 /* If resets are disabled then set error state and return. */
3297 if (!phba->cfg_enable_hba_reset) {
3298 phba->link_state = LPFC_HBA_ERROR;
3299 return;
3300 }
3301 lpfc_offline_prep(phba);
3302 lpfc_offline(phba);
3303 lpfc_sli_brdrestart(phba);
3304 lpfc_online(phba);
3305 lpfc_unblock_mgmt_io(phba);
3306 return;
3307} 4664}
3308 4665
3309/** 4666/**
3310 * lpfc_sli_issue_mbox - Issue a mailbox command to firmware 4667 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
3311 * @phba: Pointer to HBA context object. 4668 * @phba: Pointer to HBA context object.
3312 * @pmbox: Pointer to mailbox object. 4669 * @pmbox: Pointer to mailbox object.
3313 * @flag: Flag indicating how the mailbox need to be processed. 4670 * @flag: Flag indicating how the mailbox need to be processed.
3314 * 4671 *
3315 * This function is called by discovery code and HBA management code 4672 * This function is called by discovery code and HBA management code
3316 * to submit a mailbox command to firmware. This function gets the
3317 * hbalock to protect the data structures.
4673 * to submit a mailbox command to firmware with SLI-3 interface spec. This
4674 * function gets the hbalock to protect the data structures.
3318 * The mailbox command can be submitted in polling mode, in which case 4675 * The mailbox command can be submitted in polling mode, in which case
3319 * this function will wait in a polling loop for the completion of the 4676 * this function will wait in a polling loop for the completion of the
3320 * mailbox. 4677 * mailbox.
@@ -3332,8 +4689,9 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3332 * return codes the caller owns the mailbox command after the return of 4689 * return codes the caller owns the mailbox command after the return of
3333 * the function. 4690 * the function.
3334 **/ 4691 **/
3335int 4692static int
3336lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 4693lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
4694 uint32_t flag)
3337{ 4695{
3338 MAILBOX_t *mb; 4696 MAILBOX_t *mb;
3339 struct lpfc_sli *psli = &phba->sli; 4697 struct lpfc_sli *psli = &phba->sli;
@@ -3349,6 +4707,10 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3349 spin_lock_irqsave(&phba->hbalock, drvr_flag); 4707 spin_lock_irqsave(&phba->hbalock, drvr_flag);
3350 if (!pmbox) { 4708 if (!pmbox) {
3351 /* processing mbox queue from intr_handler */ 4709 /* processing mbox queue from intr_handler */
4710 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
4711 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4712 return MBX_SUCCESS;
4713 }
3352 processing_queue = 1; 4714 processing_queue = 1;
3353 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4715 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3354 pmbox = lpfc_mbox_get(phba); 4716 pmbox = lpfc_mbox_get(phba);
@@ -3365,7 +4727,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3365 lpfc_printf_log(phba, KERN_ERR, 4727 lpfc_printf_log(phba, KERN_ERR,
3366 LOG_MBOX | LOG_VPORT, 4728 LOG_MBOX | LOG_VPORT,
3367 "1806 Mbox x%x failed. No vport\n", 4729 "1806 Mbox x%x failed. No vport\n",
3368 pmbox->mb.mbxCommand); 4730 pmbox->u.mb.mbxCommand);
3369 dump_stack(); 4731 dump_stack();
3370 goto out_not_finished; 4732 goto out_not_finished;
3371 } 4733 }
@@ -3385,21 +4747,29 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3385 4747
3386 psli = &phba->sli; 4748 psli = &phba->sli;
3387 4749
3388 mb = &pmbox->mb; 4750 mb = &pmbox->u.mb;
3389 status = MBX_SUCCESS; 4751 status = MBX_SUCCESS;
3390 4752
3391 if (phba->link_state == LPFC_HBA_ERROR) { 4753 if (phba->link_state == LPFC_HBA_ERROR) {
3392 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4754 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3393 4755
3394 /* Mbox command <mbxCommand> cannot issue */ 4756 /* Mbox command <mbxCommand> cannot issue */
3395 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4757 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4758 "(%d):0311 Mailbox command x%x cannot "
4759 "issue Data: x%x x%x\n",
4760 pmbox->vport ? pmbox->vport->vpi : 0,
4761 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
3396 goto out_not_finished; 4762 goto out_not_finished;
3397 } 4763 }
3398 4764
3399 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 4765 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
3400 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 4766 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
3401 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4767 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3402 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4768 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4769 "(%d):2528 Mailbox command x%x cannot "
4770 "issue Data: x%x x%x\n",
4771 pmbox->vport ? pmbox->vport->vpi : 0,
4772 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
3403 goto out_not_finished; 4773 goto out_not_finished;
3404 } 4774 }
3405 4775
@@ -3413,14 +4783,24 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3413 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4783 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3414 4784
3415 /* Mbox command <mbxCommand> cannot issue */ 4785 /* Mbox command <mbxCommand> cannot issue */
3416 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4786 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4787 "(%d):2529 Mailbox command x%x "
4788 "cannot issue Data: x%x x%x\n",
4789 pmbox->vport ? pmbox->vport->vpi : 0,
4790 pmbox->u.mb.mbxCommand,
4791 psli->sli_flag, flag);
3417 goto out_not_finished; 4792 goto out_not_finished;
3418 } 4793 }
3419 4794
3420 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { 4795 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
3421 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4796 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3422 /* Mbox command <mbxCommand> cannot issue */ 4797 /* Mbox command <mbxCommand> cannot issue */
3423 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4798 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4799 "(%d):2530 Mailbox command x%x "
4800 "cannot issue Data: x%x x%x\n",
4801 pmbox->vport ? pmbox->vport->vpi : 0,
4802 pmbox->u.mb.mbxCommand,
4803 psli->sli_flag, flag);
3424 goto out_not_finished; 4804 goto out_not_finished;
3425 } 4805 }
3426 4806
@@ -3462,12 +4842,17 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3462 4842
3463 /* If we are not polling, we MUST be in SLI2 mode */ 4843 /* If we are not polling, we MUST be in SLI2 mode */
3464 if (flag != MBX_POLL) { 4844 if (flag != MBX_POLL) {
3465 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) && 4845 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
3466 (mb->mbxCommand != MBX_KILL_BOARD)) { 4846 (mb->mbxCommand != MBX_KILL_BOARD)) {
3467 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4847 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3468 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4848 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3469 /* Mbox command <mbxCommand> cannot issue */ 4849 /* Mbox command <mbxCommand> cannot issue */
3470 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4850 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4851 "(%d):2531 Mailbox command x%x "
4852 "cannot issue Data: x%x x%x\n",
4853 pmbox->vport ? pmbox->vport->vpi : 0,
4854 pmbox->u.mb.mbxCommand,
4855 psli->sli_flag, flag);
3471 goto out_not_finished; 4856 goto out_not_finished;
3472 } 4857 }
3473 /* timeout active mbox command */ 4858 /* timeout active mbox command */
@@ -3506,7 +4891,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3506 /* next set own bit for the adapter and copy over command word */ 4891 /* next set own bit for the adapter and copy over command word */
3507 mb->mbxOwner = OWN_CHIP; 4892 mb->mbxOwner = OWN_CHIP;
3508 4893
3509 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 4894 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3510 /* First copy command data to host SLIM area */ 4895 /* First copy command data to host SLIM area */
3511 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 4896 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
3512 } else { 4897 } else {
@@ -3529,7 +4914,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3529 4914
3530 if (mb->mbxCommand == MBX_CONFIG_PORT) { 4915 if (mb->mbxCommand == MBX_CONFIG_PORT) {
3531 /* switch over to host mailbox */ 4916 /* switch over to host mailbox */
3532 psli->sli_flag |= LPFC_SLI2_ACTIVE; 4917 psli->sli_flag |= LPFC_SLI_ACTIVE;
3533 } 4918 }
3534 } 4919 }
3535 4920
@@ -3552,7 +4937,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3552 writel(CA_MBATT, phba->CAregaddr); 4937 writel(CA_MBATT, phba->CAregaddr);
3553 readl(phba->CAregaddr); /* flush */ 4938 readl(phba->CAregaddr); /* flush */
3554 4939
3555 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 4940 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3556 /* First read mbox status word */ 4941 /* First read mbox status word */
3557 word0 = *((uint32_t *)phba->mbox); 4942 word0 = *((uint32_t *)phba->mbox);
3558 word0 = le32_to_cpu(word0); 4943 word0 = le32_to_cpu(word0);
@@ -3591,7 +4976,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3591 spin_lock_irqsave(&phba->hbalock, drvr_flag); 4976 spin_lock_irqsave(&phba->hbalock, drvr_flag);
3592 } 4977 }
3593 4978
3594 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 4979 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3595 /* First copy command data */ 4980 /* First copy command data */
3596 word0 = *((uint32_t *)phba->mbox); 4981 word0 = *((uint32_t *)phba->mbox);
3597 word0 = le32_to_cpu(word0); 4982 word0 = le32_to_cpu(word0);
@@ -3604,7 +4989,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3604 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 4989 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
3605 && slimmb->mbxStatus) { 4990 && slimmb->mbxStatus) {
3606 psli->sli_flag &= 4991 psli->sli_flag &=
3607 ~LPFC_SLI2_ACTIVE; 4992 ~LPFC_SLI_ACTIVE;
3608 word0 = slimword0; 4993 word0 = slimword0;
3609 } 4994 }
3610 } 4995 }
@@ -3616,7 +5001,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3616 ha_copy = readl(phba->HAregaddr); 5001 ha_copy = readl(phba->HAregaddr);
3617 } 5002 }
3618 5003
3619 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 5004 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3620 /* copy results back to user */ 5005 /* copy results back to user */
3621 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); 5006 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
3622 } else { 5007 } else {
@@ -3643,13 +5028,527 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3643 5028
3644out_not_finished: 5029out_not_finished:
3645 if (processing_queue) { 5030 if (processing_queue) {
3646 pmbox->mb.mbxStatus = MBX_NOT_FINISHED; 5031 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
3647 lpfc_mbox_cmpl_put(phba, pmbox); 5032 lpfc_mbox_cmpl_put(phba, pmbox);
3648 } 5033 }
3649 return MBX_NOT_FINISHED; 5034 return MBX_NOT_FINISHED;
3650} 5035}
3651 5036
3652/** 5037/**
5038 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
5039 * @phba: Pointer to HBA context object.
5040 *
5041 * The function blocks the posting of SLI4 asynchronous mailbox commands from
5042 * the driver internal pending mailbox queue. It will then try to wait out the
5043 * possible outstanding mailbox command before return.
5044 *
5045 * Returns:
5046 * 0 - the outstanding mailbox command completed.
5047 * 1 - the wait for the outstanding mailbox command timed out.
5048 **/
5049static int
5050lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
5051{
5052 struct lpfc_sli *psli = &phba->sli;
5053 uint8_t actcmd = MBX_HEARTBEAT;
5054 int rc = 0;
5055 unsigned long timeout;
5056
5057 /* Mark the asynchronous mailbox command posting as blocked */
5058 spin_lock_irq(&phba->hbalock);
5059 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
5060 if (phba->sli.mbox_active)
5061 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
5062 spin_unlock_irq(&phba->hbalock);
5063 /* Determine how long we might wait for the active mailbox
5064 * command to be gracefully completed by firmware.
5065 */
5066 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
5067 jiffies;
5068 /* Wait for the outstanding mailbox command to complete */
5069 while (phba->sli.mbox_active) {
5070 /* Check active mailbox complete status every 2ms */
5071 msleep(2);
5072 if (time_after(jiffies, timeout)) {
5073 /* Timeout, marked the outstanding cmd not complete */
5074 rc = 1;
5075 break;
5076 }
5077 }
5078
5079 /* Could not cleanly block the async mailbox command, so fail it */
5080 if (rc) {
5081 spin_lock_irq(&phba->hbalock);
5082 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5083 spin_unlock_irq(&phba->hbalock);
5084 }
5085 return rc;
5086}
5087
5088/**
5089 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
5090 * @phba: Pointer to HBA context object.
5091 *
5092 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
5093 * commands from the driver internal pending mailbox queue. It makes sure
5094 * that there is no outstanding mailbox command before resuming posting
5095 * asynchronous mailbox commands. If, for any reason, there is an outstanding
5096 * mailbox command, it will try to wait it out before resuming asynchronous
5097 * mailbox command posting.
5098 **/
5099static void
5100lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
5101{
5102 struct lpfc_sli *psli = &phba->sli;
5103
5104 spin_lock_irq(&phba->hbalock);
5105 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
5106 /* Asynchronous mailbox posting is not blocked, do nothing */
5107 spin_unlock_irq(&phba->hbalock);
5108 return;
5109 }
5110
5111 /* The outstanding synchronous mailbox command is guaranteed to be
5112 * done, whether it succeeded or timed out; after a timeout the
5113 * outstanding command is always removed, so just unblock async
5114 * mailbox command posting and resume.
5115 */
5116 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5117 spin_unlock_irq(&phba->hbalock);
5118
5119 /* wake up worker thread to post asynchronous mailbox command */
5120 lpfc_worker_wake_up(phba);
5121}
5122
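The block/unblock pair above is meant to bracket work that must not race with asynchronous mailbox posting: set the blocking flag first, wait out any active command with a timeout, and clear the flag when done. A self-contained sketch of that gating pattern follows, with illustrative names that are not lpfc API.

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

/* Model of the gate: set the blocking flag first, then wait out any
 * active command with a timeout. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool posting_blocked;  /* models the LPFC_SLI_ASYNC_MBX_BLK flag */
static bool command_active;   /* models phba->sli.mbox_active != NULL */

/* Returns 0 if the active command drained in time, 1 on timeout. */
static int block_async_posting(int timeout_ms)
{
	int waited_ms = 0;

	pthread_mutex_lock(&lock);
	posting_blocked = true;           /* stop new async posts first */
	pthread_mutex_unlock(&lock);

	while (command_active) {          /* then wait out the active one */
		usleep(2000);             /* poll every 2 ms, as the driver does */
		waited_ms += 2;
		if (waited_ms >= timeout_ms) {
			pthread_mutex_lock(&lock);
			posting_blocked = false;  /* could not block cleanly */
			pthread_mutex_unlock(&lock);
			return 1;
		}
	}
	return 0;
}

static void unblock_async_posting(void)
{
	pthread_mutex_lock(&lock);
	posting_blocked = false;          /* allow posting again */
	pthread_mutex_unlock(&lock);
	/* The real driver also wakes its worker thread at this point. */
}

int main(void)
{
	if (block_async_posting(100) == 0) {
		/* ... work that must not race with async posting ... */
		unblock_async_posting();
	}
	return 0;
}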
5123/**
5124 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
5125 * @phba: Pointer to HBA context object.
5126 * @mboxq: Pointer to mailbox object.
5127 *
5128 * The function posts a mailbox to the port. The mailbox is expected
5129 * to be comletely filled in and ready for the port to operate on it.
5130 * This routine executes a synchronous completion operation on the
5131 * mailbox by polling for its completion.
5132 *
5133 * The caller must not be holding any locks when calling this routine.
5134 *
5135 * Returns:
5136 * MBX_SUCCESS - mailbox posted successfully
5137 * Any of the MBX error values.
5138 **/
5139static int
5140lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
5141{
5142 int rc = MBX_SUCCESS;
5143 unsigned long iflag;
5144 uint32_t db_ready;
5145 uint32_t mcqe_status;
5146 uint32_t mbx_cmnd;
5147 unsigned long timeout;
5148 struct lpfc_sli *psli = &phba->sli;
5149 struct lpfc_mqe *mb = &mboxq->u.mqe;
5150 struct lpfc_bmbx_create *mbox_rgn;
5151 struct dma_address *dma_address;
5152 struct lpfc_register bmbx_reg;
5153
5154 /*
5155 * Only one mailbox can be active to the bootstrap mailbox region
5156 * at a time and there is no queueing provided.
5157 */
5158 spin_lock_irqsave(&phba->hbalock, iflag);
5159 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
5160 spin_unlock_irqrestore(&phba->hbalock, iflag);
5161 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5162 "(%d):2532 Mailbox command x%x (x%x) "
5163 "cannot issue Data: x%x x%x\n",
5164 mboxq->vport ? mboxq->vport->vpi : 0,
5165 mboxq->u.mb.mbxCommand,
5166 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5167 psli->sli_flag, MBX_POLL);
5168 return MBXERR_ERROR;
5169 }
5170 /* The server grabs the token and owns it until release */
5171 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5172 phba->sli.mbox_active = mboxq;
5173 spin_unlock_irqrestore(&phba->hbalock, iflag);
5174
5175 /*
5176 * Initialize the bootstrap memory region to avoid stale data areas
5177 * in the mailbox post. Then copy the caller's mailbox contents to
5178 * the bmbx mailbox region.
5179 */
5180 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
5181 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
5182 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
5183 sizeof(struct lpfc_mqe));
5184
5185 /* Post the high mailbox dma address to the port and wait for ready. */
5186 dma_address = &phba->sli4_hba.bmbx.dma_address;
5187 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
5188
5189 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
5190 * 1000) + jiffies;
5191 do {
5192 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
5193 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
5194 if (!db_ready)
5195 msleep(2);
5196
5197 if (time_after(jiffies, timeout)) {
5198 rc = MBXERR_ERROR;
5199 goto exit;
5200 }
5201 } while (!db_ready);
5202
5203 /* Post the low mailbox dma address to the port. */
5204 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
5205 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
5206 * 1000) + jiffies;
5207 do {
5208 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
5209 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
5210 if (!db_ready)
5211 msleep(2);
5212
5213 if (time_after(jiffies, timeout)) {
5214 rc = MBXERR_ERROR;
5215 goto exit;
5216 }
5217 } while (!db_ready);
5218
5219 /*
5220 * Read the CQ to ensure the mailbox has completed.
5221 * If so, update the mailbox status so that the upper layers
5222 * can complete the request normally.
5223 */
5224 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
5225 sizeof(struct lpfc_mqe));
5226 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
5227 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
5228 sizeof(struct lpfc_mcqe));
5229 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
5230
5231 /* Prefix the mailbox status with range x4000 to note SLI4 status. */
5232 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
5233 bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
5234 rc = MBXERR_ERROR;
5235 }
5236
5237 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5238 "(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
5239 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
5240 " x%x x%x CQ: x%x x%x x%x x%x\n",
5241 mboxq->vport ? mboxq->vport->vpi : 0,
5242 mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq),
5243 bf_get(lpfc_mqe_status, mb),
5244 mb->un.mb_words[0], mb->un.mb_words[1],
5245 mb->un.mb_words[2], mb->un.mb_words[3],
5246 mb->un.mb_words[4], mb->un.mb_words[5],
5247 mb->un.mb_words[6], mb->un.mb_words[7],
5248 mb->un.mb_words[8], mb->un.mb_words[9],
5249 mb->un.mb_words[10], mb->un.mb_words[11],
5250 mb->un.mb_words[12], mboxq->mcqe.word0,
5251 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5252 mboxq->mcqe.trailer);
5253exit:
5254 /* We are holding the token; release it now */
5255 spin_lock_irqsave(&phba->hbalock, iflag);
5256 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5257 phba->sli.mbox_active = NULL;
5258 spin_unlock_irqrestore(&phba->hbalock, iflag);
5259 return rc;
5260}
5261
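A minimal standalone sketch (not lpfc code) of the poll-until-ready-or-timeout pattern used twice above for the bootstrap mailbox doorbell: read the ready bit, sleep 2 ms between reads, and give up when the deadline passes. read_doorbell_ready() is a made-up stand-in for reading BMBXregaddr and testing lpfc_bmbx_rdy.

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

/* Stand-in for readl(BMBXregaddr) + bf_get(lpfc_bmbx_rdy, ...). */
static bool read_doorbell_ready(void)
{
	static int polls;
	return ++polls >= 3;              /* pretend ready on the 3rd read */
}

/* Poll every 2 ms until ready or until timeout_ms expires.
 * Returns 0 when ready, -1 on timeout (MBXERR_ERROR in the driver). */
static int wait_bootstrap_ready(unsigned int timeout_ms)
{
	const struct timespec two_ms = { 0, 2 * 1000 * 1000 };
	unsigned int polls_left = timeout_ms / 2;

	while (!read_doorbell_ready()) {
		if (polls_left-- == 0)
			return -1;
		nanosleep(&two_ms, NULL);
	}
	return 0;
}

int main(void)
{
	printf("doorbell %s\n",
	       wait_bootstrap_ready(100) == 0 ? "ready" : "timed out");
	return 0;
}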
5262/**
5263 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
5264 * @phba: Pointer to HBA context object.
5265 * @pmbox: Pointer to mailbox object.
5266 * @flag: Flag indicating how the mailbox needs to be processed.
5267 *
5268 * This function is called by discovery code and HBA management code to submit
5269 * a mailbox command to firmware with SLI-4 interface spec.
5270 *
5271 * Return codes: the caller owns the mailbox command after the function
5272 * returns.
5273 **/
5274static int
5275lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5276 uint32_t flag)
5277{
5278 struct lpfc_sli *psli = &phba->sli;
5279 unsigned long iflags;
5280 int rc;
5281
5282 /* Detect polling mode and jump to a handler */
5283 if (!phba->sli4_hba.intr_enable) {
5284 if (flag == MBX_POLL)
5285 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
5286 else
5287 rc = -EIO;
5288 if (rc != MBX_SUCCESS)
5289 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5290 "(%d):2541 Mailbox command x%x "
5291 "(x%x) cannot issue Data: x%x x%x\n",
5292 mboxq->vport ? mboxq->vport->vpi : 0,
5293 mboxq->u.mb.mbxCommand,
5294 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5295 psli->sli_flag, flag);
5296 return rc;
5297 } else if (flag == MBX_POLL) {
5298 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
5299 "(%d):2542 Try to issue mailbox command "
5300 "x%x (x%x) synchronously ahead of async"
5301 "mailbox command queue: x%x x%x\n",
5302 mboxq->vport ? mboxq->vport->vpi : 0,
5303 mboxq->u.mb.mbxCommand,
5304 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5305 psli->sli_flag, flag);
5306 /* Try to block the asynchronous mailbox posting */
5307 rc = lpfc_sli4_async_mbox_block(phba);
5308 if (!rc) {
5309 /* Successfully blocked, now issue sync mbox cmd */
5310 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
5311 if (rc != MBX_SUCCESS)
5312 lpfc_printf_log(phba, KERN_ERR,
5313 LOG_MBOX | LOG_SLI,
5314 "(%d):2597 Mailbox command "
5315 "x%x (x%x) cannot issue "
5316 "Data: x%x x%x\n",
5317 mboxq->vport ?
5318 mboxq->vport->vpi : 0,
5319 mboxq->u.mb.mbxCommand,
5320 lpfc_sli4_mbox_opcode_get(phba,
5321 mboxq),
5322 psli->sli_flag, flag);
5323 /* Unblock the async mailbox posting afterward */
5324 lpfc_sli4_async_mbox_unblock(phba);
5325 }
5326 return rc;
5327 }
5328
5329 /* Now, interrupt mode asynchronous mailbox command */
5330 rc = lpfc_mbox_cmd_check(phba, mboxq);
5331 if (rc) {
5332 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5333 "(%d):2543 Mailbox command x%x (x%x) "
5334 "cannot issue Data: x%x x%x\n",
5335 mboxq->vport ? mboxq->vport->vpi : 0,
5336 mboxq->u.mb.mbxCommand,
5337 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5338 psli->sli_flag, flag);
5339 goto out_not_finished;
5340 }
5341 rc = lpfc_mbox_dev_check(phba);
5342 if (unlikely(rc)) {
5343 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5344 "(%d):2544 Mailbox command x%x (x%x) "
5345 "cannot issue Data: x%x x%x\n",
5346 mboxq->vport ? mboxq->vport->vpi : 0,
5347 mboxq->u.mb.mbxCommand,
5348 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5349 psli->sli_flag, flag);
5350 goto out_not_finished;
5351 }
5352
5353 /* Put the mailbox command to the driver internal FIFO */
5354 psli->slistat.mbox_busy++;
5355 spin_lock_irqsave(&phba->hbalock, iflags);
5356 lpfc_mbox_put(phba, mboxq);
5357 spin_unlock_irqrestore(&phba->hbalock, iflags);
5358 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5359 "(%d):0354 Mbox cmd issue - Enqueue Data: "
5360 "x%x (x%x) x%x x%x x%x\n",
5361 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
5362 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5363 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5364 phba->pport->port_state,
5365 psli->sli_flag, MBX_NOWAIT);
5366 /* Wake up worker thread to transport mailbox command from head */
5367 lpfc_worker_wake_up(phba);
5368
5369 return MBX_BUSY;
5370
5371out_not_finished:
5372 return MBX_NOT_FINISHED;
5373}
5374
5375/**
5376 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
5377 * @phba: Pointer to HBA context object.
5378 *
5379 * This function is called by the worker thread to send a mailbox command to
5380 * SLI4 HBA firmware.
5381 *
5382 **/
5383int
5384lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
5385{
5386 struct lpfc_sli *psli = &phba->sli;
5387 LPFC_MBOXQ_t *mboxq;
5388 int rc = MBX_SUCCESS;
5389 unsigned long iflags;
5390 struct lpfc_mqe *mqe;
5391 uint32_t mbx_cmnd;
5392
5393 /* Check interrupt mode before posting the async mailbox command */
5394 if (unlikely(!phba->sli4_hba.intr_enable))
5395 return MBX_NOT_FINISHED;
5396
5397 /* Check for mailbox command service token */
5398 spin_lock_irqsave(&phba->hbalock, iflags);
5399 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
5400 spin_unlock_irqrestore(&phba->hbalock, iflags);
5401 return MBX_NOT_FINISHED;
5402 }
5403 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
5404 spin_unlock_irqrestore(&phba->hbalock, iflags);
5405 return MBX_NOT_FINISHED;
5406 }
5407 if (unlikely(phba->sli.mbox_active)) {
5408 spin_unlock_irqrestore(&phba->hbalock, iflags);
5409 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5410 "0384 There is pending active mailbox cmd\n");
5411 return MBX_NOT_FINISHED;
5412 }
5413 /* Take the mailbox command service token */
5414 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5415
5416 /* Get the next mailbox command from head of queue */
5417 mboxq = lpfc_mbox_get(phba);
5418
5419 /* If no more mailbox commands are waiting to be posted, we're done */
5420 if (!mboxq) {
5421 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5422 spin_unlock_irqrestore(&phba->hbalock, iflags);
5423 return MBX_SUCCESS;
5424 }
5425 phba->sli.mbox_active = mboxq;
5426 spin_unlock_irqrestore(&phba->hbalock, iflags);
5427
5428 /* Check device readiness for posting mailbox command */
5429 rc = lpfc_mbox_dev_check(phba);
5430 if (unlikely(rc))
5431 /* Driver clean routine will clean up pending mailbox */
5432 goto out_not_finished;
5433
5434 /* Prepare the mbox command to be posted */
5435 mqe = &mboxq->u.mqe;
5436 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
5437
5438 /* Start timer for the mbox_tmo and log some mailbox post messages */
5439 mod_timer(&psli->mbox_tmo, (jiffies +
5440 (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd))));
5441
5442 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5443 "(%d):0355 Mailbox cmd x%x (x%x) issue Data: "
5444 "x%x x%x\n",
5445 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
5446 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5447 phba->pport->port_state, psli->sli_flag);
5448
5449 if (mbx_cmnd != MBX_HEARTBEAT) {
5450 if (mboxq->vport) {
5451 lpfc_debugfs_disc_trc(mboxq->vport,
5452 LPFC_DISC_TRC_MBOX_VPORT,
5453 "MBOX Send vport: cmd:x%x mb:x%x x%x",
5454 mbx_cmnd, mqe->un.mb_words[0],
5455 mqe->un.mb_words[1]);
5456 } else {
5457 lpfc_debugfs_disc_trc(phba->pport,
5458 LPFC_DISC_TRC_MBOX,
5459 "MBOX Send: cmd:x%x mb:x%x x%x",
5460 mbx_cmnd, mqe->un.mb_words[0],
5461 mqe->un.mb_words[1]);
5462 }
5463 }
5464 psli->slistat.mbox_cmd++;
5465
5466 /* Post the mailbox command to the port */
5467 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
5468 if (rc != MBX_SUCCESS) {
5469 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5470 "(%d):2533 Mailbox command x%x (x%x) "
5471 "cannot issue Data: x%x x%x\n",
5472 mboxq->vport ? mboxq->vport->vpi : 0,
5473 mboxq->u.mb.mbxCommand,
5474 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5475 psli->sli_flag, MBX_NOWAIT);
5476 goto out_not_finished;
5477 }
5478
5479 return rc;
5480
5481out_not_finished:
5482 spin_lock_irqsave(&phba->hbalock, iflags);
5483 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
5484 __lpfc_mbox_cmpl_put(phba, mboxq);
5485 /* Release the token */
5486 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5487 phba->sli.mbox_active = NULL;
5488 spin_unlock_irqrestore(&phba->hbalock, iflags);
5489
5490 return MBX_NOT_FINISHED;
5491}
5492
5493/**
5494 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
5495 * @phba: Pointer to HBA context object.
5496 * @pmbox: Pointer to mailbox object.
5497 * @flag: Flag indicating how the mailbox needs to be processed.
5498 *
5499 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine,
5500 * dispatching through the API jump table function pointer in the lpfc_hba struct.
5501 *
5502 * Return codes: the caller owns the mailbox command after the function
5503 * returns.
5504 **/
5505int
5506lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
5507{
5508 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
5509}
5510
5511/**
5512 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
5513 * @phba: The hba struct for which this call is being executed.
5514 * @dev_grp: The HBA PCI-Device group number.
5515 *
5516 * This routine sets up the mbox interface API function jump table in @phba
5517 * struct.
5518 * Returns: 0 - success, -ENODEV - failure.
5519 **/
5520int
5521lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5522{
5523
5524 switch (dev_grp) {
5525 case LPFC_PCI_DEV_LP:
5526 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
5527 phba->lpfc_sli_handle_slow_ring_event =
5528 lpfc_sli_handle_slow_ring_event_s3;
5529 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
5530 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
5531 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
5532 break;
5533 case LPFC_PCI_DEV_OC:
5534 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
5535 phba->lpfc_sli_handle_slow_ring_event =
5536 lpfc_sli_handle_slow_ring_event_s4;
5537 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
5538 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
5539 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
5540 break;
5541 default:
5542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5543 "1420 Invalid HBA PCI-device group: 0x%x\n",
5544 dev_grp);
5545 return -ENODEV;
5546 break;
5547 }
5548 return 0;
5549}
5550
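lpfc_mbox_api_table_setup() above is a plain function-pointer jump table: each pointer is bound once per PCI device group and every later caller dispatches through it. A standalone sketch of the same pattern follows; the names are illustrative only and are not the lpfc structures.

#include <stdio.h>

enum pci_dev_grp { PCI_DEV_LP, PCI_DEV_OC };

struct hba {
	int (*issue_mbox)(struct hba *hba, int cmd);
};

static int issue_mbox_s3(struct hba *hba, int cmd)
{
	(void)hba;
	printf("SLI-3 path, cmd 0x%x\n", cmd);
	return 0;
}

static int issue_mbox_s4(struct hba *hba, int cmd)
{
	(void)hba;
	printf("SLI-4 path, cmd 0x%x\n", cmd);
	return 0;
}

/* Bind the mailbox entry point once, based on the device group. */
static int mbox_api_table_setup(struct hba *hba, enum pci_dev_grp grp)
{
	switch (grp) {
	case PCI_DEV_LP:
		hba->issue_mbox = issue_mbox_s3;
		break;
	case PCI_DEV_OC:
		hba->issue_mbox = issue_mbox_s4;
		break;
	default:
		return -1;                 /* -ENODEV in the driver */
	}
	return 0;
}

int main(void)
{
	struct hba hba;

	if (mbox_api_table_setup(&hba, PCI_DEV_OC))
		return 1;
	return hba.issue_mbox(&hba, 0x88); /* all callers go through the pointer */
}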
5551/**
3653 * __lpfc_sli_ringtx_put - Add an iocb to the txq 5552 * __lpfc_sli_ringtx_put - Add an iocb to the txq
3654 * @phba: Pointer to HBA context object. 5553 * @phba: Pointer to HBA context object.
3655 * @pring: Pointer to driver SLI ring object. 5554 * @pring: Pointer to driver SLI ring object.
@@ -3701,35 +5600,34 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3701} 5600}
3702 5601
3703/** 5602/**
3704 * __lpfc_sli_issue_iocb - Lockless version of lpfc_sli_issue_iocb 5603 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
3705 * @phba: Pointer to HBA context object. 5604 * @phba: Pointer to HBA context object.
3706 * @pring: Pointer to driver SLI ring object. 5605 * @ring_number: SLI ring number to issue iocb on.
3707 * @piocb: Pointer to command iocb. 5606 * @piocb: Pointer to command iocb.
3708 * @flag: Flag indicating if this command can be put into txq. 5607 * @flag: Flag indicating if this command can be put into txq.
3709 * 5608 *
3710 * __lpfc_sli_issue_iocb is used by other functions in the driver 5609 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
3711 * to issue an iocb command to the HBA. If the PCI slot is recovering 5610 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
3712 * from error state or if HBA is resetting or if LPFC_STOP_IOCB_EVENT 5611 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
3713 * flag is turned on, the function returns IOCB_ERROR. 5612 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
3714 * When the link is down, this function allows only iocbs for 5613 * this function allows only iocbs for posting buffers. This function finds
3715 * posting buffers. 5614 * next available slot in the command ring and posts the command to the
3716 * This function finds next available slot in the command ring and 5615 * available slot and writes the port attention register to request HBA start
3717 * posts the command to the available slot and writes the port 5616 * processing new iocb. If there is no slot available in the ring and
3718 * attention register to request HBA start processing new iocb. 5617 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
3719 * If there is no slot available in the ring and 5618 * the function returns IOCB_BUSY.
3720 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the 5619 *
3721 * txq, otherwise the function returns IOCB_BUSY. 5620 * This function is called with hbalock held. The function will return success
3722 * 5621 * after it successfully submit the iocb to firmware or after adding to the
3723 * This function is called with hbalock held. 5622 * txq.
3724 * The function will return success after it successfully submit the
3725 * iocb to firmware or after adding to the txq.
3726 **/ 5623 **/
3727static int 5624static int
3728__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 5625__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
3729 struct lpfc_iocbq *piocb, uint32_t flag) 5626 struct lpfc_iocbq *piocb, uint32_t flag)
3730{ 5627{
3731 struct lpfc_iocbq *nextiocb; 5628 struct lpfc_iocbq *nextiocb;
3732 IOCB_t *iocb; 5629 IOCB_t *iocb;
5630 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
3733 5631
3734 if (piocb->iocb_cmpl && (!piocb->vport) && 5632 if (piocb->iocb_cmpl && (!piocb->vport) &&
3735 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 5633 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
@@ -3833,6 +5731,493 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3833 return IOCB_BUSY; 5731 return IOCB_BUSY;
3834} 5732}
3835 5733
5734/**
5735 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
5736 * @phba: Pointer to HBA context object.
5737 * @piocb: Pointer to command iocb.
5738 * @sglq: Pointer to the scatter gather queue object.
5739 *
5740 * This routine converts the bpl or bde that is in the IOCB
5741 * to a sgl list for the sli4 hardware. The physical address
5742 * of the bpl/bde is converted back to a virtual address.
5743 * If the IOCB contains a BPL then the list of BDEs is
5744 * converted to sli4_sge entries. If the IOCB contains a single
5745 * BDE then it is converted to a single sli4_sge.
5746 * The IOCB is still in CPU endianness so the contents of
5747 * the bpl can be used without byte swapping.
5748 *
5749 * Returns valid XRI = Success, NO_XRI = Failure.
5750**/
5751static uint16_t
5752lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
5753 struct lpfc_sglq *sglq)
5754{
5755 uint16_t xritag = NO_XRI;
5756 struct ulp_bde64 *bpl = NULL;
5757 struct ulp_bde64 bde;
5758 struct sli4_sge *sgl = NULL;
5759 IOCB_t *icmd;
5760 int numBdes = 0;
5761 int i = 0;
5762
5763 if (!piocbq || !sglq)
5764 return xritag;
5765
5766 sgl = (struct sli4_sge *)sglq->sgl;
5767 icmd = &piocbq->iocb;
5768 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5769 numBdes = icmd->un.genreq64.bdl.bdeSize /
5770 sizeof(struct ulp_bde64);
5771 /* The addrHigh and addrLow fields within the IOCB
5772 * have not been byteswapped yet so there is no
5773 * need to swap them back.
5774 */
5775 bpl = (struct ulp_bde64 *)
5776 ((struct lpfc_dmabuf *)piocbq->context3)->virt;
5777
5778 if (!bpl)
5779 return xritag;
5780
5781 for (i = 0; i < numBdes; i++) {
5782 /* Should already be byte swapped. */
5783 sgl->addr_hi = bpl->addrHigh;
5784 sgl->addr_lo = bpl->addrLow;
5785 /* swap the size field back to the cpu so we
5786 * can assign it to the sgl.
5787 */
5788 bde.tus.w = le32_to_cpu(bpl->tus.w);
5789 bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
5790 if ((i+1) == numBdes)
5791 bf_set(lpfc_sli4_sge_last, sgl, 1);
5792 else
5793 bf_set(lpfc_sli4_sge_last, sgl, 0);
5794 sgl->word2 = cpu_to_le32(sgl->word2);
5795 sgl->word3 = cpu_to_le32(sgl->word3);
5796 bpl++;
5797 sgl++;
5798 }
5799 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
5800 /* The addrHigh and addrLow fields of the BDE have not
5801 * been byteswapped yet so they need to be swapped
5802 * before putting them in the sgl.
5803 */
5804 sgl->addr_hi =
5805 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
5806 sgl->addr_lo =
5807 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
5808 bf_set(lpfc_sli4_sge_len, sgl,
5809 icmd->un.genreq64.bdl.bdeSize);
5810 bf_set(lpfc_sli4_sge_last, sgl, 1);
5811 sgl->word2 = cpu_to_le32(sgl->word2);
5812 sgl->word3 = cpu_to_le32(sgl->word3);
5813 }
5814 return sglq->sli4_xritag;
5815}
5816
5817/**
5818 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
5819 * @phba: Pointer to HBA context object.
5820 * @piocb: Pointer to command iocb.
5821 *
5822 * This routine performs round-robin distribution of SCSI commands across
5823 * the SLI4 FCP work queue indexes.
5824 *
5825 * Return: index into SLI4 fast-path FCP queue index.
5826 **/
5827static uint32_t
5828lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
5829{
5830 static uint32_t fcp_qidx;
5831
5832 return fcp_qidx++ % phba->cfg_fcp_wq_count;
5833}
5834
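The WQ index distribution above is just a monotonically increasing counter taken modulo the number of configured FCP work queues. A tiny standalone illustration (not lpfc code):

#include <stdio.h>

/* Round-robin: successive commands land on successive work queues. */
static unsigned int wqidx_distr(unsigned int *counter, unsigned int wq_count)
{
	return (*counter)++ % wq_count;
}

int main(void)
{
	unsigned int counter = 0, wq_count = 4;

	for (int i = 0; i < 8; i++)
		printf("cmd %d -> FCP WQ %u\n", i, wqidx_distr(&counter, wq_count));
	return 0;
}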
5835/**
5836 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
5837 * @phba: Pointer to HBA context object.
5838 * @piocb: Pointer to command iocb.
5839 * @wqe: Pointer to the work queue entry.
5840 *
5841 * This routine converts the iocb command to its Work Queue Entry
5842 * equivalent. The wqe pointer should not have any fields set when
5843 * this routine is called because it will memcpy over them.
5844 * This routine does not set the CQ_ID or the WQEC bits in the
5845 * wqe.
5846 *
5847 * Returns: 0 = Success, IOCB_ERROR = Failure.
5848 **/
5849static int
5850lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5851 union lpfc_wqe *wqe)
5852{
5853 uint32_t payload_len = 0;
5854 uint8_t ct = 0;
5855 uint32_t fip;
5856 uint32_t abort_tag;
5857 uint8_t command_type = ELS_COMMAND_NON_FIP;
5858 uint8_t cmnd;
5859 uint16_t xritag;
5860 struct ulp_bde64 *bpl = NULL;
5861
5862 fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags);
5863 /* The fcp commands will set command type */
5864 if (iocbq->iocb_flag & LPFC_IO_FCP)
5865 command_type = FCP_COMMAND;
5866 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS))
5867 command_type = ELS_COMMAND_FIP;
5868 else
5869 command_type = ELS_COMMAND_NON_FIP;
5870
5871 /* Some of the fields are in the right position already */
5872 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
5873 abort_tag = (uint32_t) iocbq->iotag;
5874 xritag = iocbq->sli4_xritag;
5875 wqe->words[7] = 0; /* The ct field has moved so reset */
5876 /* words0-2 bpl convert bde */
5877 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5878 bpl = (struct ulp_bde64 *)
5879 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
5880 if (!bpl)
5881 return IOCB_ERROR;
5882
5883 /* Should already be byte swapped. */
5884 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
5885 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
5886 /* swap the size field back to the cpu so we
5887 * can assign it to the sgl.
5888 */
5889 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
5890 payload_len = wqe->generic.bde.tus.f.bdeSize;
5891 } else
5892 payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
5893
5894 iocbq->iocb.ulpIoTag = iocbq->iotag;
5895 cmnd = iocbq->iocb.ulpCommand;
5896
5897 switch (iocbq->iocb.ulpCommand) {
5898 case CMD_ELS_REQUEST64_CR:
5899 if (!iocbq->iocb.ulpLe) {
5900 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5901 "2007 Only Limited Edition cmd Format"
5902 " supported 0x%x\n",
5903 iocbq->iocb.ulpCommand);
5904 return IOCB_ERROR;
5905 }
5906 wqe->els_req.payload_len = payload_len;
5907 /* Els_request64 has a TMO */
5908 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
5909 iocbq->iocb.ulpTimeout);
5910 /* Need a VF for word 4 set the vf bit*/
5911 bf_set(els_req64_vf, &wqe->els_req, 0);
5912 /* And a VFID for word 12 */
5913 bf_set(els_req64_vfid, &wqe->els_req, 0);
5914 /*
5915 * Set ct field to 3, indicates that the context_tag field
5916 * contains the FCFI and remote N_Port_ID is
5917 * in word 5.
5918 */
5919
5920 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
5921 bf_set(lpfc_wqe_gen_context, &wqe->generic,
5922 iocbq->iocb.ulpContext);
5923
5924 bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
5925 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5926 /* CCP CCPE PV PRI in word10 were set in the memcpy */
5927 break;
5928 case CMD_XMIT_SEQUENCE64_CR:
5929 /* word3 iocb=io_tag32 wqe=payload_offset */
5930 /* payload offset used for multiple outstanding
5931 * sequences on the same exchange
5932 */
5933 wqe->words[3] = 0;
5934 /* word4 relative_offset memcpy */
5935 /* word5 r_ctl/df_ctl memcpy */
5936 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5937 wqe->xmit_sequence.xmit_len = payload_len;
5938 break;
5939 case CMD_XMIT_BCAST64_CN:
5940 /* word3 iocb=iotag32 wqe=payload_len */
5941 wqe->words[3] = 0; /* no definition for this in wqe */
5942 /* word4 iocb=rsvd wqe=rsvd */
5943 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
5944 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
5945 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5946 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5947 break;
5948 case CMD_FCP_IWRITE64_CR:
5949 command_type = FCP_COMMAND_DATA_OUT;
5950 /* The struct for wqe fcp_iwrite has 3 fields that are somewhat
5951 * confusing.
5952 * word3 is payload_len: byte offset to the sgl entry for the
5953 * fcp_command.
5954 * word4 is total xfer len, same as the IOCB->ulpParameter.
5955 * word5 is initial xfer len 0 = wait for xfer-ready
5956 */
5957
5958 /* Always wait for xfer-ready before sending data */
5959 wqe->fcp_iwrite.initial_xfer_len = 0;
5960 /* word 4 (xfer length) should have been set on the memcpy */
5961
5962 /* allow write to fall through to read */
5963 case CMD_FCP_IREAD64_CR:
5964 /* FCP_CMD is always the 1st sgl entry */
5965 wqe->fcp_iread.payload_len =
5966 payload_len + sizeof(struct fcp_rsp);
5967
5968 /* word 4 (xfer length) should have been set on the memcpy */
5969
5970 bf_set(lpfc_wqe_gen_erp, &wqe->generic,
5971 iocbq->iocb.ulpFCP2Rcvy);
5972 bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS);
5973 /* The XC bit and the XS bit are similar. The driver never
5974 * tracked whether or not the exchange was previously open.
5975 * XC = Exchange create, 0 is create. 1 is already open.
5976 * XS = link cmd: 1 do not close the exchange after command.
5977 * XS = 0 close exchange when command completes.
5978 * The only time we would not set the XC bit is when the XS bit
5979 * is set and we are sending our 2nd or greater command on
5980 * this exchange.
5981 */
5982 /* Always open the exchange */
5983 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
5984
5985 wqe->words[10] &= 0xffff0000; /* zero out ebde count */
5986 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5987 break;
5988 case CMD_FCP_ICMND64_CR:
5989 /* Always open the exchange */
5990 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
5991
5992 wqe->words[4] = 0;
5993 wqe->words[10] &= 0xffff0000; /* zero out ebde count */
5994 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5995 break;
5996 case CMD_GEN_REQUEST64_CR:
5997 /* word3 command length is described as byte offset to the
5998 * rsp_data. Would always be 16, sizeof(struct sli4_sge)
5999 * sgl[0] = cmnd
6000 * sgl[1] = rsp.
6001 *
6002 */
6003 wqe->gen_req.command_len = payload_len;
6004 /* Word4 parameter copied in the memcpy */
6005 /* Word5 [rctl, type, df_ctl, la] copied in memcpy */
6006 /* word6 context tag copied in memcpy */
6007 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
6008 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
6009 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6010 "2015 Invalid CT %x command 0x%x\n",
6011 ct, iocbq->iocb.ulpCommand);
6012 return IOCB_ERROR;
6013 }
6014 bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0);
6015 bf_set(wqe_tmo, &wqe->gen_req.wqe_com,
6016 iocbq->iocb.ulpTimeout);
6017
6018 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
6019 command_type = OTHER_COMMAND;
6020 break;
6021 case CMD_XMIT_ELS_RSP64_CX:
6022 /* words0-2 BDE memcpy */
6023 /* word3 iocb=iotag32 wqe=rsvd */
6024 wqe->words[3] = 0;
6025 /* word4 iocb=did wqe=rsvd. */
6026 wqe->words[4] = 0;
6027 /* word5 iocb=rsvd wqe=did */
6028 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
6029 iocbq->iocb.un.elsreq64.remoteID);
6030
6031 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
6032 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
6033
6034 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
6035 bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
6036 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
6037 bf_set(lpfc_wqe_gen_context, &wqe->generic,
6038 iocbq->vport->vpi + phba->vpi_base);
6039 command_type = OTHER_COMMAND;
6040 break;
6041 case CMD_CLOSE_XRI_CN:
6042 case CMD_ABORT_XRI_CN:
6043 case CMD_ABORT_XRI_CX:
6044 /* words 0-2 memcpy should be 0 reserved */
6045 /* port will send abts */
6046 if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
6047 /*
6048 * The link is down so the fw does not need to send abts
6049 * on the wire.
6050 */
6051 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
6052 else
6053 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
6054 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
6055 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
6056 wqe->words[5] = 0;
6057 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
6058 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
6059 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
6060 wqe->generic.abort_tag = abort_tag;
6061 /*
6062 * The abort handler will send us CMD_ABORT_XRI_CN or
6063 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
6064 */
6065 bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX);
6066 cmnd = CMD_ABORT_XRI_CX;
6067 command_type = OTHER_COMMAND;
6068 xritag = 0;
6069 break;
6070 case CMD_XRI_ABORTED_CX:
6071 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
6072 /* words0-2 are all 0's no bde */
6073 /* word3 and word4 are reserved */
6074 wqe->words[3] = 0;
6075 wqe->words[4] = 0;
6076 /* word5 iocb=rsvd wqe=did */
6077 /* There is no remote port id in the IOCB? */
6078 /* Let this fall through and fail */
6079 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
6080 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
6081 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
6082 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
6083 default:
6084 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6085 "2014 Invalid command 0x%x\n",
6086 iocbq->iocb.ulpCommand);
6087 return IOCB_ERROR;
6088 break;
6089
6090 }
6091 bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
6092 bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
6093 wqe->generic.abort_tag = abort_tag;
6094 bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
6095 bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
6096 bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
6097 bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
6098
6099 return 0;
6100}
6101
6102/**
6103 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
6104 * @phba: Pointer to HBA context object.
6105 * @ring_number: SLI ring number to issue iocb on.
6106 * @piocb: Pointer to command iocb.
6107 * @flag: Flag indicating if this command can be put into txq.
6108 *
6109 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
6110 * an iocb command to an HBA with SLI-4 interface spec.
6111 *
6112 * This function is called with hbalock held. The function will return success
6113 * after it successfully submit the iocb to firmware or after adding to the
6114 * txq.
6115 **/
6116static int
6117__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6118 struct lpfc_iocbq *piocb, uint32_t flag)
6119{
6120 struct lpfc_sglq *sglq;
6121 uint16_t xritag;
6122 union lpfc_wqe wqe;
6123 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
6124 uint32_t fcp_wqidx;
6125
6126 if (piocb->sli4_xritag == NO_XRI) {
6127 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6128 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
6129 sglq = NULL;
6130 else {
6131 sglq = __lpfc_sli_get_sglq(phba);
6132 if (!sglq)
6133 return IOCB_ERROR;
6134 piocb->sli4_xritag = sglq->sli4_xritag;
6135 }
6136 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
6137 sglq = NULL; /* These IO's already have an XRI and
6138 * a mapped sgl.
6139 */
6140 } else {
6141 /* This is a continuation of a command (CX), so this
6142 * sglq is on the active list
6143 */
6144 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
6145 if (!sglq)
6146 return IOCB_ERROR;
6147 }
6148
6149 if (sglq) {
6150 xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq);
6151 if (xritag != sglq->sli4_xritag)
6152 return IOCB_ERROR;
6153 }
6154
6155 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
6156 return IOCB_ERROR;
6157
6158 if (piocb->iocb_flag & LPFC_IO_FCP) {
6159 fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb);
6160 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
6161 return IOCB_ERROR;
6162 } else {
6163 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
6164 return IOCB_ERROR;
6165 }
6166 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
6167
6168 return 0;
6169}
6170
6171/**
6172 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
6173 *
6174 * This routine wraps the actual lockless IOCB issuing routine, dispatching
6175 * through the function pointer in the lpfc_hba struct.
6176 *
6177 * Return codes:
6178 * IOCB_ERROR - Error
6179 * IOCB_SUCCESS - Success
6180 * IOCB_BUSY - Busy
6181 **/
6182static inline int
6183__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
6184 struct lpfc_iocbq *piocb, uint32_t flag)
6185{
6186 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
6187}
6188
6189/**
6190 * lpfc_sli_api_table_setup - Set up sli api function jump table
6191 * @phba: The hba struct for which this call is being executed.
6192 * @dev_grp: The HBA PCI-Device group number.
6193 *
6194 * This routine sets up the SLI interface API function jump table in @phba
6195 * struct.
6196 * Returns: 0 - success, -ENODEV - failure.
6197 **/
6198int
6199lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
6200{
6201
6202 switch (dev_grp) {
6203 case LPFC_PCI_DEV_LP:
6204 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
6205 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
6206 break;
6207 case LPFC_PCI_DEV_OC:
6208 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
6209 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
6210 break;
6211 default:
6212 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6213 "1419 Invalid HBA PCI-device group: 0x%x\n",
6214 dev_grp);
6215 return -ENODEV;
6216 break;
6217 }
6218 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
6219 return 0;
6220}
3836 6221
3837/** 6222/**
3838 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 6223 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
@@ -3848,14 +6233,14 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3848 * functions which do not hold hbalock. 6233 * functions which do not hold hbalock.
3849 **/ 6234 **/
3850int 6235int
3851lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 6236lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
3852 struct lpfc_iocbq *piocb, uint32_t flag) 6237 struct lpfc_iocbq *piocb, uint32_t flag)
3853{ 6238{
3854 unsigned long iflags; 6239 unsigned long iflags;
3855 int rc; 6240 int rc;
3856 6241
3857 spin_lock_irqsave(&phba->hbalock, iflags); 6242 spin_lock_irqsave(&phba->hbalock, iflags);
3858 rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag); 6243 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
3859 spin_unlock_irqrestore(&phba->hbalock, iflags); 6244 spin_unlock_irqrestore(&phba->hbalock, iflags);
3860 6245
3861 return rc; 6246 return rc;
@@ -4148,6 +6533,52 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
4148} 6533}
4149 6534
4150/** 6535/**
6536 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
6537 * @phba: Pointer to HBA context object.
6538 *
6539 * This routine flushes the mailbox command subsystem. It will unconditionally
6540 * flush all the mailbox commands in the three possible stages in the mailbox
6541 * command sub-system: pending mailbox command queue; the outstanding mailbox
6542 * command; and completed mailbox command queue. It is the caller's responsibility
6543 * to make sure that the driver is in the proper state to flush the mailbox
6544 * command sub-system. Namely, the posting of mailbox commands into the
6545 * pending mailbox command queue from the various clients must be stopped;
6546 * either the HBA is in a state that it will never work on the outstanding
6547 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
6548 * mailbox command has been completed.
6549 **/
6550static void
6551lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
6552{
6553 LIST_HEAD(completions);
6554 struct lpfc_sli *psli = &phba->sli;
6555 LPFC_MBOXQ_t *pmb;
6556 unsigned long iflag;
6557
6558 /* Flush all the mailbox commands in the mbox system */
6559 spin_lock_irqsave(&phba->hbalock, iflag);
6560 /* The pending mailbox command queue */
6561 list_splice_init(&phba->sli.mboxq, &completions);
6562 /* The outstanding active mailbox command */
6563 if (psli->mbox_active) {
6564 list_add_tail(&psli->mbox_active->list, &completions);
6565 psli->mbox_active = NULL;
6566 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6567 }
6568 /* The completed mailbox command queue */
6569 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
6570 spin_unlock_irqrestore(&phba->hbalock, iflag);
6571
6572 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
6573 while (!list_empty(&completions)) {
6574 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
6575 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
6576 if (pmb->mbox_cmpl)
6577 pmb->mbox_cmpl(phba, pmb);
6578 }
6579}
6580
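The flush routine above follows the usual splice-under-lock / complete-outside-lock pattern: every queued command is moved onto a private completions list in one step while hbalock is held, then each one is completed with MBX_NOT_FINISHED after the lock is dropped. A standalone sketch of that pattern on a simple singly linked list (illustrative only; this is not the kernel list API):

#include <stdio.h>
#include <stdlib.h>

struct mbox {
	int cmd;
	struct mbox *next;
};

/* Detach the whole pending list in one step (the list_splice_init() idea);
 * in the driver this happens while holding hbalock. */
static struct mbox *splice_init(struct mbox **pending)
{
	struct mbox *head = *pending;

	*pending = NULL;
	return head;
}

int main(void)
{
	struct mbox *pending = NULL, *completions, *m;

	for (int i = 0; i < 3; i++) {          /* queue a few fake commands  */
		m = malloc(sizeof(*m));
		if (!m)
			return 1;
		m->cmd = i;
		m->next = pending;
		pending = m;
	}

	completions = splice_init(&pending);   /* done "under the lock"      */
	while (completions) {                  /* complete with lock dropped */
		m = completions;
		completions = m->next;
		printf("mbox %d -> MBX_NOT_FINISHED\n", m->cmd);
		free(m);
	}
	return 0;
}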
6581/**
4151 * lpfc_sli_host_down - Vport cleanup function 6582 * lpfc_sli_host_down - Vport cleanup function
4152 * @vport: Pointer to virtual port object. 6583 * @vport: Pointer to virtual port object.
4153 * 6584 *
@@ -4240,9 +6671,11 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
4240 struct lpfc_sli *psli = &phba->sli; 6671 struct lpfc_sli *psli = &phba->sli;
4241 struct lpfc_sli_ring *pring; 6672 struct lpfc_sli_ring *pring;
4242 struct lpfc_dmabuf *buf_ptr; 6673 struct lpfc_dmabuf *buf_ptr;
4243 LPFC_MBOXQ_t *pmb;
4244 int i;
4245 unsigned long flags = 0; 6674 unsigned long flags = 0;
6675 int i;
6676
6677 /* Shutdown the mailbox command sub-system */
6678 lpfc_sli_mbox_sys_shutdown(phba);
4246 6679
4247 lpfc_hba_down_prep(phba); 6680 lpfc_hba_down_prep(phba);
4248 6681
@@ -4287,28 +6720,42 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
4287 6720
4288 /* Return any active mbox cmds */ 6721 /* Return any active mbox cmds */
4289 del_timer_sync(&psli->mbox_tmo); 6722 del_timer_sync(&psli->mbox_tmo);
4290 spin_lock_irqsave(&phba->hbalock, flags);
4291 6723
4292 spin_lock(&phba->pport->work_port_lock); 6724 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
4293 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 6725 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
4294 spin_unlock(&phba->pport->work_port_lock); 6726 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
4295 6727
4296 /* Return any pending or completed mbox cmds */ 6728 return 1;
4297 list_splice_init(&phba->sli.mboxq, &completions); 6729}
4298 if (psli->mbox_active) { 6730
4299 list_add_tail(&psli->mbox_active->list, &completions); 6731/**
4300 psli->mbox_active = NULL; 6732 * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA
4301 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6733 * @phba: Pointer to HBA context object.
4302 } 6734 *
4303 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 6735 * This function cleans up all queues, iocb, buffers, mailbox commands while
4304 spin_unlock_irqrestore(&phba->hbalock, flags); 6736 * shutting down the SLI4 HBA FCoE function. This function is called with no
6737 * lock held and always returns 1.
6738 *
6739 * This function does the following to cleanup driver FCoE function resources:
6740 * - Free discovery resources for each virtual port
6741 * - Cleanup any pending fabric iocbs
6742 * - Iterate through the iocb txq and free each entry in the list.
6743 * - Free up any buffer posted to the HBA.
6744 * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc.
6745 * - Free mailbox commands in the mailbox queue.
6746 **/
6747int
6748lpfc_sli4_hba_down(struct lpfc_hba *phba)
6749{
6750 /* Stop the SLI4 device port */
6751 lpfc_stop_port(phba);
6752
6753 /* Tear down the queues in the HBA */
6754 lpfc_sli4_queue_unset(phba);
6755
6756 /* unregister default FCFI from the HBA */
6757 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
4305 6758
4306 while (!list_empty(&completions)) {
4307 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
4308 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
4309 if (pmb->mbox_cmpl)
4310 pmb->mbox_cmpl(phba,pmb);
4311 }
4312 return 1; 6759 return 1;
4313} 6760}
4314 6761
@@ -4639,7 +7086,10 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4639 iabt = &abtsiocbp->iocb; 7086 iabt = &abtsiocbp->iocb;
4640 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 7087 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
4641 iabt->un.acxri.abortContextTag = icmd->ulpContext; 7088 iabt->un.acxri.abortContextTag = icmd->ulpContext;
4642 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 7089 if (phba->sli_rev == LPFC_SLI_REV4)
7090 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
7091 else
7092 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
4643 iabt->ulpLe = 1; 7093 iabt->ulpLe = 1;
4644 iabt->ulpClass = icmd->ulpClass; 7094 iabt->ulpClass = icmd->ulpClass;
4645 7095
@@ -4655,7 +7105,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4655 "abort cmd iotag x%x\n", 7105 "abort cmd iotag x%x\n",
4656 iabt->un.acxri.abortContextTag, 7106 iabt->un.acxri.abortContextTag,
4657 iabt->un.acxri.abortIoTag, abtsiocbp->iotag); 7107 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
4658 retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); 7108 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
4659 7109
4660 if (retval) 7110 if (retval)
4661 __lpfc_sli_release_iocbq(phba, abtsiocbp); 7111 __lpfc_sli_release_iocbq(phba, abtsiocbp);
@@ -4838,7 +7288,10 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
4838 cmd = &iocbq->iocb; 7288 cmd = &iocbq->iocb;
4839 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 7289 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
4840 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 7290 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
4841 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 7291 if (phba->sli_rev == LPFC_SLI_REV4)
7292 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
7293 else
7294 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
4842 abtsiocb->iocb.ulpLe = 1; 7295 abtsiocb->iocb.ulpLe = 1;
4843 abtsiocb->iocb.ulpClass = cmd->ulpClass; 7296 abtsiocb->iocb.ulpClass = cmd->ulpClass;
4844 abtsiocb->vport = phba->pport; 7297 abtsiocb->vport = phba->pport;
@@ -4850,7 +7303,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
4850 7303
4851 /* Setup callback routine and issue the command. */ 7304 /* Setup callback routine and issue the command. */
4852 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 7305 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4853 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0); 7306 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
7307 abtsiocb, 0);
4854 if (ret_val == IOCB_ERROR) { 7308 if (ret_val == IOCB_ERROR) {
4855 lpfc_sli_release_iocbq(phba, abtsiocb); 7309 lpfc_sli_release_iocbq(phba, abtsiocb);
4856 errcnt++; 7310 errcnt++;
@@ -4900,6 +7354,32 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
4900} 7354}
4901 7355
4902/** 7356/**
7357 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
7358 * @phba: Pointer to HBA context object..
7359 * @piocbq: Pointer to command iocb.
7360 * @flag: Flag to test.
7361 *
7362 * This routine grabs the hbalock and then tests the iocb_flag to
7363 * see if the passed in flag is set.
7364 * Returns:
7365 * 1 if flag is set.
7366 * 0 if flag is not set.
7367 **/
7368static int
7369lpfc_chk_iocb_flg(struct lpfc_hba *phba,
7370 struct lpfc_iocbq *piocbq, uint32_t flag)
7371{
7372 unsigned long iflags;
7373 int ret;
7374
7375 spin_lock_irqsave(&phba->hbalock, iflags);
7376 ret = piocbq->iocb_flag & flag;
7377 spin_unlock_irqrestore(&phba->hbalock, iflags);
7378 return ret;
7379
7380}
7381
7382/**
4903 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands 7383 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
4904 * @phba: Pointer to HBA context object.. 7384 * @phba: Pointer to HBA context object..
4905 * @pring: Pointer to sli ring. 7385 * @pring: Pointer to sli ring.
@@ -4931,7 +7411,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
4931 **/ 7411 **/
4932int 7412int
4933lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 7413lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
4934 struct lpfc_sli_ring *pring, 7414 uint32_t ring_number,
4935 struct lpfc_iocbq *piocb, 7415 struct lpfc_iocbq *piocb,
4936 struct lpfc_iocbq *prspiocbq, 7416 struct lpfc_iocbq *prspiocbq,
4937 uint32_t timeout) 7417 uint32_t timeout)
@@ -4962,11 +7442,11 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
4962 readl(phba->HCregaddr); /* flush */ 7442 readl(phba->HCregaddr); /* flush */
4963 } 7443 }
4964 7444
4965 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0); 7445 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0);
4966 if (retval == IOCB_SUCCESS) { 7446 if (retval == IOCB_SUCCESS) {
4967 timeout_req = timeout * HZ; 7447 timeout_req = timeout * HZ;
4968 timeleft = wait_event_timeout(done_q, 7448 timeleft = wait_event_timeout(done_q,
4969 piocb->iocb_flag & LPFC_IO_WAKE, 7449 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
4970 timeout_req); 7450 timeout_req);
4971 7451
4972 if (piocb->iocb_flag & LPFC_IO_WAKE) { 7452 if (piocb->iocb_flag & LPFC_IO_WAKE) {
@@ -5077,53 +7557,150 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
5077} 7557}
5078 7558
5079/** 7559/**
5080 * lpfc_sli_flush_mbox_queue - mailbox queue cleanup function 7560 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
5081 * @phba: Pointer to HBA context. 7561 * @phba: Pointer to HBA context.
5082 * 7562 *
5083 * This function is called to cleanup any pending mailbox 7563 * This function is called to shutdown the driver's mailbox sub-system.
5084 * objects in the driver queue before bringing the HBA offline. 7564 * It first marks the mailbox sub-system as in a blocked state to prevent
5085 * This function is called while resetting the HBA. 7565 * the asynchronous mailbox command from being issued off the pending mailbox
5086 * The function is called without any lock held. The function 7566 * command queue. If the mailbox command sub-system shutdown is due to
5087 * takes hbalock to update SLI data structure. 7567 * HBA error conditions such as EEH or ERATT, this routine shall invoke
5088 * This function returns 1 when there is an active mailbox 7568 * the mailbox sub-system flush routine to forcefully bring down the
5089 * command pending else returns 0. 7569 * mailbox sub-system. Otherwise, if it is due to normal condition (such
7570 * as with offline or HBA function reset), this routine will wait for the
7571 * outstanding mailbox command to complete before invoking the mailbox
7572 * sub-system flush routine to gracefully bring down mailbox sub-system.
5090 **/ 7573 **/
5091int 7574void
5092lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) 7575lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
5093{ 7576{
5094 struct lpfc_vport *vport = phba->pport; 7577 struct lpfc_sli *psli = &phba->sli;
5095 int i = 0; 7578 uint8_t actcmd = MBX_HEARTBEAT;
5096 uint32_t ha_copy; 7579 unsigned long timeout;
5097 7580
5098 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) { 7581 spin_lock_irq(&phba->hbalock);
5099 if (i++ > LPFC_MBOX_TMO * 1000) 7582 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
5100 return 1; 7583 spin_unlock_irq(&phba->hbalock);
5101 7584
5102 /* 7585 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
5103 * Call lpfc_sli_handle_mb_event only if a mailbox cmd
5104 * did finish. This way we won't get the misleading
5105 * "Stray Mailbox Interrupt" message.
5106 */
5107 spin_lock_irq(&phba->hbalock); 7586 spin_lock_irq(&phba->hbalock);
5108 ha_copy = phba->work_ha; 7587 if (phba->sli.mbox_active)
5109 phba->work_ha &= ~HA_MBATT; 7588 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
5110 spin_unlock_irq(&phba->hbalock); 7589 spin_unlock_irq(&phba->hbalock);
7590 /* Determine how long we might wait for the active mailbox
7591 * command to be gracefully completed by firmware.
7592 */
7593 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) *
7594 1000) + jiffies;
7595 while (phba->sli.mbox_active) {
7596 /* Check active mailbox complete status every 2ms */
7597 msleep(2);
7598 if (time_after(jiffies, timeout))
7599 /* Timeout, let the mailbox flush routine to
7600 * forcefully release active mailbox command
7601 */
7602 break;
7603 }
7604 }
7605 lpfc_sli_mbox_sys_flush(phba);
7606}
5111 7607
5112 if (ha_copy & HA_MBATT) 7608/**
5113 if (lpfc_sli_handle_mb_event(phba) == 0) 7609 * lpfc_sli_eratt_read - read sli-3 error attention events
5114 i = 0; 7610 * @phba: Pointer to HBA context.
7611 *
7612 * This function is called to read the SLI3 device error attention registers
7613 * for possible error attention events. The caller must hold the hostlock
7614 * with spin_lock_irq().
7615 *
7616 * This function returns 1 when there is Error Attention in the Host Attention
7617 * Register and returns 0 otherwise.
7618 **/
7619static int
7620lpfc_sli_eratt_read(struct lpfc_hba *phba)
7621{
7622 uint32_t ha_copy;
7623
7624 /* Read chip Host Attention (HA) register */
7625 ha_copy = readl(phba->HAregaddr);
7626 if (ha_copy & HA_ERATT) {
7627 /* Read host status register to retrieve error event */
7628 lpfc_sli_read_hs(phba);
5115 7629
5116 msleep(1); 7630 /* Check if a deferred error condition is active */
7631 if ((HS_FFER1 & phba->work_hs) &&
7632 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
7633 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
7634 phba->hba_flag |= DEFER_ERATT;
7635 /* Clear all interrupt enable conditions */
7636 writel(0, phba->HCregaddr);
7637 readl(phba->HCregaddr);
7638 }
7639
7640 /* Set the driver HA work bitmap */
7641 phba->work_ha |= HA_ERATT;
7642 /* Indicate polling handles this ERATT */
7643 phba->hba_flag |= HBA_ERATT_HANDLED;
7644 return 1;
5117 } 7645 }
7646 return 0;
7647}
7648
7649/**
7650 * lpfc_sli4_eratt_read - read sli-4 error attention events
7651 * @phba: Pointer to HBA context.
7652 *
7653 * This function is called to read the SLI4 device error attention registers
7654 * for possible error attention events. The caller must hold the hostlock
7655 * with spin_lock_irq().
7656 *
7657 * This function returns 1 when there is Error Attention in the Host Attention
7658 * Register and returns 0 otherwise.
7659 **/
7660static int
7661lpfc_sli4_eratt_read(struct lpfc_hba *phba)
7662{
7663 uint32_t uerr_sta_hi, uerr_sta_lo;
7664 uint32_t onlnreg0, onlnreg1;
5118 7665
5119 return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0; 7666 /* For now, use the SLI4 device internal unrecoverable error
7667 * registers for error attention. This can be changed later.
7668 */
7669 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
7670 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
7671 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
7672 uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
7673 uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
7674 if (uerr_sta_lo || uerr_sta_hi) {
7675 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7676 "1423 HBA Unrecoverable error: "
7677 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
7678 "online0_reg=0x%x, online1_reg=0x%x\n",
7679 uerr_sta_lo, uerr_sta_hi,
7680 onlnreg0, onlnreg1);
7681 /* TEMP: as the driver error recovery logic is not
7682 * fully developed, we just log the error message
7683 * and the device error attention action is now
7684 * temporarily disabled.
7685 */
7686 return 0;
7687 phba->work_status[0] = uerr_sta_lo;
7688 phba->work_status[1] = uerr_sta_hi;
7689 /* Set the driver HA work bitmap */
7690 phba->work_ha |= HA_ERATT;
7691 /* Indicate polling handles this ERATT */
7692 phba->hba_flag |= HBA_ERATT_HANDLED;
7693 return 1;
7694 }
7695 }
7696 return 0;
5120} 7697}
5121 7698
5122/** 7699/**
5123 * lpfc_sli_check_eratt - check error attention events 7700 * lpfc_sli_check_eratt - check error attention events
5124 * @phba: Pointer to HBA context. 7701 * @phba: Pointer to HBA context.
5125 * 7702 *
5126 * This function is called form timer soft interrupt context to check HBA's 7703 * This function is called from timer soft interrupt context to check HBA's
5127 * error attention register bit for error attention events. 7704 * error attention register bit for error attention events.
5128 * 7705 *
5129 * This function returns 1 when there is Error Attention in the Host Attention 7706 * This function returns 1 when there is Error Attention in the Host Attention
@@ -5134,10 +7711,6 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
5134{ 7711{
5135 uint32_t ha_copy; 7712 uint32_t ha_copy;
5136 7713
5137 /* If PCI channel is offline, don't process it */
5138 if (unlikely(pci_channel_offline(phba->pcidev)))
5139 return 0;
5140
5141 /* If somebody is waiting to handle an eratt, don't process it 7714 /* If somebody is waiting to handle an eratt, don't process it
5142 * here. The brdkill function will do this. 7715 * here. The brdkill function will do this.
5143 */ 7716 */
@@ -5161,56 +7734,84 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
5161 return 0; 7734 return 0;
5162 } 7735 }
5163 7736
5164 /* Read chip Host Attention (HA) register */ 7737 /* If PCI channel is offline, don't process it */
5165 ha_copy = readl(phba->HAregaddr); 7738 if (unlikely(pci_channel_offline(phba->pcidev))) {
5166 if (ha_copy & HA_ERATT) {
5167 /* Read host status register to retrieve error event */
5168 lpfc_sli_read_hs(phba);
5169
5170 /* Check if there is a deferred error condition is active */
5171 if ((HS_FFER1 & phba->work_hs) &&
5172 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
5173 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
5174 phba->hba_flag |= DEFER_ERATT;
5175 /* Clear all interrupt enable conditions */
5176 writel(0, phba->HCregaddr);
5177 readl(phba->HCregaddr);
5178 }
5179
5180 /* Set the driver HA work bitmap */
5181 phba->work_ha |= HA_ERATT;
5182 /* Indicate polling handles this ERATT */
5183 phba->hba_flag |= HBA_ERATT_HANDLED;
5184 spin_unlock_irq(&phba->hbalock); 7739 spin_unlock_irq(&phba->hbalock);
5185 return 1; 7740 return 0;
7741 }
7742
7743 switch (phba->sli_rev) {
7744 case LPFC_SLI_REV2:
7745 case LPFC_SLI_REV3:
7746 /* Read chip Host Attention (HA) register */
7747 ha_copy = lpfc_sli_eratt_read(phba);
7748 break;
7749 case LPFC_SLI_REV4:
7750 /* Read device Unrecoverable Error (UERR) registers */
7751 ha_copy = lpfc_sli4_eratt_read(phba);
7752 break;
7753 default:
7754 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7755 "0299 Invalid SLI revision (%d)\n",
7756 phba->sli_rev);
7757 ha_copy = 0;
7758 break;
5186 } 7759 }
5187 spin_unlock_irq(&phba->hbalock); 7760 spin_unlock_irq(&phba->hbalock);
7761
7762 return ha_copy;
7763}
7764
7765/**
7766 * lpfc_intr_state_check - Check device state for interrupt handling
7767 * @phba: Pointer to HBA context.
7768 *
7769 * This inline routine checks whether a device or its PCI slot is in a state
7770 * in which the interrupt should be handled.
7771 *
7772 * This function returns 0 if the device or the PCI slot is in a state that
7773 * interrupt should be handled, otherwise -EIO.
7774 */
7775static inline int
7776lpfc_intr_state_check(struct lpfc_hba *phba)
7777{
7778 /* If the pci channel is offline, ignore all the interrupts */
7779 if (unlikely(pci_channel_offline(phba->pcidev)))
7780 return -EIO;
7781
7782 /* Update device level interrupt statistics */
7783 phba->sli.slistat.sli_intr++;
7784
7785 /* Ignore all interrupts during initialization. */
7786 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
7787 return -EIO;
7788
5188 return 0; 7789 return 0;
5189} 7790}
5190 7791
5191/** 7792/**
5192 * lpfc_sp_intr_handler - The slow-path interrupt handler of lpfc driver 7793 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
5193 * @irq: Interrupt number. 7794 * @irq: Interrupt number.
5194 * @dev_id: The device context pointer. 7795 * @dev_id: The device context pointer.
5195 * 7796 *
5196 * This function is directly called from the PCI layer as an interrupt 7797 * This function is directly called from the PCI layer as an interrupt
5197 * service routine when the device is enabled with MSI-X multi-message 7798 * service routine when device with SLI-3 interface spec is enabled with
5198 * interrupt mode and there are slow-path events in the HBA. However, 7799 * MSI-X multi-message interrupt mode and there are slow-path events in
5199 * when the device is enabled with either MSI or Pin-IRQ interrupt mode, 7800 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
5200 * this function is called as part of the device-level interrupt handler. 7801 * interrupt mode, this function is called as part of the device-level
5201 * When the PCI slot is in error recovery or the HBA is undergoing 7802 * interrupt handler. When the PCI slot is in error recovery or the HBA
5202 * initialization, the interrupt handler will not process the interrupt. 7803 * is undergoing initialization, the interrupt handler will not process
5203 * The link attention and ELS ring attention events are handled by the 7804 * the interrupt. The link attention and ELS ring attention events are
5204 * worker thread. The interrupt handler signals the worker thread and 7805 * handled by the worker thread. The interrupt handler signals the worker
5205 * and returns for these events. This function is called without any 7806 * thread and returns for these events. This function is called without
5206 * lock held. It gets the hbalock to access and update SLI data 7807 * any lock held. It gets the hbalock to access and update SLI data
5207 * structures. 7808 * structures.
5208 * 7809 *
5209 * This function returns IRQ_HANDLED when interrupt is handled else it 7810 * This function returns IRQ_HANDLED when interrupt is handled else it
5210 * returns IRQ_NONE. 7811 * returns IRQ_NONE.
5211 **/ 7812 **/
5212irqreturn_t 7813irqreturn_t
5213lpfc_sp_intr_handler(int irq, void *dev_id) 7814lpfc_sli_sp_intr_handler(int irq, void *dev_id)
5214{ 7815{
5215 struct lpfc_hba *phba; 7816 struct lpfc_hba *phba;
5216 uint32_t ha_copy; 7817 uint32_t ha_copy;
@@ -5240,13 +7841,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5240 * individual interrupt handler in MSI-X multi-message interrupt mode 7841 * individual interrupt handler in MSI-X multi-message interrupt mode
5241 */ 7842 */
5242 if (phba->intr_type == MSIX) { 7843 if (phba->intr_type == MSIX) {
5243 /* If the pci channel is offline, ignore all the interrupts */ 7844 /* Check device state for handling interrupt */
5244 if (unlikely(pci_channel_offline(phba->pcidev))) 7845 if (lpfc_intr_state_check(phba))
5245 return IRQ_NONE;
5246 /* Update device-level interrupt statistics */
5247 phba->sli.slistat.sli_intr++;
5248 /* Ignore all interrupts during initialization. */
5249 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5250 return IRQ_NONE; 7846 return IRQ_NONE;
5251 /* Need to read HA REG for slow-path events */ 7847 /* Need to read HA REG for slow-path events */
5252 spin_lock_irqsave(&phba->hbalock, iflag); 7848 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -5271,7 +7867,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5271 * interrupt. 7867 * interrupt.
5272 */ 7868 */
5273 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 7869 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
5274 spin_unlock_irq(&phba->hbalock); 7870 spin_unlock_irqrestore(&phba->hbalock, iflag);
5275 return IRQ_NONE; 7871 return IRQ_NONE;
5276 } 7872 }
5277 7873
@@ -5364,7 +7960,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5364 7960
5365 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 7961 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
5366 pmb = phba->sli.mbox_active; 7962 pmb = phba->sli.mbox_active;
5367 pmbox = &pmb->mb; 7963 pmbox = &pmb->u.mb;
5368 mbox = phba->mbox; 7964 mbox = phba->mbox;
5369 vport = pmb->vport; 7965 vport = pmb->vport;
5370 7966
@@ -5434,7 +8030,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5434 LOG_MBOX | LOG_SLI, 8030 LOG_MBOX | LOG_SLI,
5435 "0350 rc should have" 8031 "0350 rc should have"
5436 "been MBX_BUSY"); 8032 "been MBX_BUSY");
5437 goto send_current_mbox; 8033 if (rc != MBX_NOT_FINISHED)
8034 goto send_current_mbox;
5438 } 8035 }
5439 } 8036 }
5440 spin_lock_irqsave( 8037 spin_lock_irqsave(
@@ -5471,29 +8068,29 @@ send_current_mbox:
5471 } 8068 }
5472 return IRQ_HANDLED; 8069 return IRQ_HANDLED;
5473 8070
5474} /* lpfc_sp_intr_handler */ 8071} /* lpfc_sli_sp_intr_handler */
5475 8072
5476/** 8073/**
5477 * lpfc_fp_intr_handler - The fast-path interrupt handler of lpfc driver 8074 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
5478 * @irq: Interrupt number. 8075 * @irq: Interrupt number.
5479 * @dev_id: The device context pointer. 8076 * @dev_id: The device context pointer.
5480 * 8077 *
5481 * This function is directly called from the PCI layer as an interrupt 8078 * This function is directly called from the PCI layer as an interrupt
5482 * service routine when the device is enabled with MSI-X multi-message 8079 * service routine when device with SLI-3 interface spec is enabled with
5483 * interrupt mode and there is a fast-path FCP IOCB ring event in the 8080 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
5484 * HBA. However, when the device is enabled with either MSI or Pin-IRQ 8081 * ring event in the HBA. However, when the device is enabled with either
5485 * interrupt mode, this function is called as part of the device-level 8082 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
5486 * interrupt handler. When the PCI slot is in error recovery or the HBA 8083 * device-level interrupt handler. When the PCI slot is in error recovery
5487 * is undergoing initialization, the interrupt handler will not process 8084 * or the HBA is undergoing initialization, the interrupt handler will not
5488 * the interrupt. The SCSI FCP fast-path ring events are handled in the 8085 * process the interrupt. The SCSI FCP fast-path ring events are handled in
5489 * interrupt context. This function is called without any lock held. It 8086 * the interrupt context. This function is called without any lock held.
5490 * gets the hbalock to access and update SLI data structures. 8087 * It gets the hbalock to access and update SLI data structures.
5491 * 8088 *
5492 * This function returns IRQ_HANDLED when interrupt is handled else it 8089 * This function returns IRQ_HANDLED when interrupt is handled else it
5493 * returns IRQ_NONE. 8090 * returns IRQ_NONE.
5494 **/ 8091 **/
5495irqreturn_t 8092irqreturn_t
5496lpfc_fp_intr_handler(int irq, void *dev_id) 8093lpfc_sli_fp_intr_handler(int irq, void *dev_id)
5497{ 8094{
5498 struct lpfc_hba *phba; 8095 struct lpfc_hba *phba;
5499 uint32_t ha_copy; 8096 uint32_t ha_copy;
@@ -5513,13 +8110,8 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5513 * individual interrupt handler in MSI-X multi-message interrupt mode 8110 * individual interrupt handler in MSI-X multi-message interrupt mode
5514 */ 8111 */
5515 if (phba->intr_type == MSIX) { 8112 if (phba->intr_type == MSIX) {
5516 /* If pci channel is offline, ignore all the interrupts */ 8113 /* Check device state for handling interrupt */
5517 if (unlikely(pci_channel_offline(phba->pcidev))) 8114 if (lpfc_intr_state_check(phba))
5518 return IRQ_NONE;
5519 /* Update device-level interrupt statistics */
5520 phba->sli.slistat.sli_intr++;
5521 /* Ignore all interrupts during initialization. */
5522 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5523 return IRQ_NONE; 8115 return IRQ_NONE;
5524 /* Need to read HA REG for FCP ring and other ring events */ 8116 /* Need to read HA REG for FCP ring and other ring events */
5525 ha_copy = readl(phba->HAregaddr); 8117 ha_copy = readl(phba->HAregaddr);
@@ -5530,7 +8122,7 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5530 * any interrupt. 8122 * any interrupt.
5531 */ 8123 */
5532 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 8124 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
5533 spin_unlock_irq(&phba->hbalock); 8125 spin_unlock_irqrestore(&phba->hbalock, iflag);
5534 return IRQ_NONE; 8126 return IRQ_NONE;
5535 } 8127 }
5536 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 8128 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
@@ -5566,26 +8158,27 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5566 } 8158 }
5567 } 8159 }
5568 return IRQ_HANDLED; 8160 return IRQ_HANDLED;
5569} /* lpfc_fp_intr_handler */ 8161} /* lpfc_sli_fp_intr_handler */
5570 8162
5571/** 8163/**
5572 * lpfc_intr_handler - The device-level interrupt handler of lpfc driver 8164 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
5573 * @irq: Interrupt number. 8165 * @irq: Interrupt number.
5574 * @dev_id: The device context pointer. 8166 * @dev_id: The device context pointer.
5575 * 8167 *
5576 * This function is the device-level interrupt handler called from the PCI 8168 * This function is the HBA device-level interrupt handler to device with
5577 * layer when either MSI or Pin-IRQ interrupt mode is enabled and there is 8169 * SLI-3 interface spec, called from the PCI layer when either MSI or
5578 * an event in the HBA which requires driver attention. This function 8170 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
5579 * invokes the slow-path interrupt attention handling function and fast-path 8171 * requires driver attention. This function invokes the slow-path interrupt
5580 * interrupt attention handling function in turn to process the relevant 8172 * attention handling function and fast-path interrupt attention handling
5581 * HBA attention events. This function is called without any lock held. It 8173 * function in turn to process the relevant HBA attention events. This
5582 * gets the hbalock to access and update SLI data structures. 8174 * function is called without any lock held. It gets the hbalock to access
8175 * and update SLI data structures.
5583 * 8176 *
5584 * This function returns IRQ_HANDLED when interrupt is handled, else it 8177 * This function returns IRQ_HANDLED when interrupt is handled, else it
5585 * returns IRQ_NONE. 8178 * returns IRQ_NONE.
5586 **/ 8179 **/
5587irqreturn_t 8180irqreturn_t
5588lpfc_intr_handler(int irq, void *dev_id) 8181lpfc_sli_intr_handler(int irq, void *dev_id)
5589{ 8182{
5590 struct lpfc_hba *phba; 8183 struct lpfc_hba *phba;
5591 irqreturn_t sp_irq_rc, fp_irq_rc; 8184 irqreturn_t sp_irq_rc, fp_irq_rc;
@@ -5600,15 +8193,8 @@ lpfc_intr_handler(int irq, void *dev_id)
5600 if (unlikely(!phba)) 8193 if (unlikely(!phba))
5601 return IRQ_NONE; 8194 return IRQ_NONE;
5602 8195
5603 /* If the pci channel is offline, ignore all the interrupts. */ 8196 /* Check device state for handling interrupt */
5604 if (unlikely(pci_channel_offline(phba->pcidev))) 8197 if (lpfc_intr_state_check(phba))
5605 return IRQ_NONE;
5606
5607 /* Update device level interrupt statistics */
5608 phba->sli.slistat.sli_intr++;
5609
5610 /* Ignore all interrupts during initialization. */
5611 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5612 return IRQ_NONE; 8198 return IRQ_NONE;
5613 8199
5614 spin_lock(&phba->hbalock); 8200 spin_lock(&phba->hbalock);
@@ -5650,7 +8236,7 @@ lpfc_intr_handler(int irq, void *dev_id)
5650 status2 >>= (4*LPFC_ELS_RING); 8236 status2 >>= (4*LPFC_ELS_RING);
5651 8237
5652 if (status1 || (status2 & HA_RXMASK)) 8238 if (status1 || (status2 & HA_RXMASK))
5653 sp_irq_rc = lpfc_sp_intr_handler(irq, dev_id); 8239 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
5654 else 8240 else
5655 sp_irq_rc = IRQ_NONE; 8241 sp_irq_rc = IRQ_NONE;
5656 8242
@@ -5670,10 +8256,3321 @@ lpfc_intr_handler(int irq, void *dev_id)
5670 status2 = 0; 8256 status2 = 0;
5671 8257
5672 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 8258 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
5673 fp_irq_rc = lpfc_fp_intr_handler(irq, dev_id); 8259 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
5674 else 8260 else
5675 fp_irq_rc = IRQ_NONE; 8261 fp_irq_rc = IRQ_NONE;
5676 8262
5677 /* Return device-level interrupt handling status */ 8263 /* Return device-level interrupt handling status */
5678 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 8264 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
5679} /* lpfc_intr_handler */ 8265} /* lpfc_sli_intr_handler */
8266
8267/**
8268 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
8269 * @phba: pointer to lpfc hba data structure.
8270 *
8271 * This routine is invoked by the worker thread to process all the pending
8272 * SLI4 FCP abort XRI events.
8273 **/
8274void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
8275{
8276 struct lpfc_cq_event *cq_event;
8277
8278 /* First, declare the fcp xri abort event has been handled */
8279 spin_lock_irq(&phba->hbalock);
8280 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
8281 spin_unlock_irq(&phba->hbalock);
8282 /* Now, handle all the fcp xri abort events */
8283 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
8284 /* Get the first event from the head of the event queue */
8285 spin_lock_irq(&phba->hbalock);
8286 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
8287 cq_event, struct lpfc_cq_event, list);
8288 spin_unlock_irq(&phba->hbalock);
8289 /* Notify aborted XRI for FCP work queue */
8290 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
8291 /* Free the event processed back to the free pool */
8292 lpfc_sli4_cq_event_release(phba, cq_event);
8293 }
8294}
8295
8296/**
8297 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
8298 * @phba: pointer to lpfc hba data structure.
8299 *
8300 * This routine is invoked by the worker thread to process all the pending
8301 * SLI4 els abort xri events.
8302 **/
8303void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
8304{
8305 struct lpfc_cq_event *cq_event;
8306
8307 /* First, declare the els xri abort event has been handled */
8308 spin_lock_irq(&phba->hbalock);
8309 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
8310 spin_unlock_irq(&phba->hbalock);
8311 /* Now, handle all the els xri abort events */
8312 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
8313 /* Get the first event from the head of the event queue */
8314 spin_lock_irq(&phba->hbalock);
8315 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
8316 cq_event, struct lpfc_cq_event, list);
8317 spin_unlock_irq(&phba->hbalock);
8318 /* Notify aborted XRI for ELS work queue */
8319 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
8320 /* Free the event processed back to the free pool */
8321 lpfc_sli4_cq_event_release(phba, cq_event);
8322 }
8323}
8324
8325static void
8326lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
8327 struct lpfc_iocbq *pIocbOut,
8328 struct lpfc_wcqe_complete *wcqe)
8329{
8330 size_t offset = offsetof(struct lpfc_iocbq, iocb);
8331
8332 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
8333 sizeof(struct lpfc_iocbq) - offset);
8334 memset(&pIocbIn->sli4_info, 0,
8335 sizeof(struct lpfc_sli4_rspiocb_info));
8336 /* Map WCQE parameters into irspiocb parameters */
8337 pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
8338 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
8339 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
8340 pIocbIn->iocb.un.fcpi.fcpi_parm =
8341 pIocbOut->iocb.un.fcpi.fcpi_parm -
8342 wcqe->total_data_placed;
8343 else
8344 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8345 else
8346 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8347 /* Load in additional WCQE parameters */
8348 pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe);
8349 pIocbIn->sli4_info.bfield = 0;
8350 if (bf_get(lpfc_wcqe_c_xb, wcqe))
8351 pIocbIn->sli4_info.bfield |= LPFC_XB;
8352 if (bf_get(lpfc_wcqe_c_pv, wcqe)) {
8353 pIocbIn->sli4_info.bfield |= LPFC_PV;
8354 pIocbIn->sli4_info.priority =
8355 bf_get(lpfc_wcqe_c_priority, wcqe);
8356 }
8357}
8358
8359/**
 8360 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
 8361 * @phba: Pointer to HBA context object.
 8362 * @mcqe: Pointer to mailbox completion queue entry.
 8363 *
 8364 * This routine processes a mailbox completion queue entry with an
 8365 * asynchronous event.
8366 *
8367 * Return: true if work posted to worker thread, otherwise false.
8368 **/
8369static bool
8370lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
8371{
8372 struct lpfc_cq_event *cq_event;
8373 unsigned long iflags;
8374
8375 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8376 "0392 Async Event: word0:x%x, word1:x%x, "
8377 "word2:x%x, word3:x%x\n", mcqe->word0,
8378 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
8379
8380 /* Allocate a new internal CQ_EVENT entry */
8381 cq_event = lpfc_sli4_cq_event_alloc(phba);
8382 if (!cq_event) {
8383 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8384 "0394 Failed to allocate CQ_EVENT entry\n");
8385 return false;
8386 }
8387
8388 /* Move the CQE into an asynchronous event entry */
8389 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
8390 spin_lock_irqsave(&phba->hbalock, iflags);
8391 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
8392 /* Set the async event flag */
8393 phba->hba_flag |= ASYNC_EVENT;
8394 spin_unlock_irqrestore(&phba->hbalock, iflags);
8395
8396 return true;
8397}
8398
8399/**
8400 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
8401 * @phba: Pointer to HBA context object.
 8402 * @mcqe: Pointer to mailbox completion queue entry.
8403 *
 8404 * This routine processes a mailbox completion queue entry with a mailbox
 8405 * completion event.
8406 *
8407 * Return: true if work posted to worker thread, otherwise false.
8408 **/
8409static bool
8410lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
8411{
8412 uint32_t mcqe_status;
8413 MAILBOX_t *mbox, *pmbox;
8414 struct lpfc_mqe *mqe;
8415 struct lpfc_vport *vport;
8416 struct lpfc_nodelist *ndlp;
8417 struct lpfc_dmabuf *mp;
8418 unsigned long iflags;
8419 LPFC_MBOXQ_t *pmb;
8420 bool workposted = false;
8421 int rc;
8422
 8423 /* If not a mailbox completion MCQE, bail out after checking mailbox consume */
8424 if (!bf_get(lpfc_trailer_completed, mcqe))
8425 goto out_no_mqe_complete;
8426
8427 /* Get the reference to the active mbox command */
8428 spin_lock_irqsave(&phba->hbalock, iflags);
8429 pmb = phba->sli.mbox_active;
8430 if (unlikely(!pmb)) {
8431 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
8432 "1832 No pending MBOX command to handle\n");
8433 spin_unlock_irqrestore(&phba->hbalock, iflags);
8434 goto out_no_mqe_complete;
8435 }
8436 spin_unlock_irqrestore(&phba->hbalock, iflags);
8437 mqe = &pmb->u.mqe;
8438 pmbox = (MAILBOX_t *)&pmb->u.mqe;
8439 mbox = phba->mbox;
8440 vport = pmb->vport;
8441
8442 /* Reset heartbeat timer */
8443 phba->last_completion_time = jiffies;
8444 del_timer(&phba->sli.mbox_tmo);
8445
8446 /* Move mbox data to caller's mailbox region, do endian swapping */
8447 if (pmb->mbox_cmpl && mbox)
8448 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
8449 /* Set the mailbox status with SLI4 range 0x4000 */
8450 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
8451 if (mcqe_status != MB_CQE_STATUS_SUCCESS)
8452 bf_set(lpfc_mqe_status, mqe,
8453 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8454
8455 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
8456 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
8457 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
8458 "MBOX dflt rpi: status:x%x rpi:x%x",
8459 mcqe_status,
8460 pmbox->un.varWords[0], 0);
8461 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
8462 mp = (struct lpfc_dmabuf *)(pmb->context1);
8463 ndlp = (struct lpfc_nodelist *)pmb->context2;
 8464 /* Reg_LOGIN of dflt RPI was successful. Now let's get
 8465 * rid of the RPI using the same mbox buffer.
8466 */
8467 lpfc_unreg_login(phba, vport->vpi,
8468 pmbox->un.varWords[0], pmb);
8469 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
8470 pmb->context1 = mp;
8471 pmb->context2 = ndlp;
8472 pmb->vport = vport;
8473 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
8474 if (rc != MBX_BUSY)
8475 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
8476 LOG_SLI, "0385 rc should "
8477 "have been MBX_BUSY\n");
8478 if (rc != MBX_NOT_FINISHED)
8479 goto send_current_mbox;
8480 }
8481 }
8482 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
8483 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8484 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
8485
8486 /* There is mailbox completion work to do */
8487 spin_lock_irqsave(&phba->hbalock, iflags);
8488 __lpfc_mbox_cmpl_put(phba, pmb);
8489 phba->work_ha |= HA_MBATT;
8490 spin_unlock_irqrestore(&phba->hbalock, iflags);
8491 workposted = true;
8492
8493send_current_mbox:
8494 spin_lock_irqsave(&phba->hbalock, iflags);
8495 /* Release the mailbox command posting token */
8496 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
 8497 /* Resetting the active mailbox pointer must be in sync with clearing the flag */
8498 phba->sli.mbox_active = NULL;
8499 spin_unlock_irqrestore(&phba->hbalock, iflags);
8500 /* Wake up worker thread to post the next pending mailbox command */
8501 lpfc_worker_wake_up(phba);
8502out_no_mqe_complete:
8503 if (bf_get(lpfc_trailer_consumed, mcqe))
8504 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
8505 return workposted;
8506}
8507
8508/**
8509 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
8510 * @phba: Pointer to HBA context object.
8511 * @cqe: Pointer to mailbox completion queue entry.
8512 *
 8513 * This routine processes a mailbox completion queue entry; it invokes the
 8514 * proper mailbox completion handling or asynchronous event handling routine
8515 * according to the MCQE's async bit.
8516 *
8517 * Return: true if work posted to worker thread, otherwise false.
8518 **/
8519static bool
8520lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8521{
8522 struct lpfc_mcqe mcqe;
8523 bool workposted;
8524
8525 /* Copy the mailbox MCQE and convert endian order as needed */
8526 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
8527
8528 /* Invoke the proper event handling routine */
8529 if (!bf_get(lpfc_trailer_async, &mcqe))
8530 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
8531 else
8532 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
8533 return workposted;
8534}
8535
8536/**
8537 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
8538 * @phba: Pointer to HBA context object.
8539 * @wcqe: Pointer to work-queue completion queue entry.
8540 *
8541 * This routine handles an ELS work-queue completion event.
8542 *
8543 * Return: true if work posted to worker thread, otherwise false.
8544 **/
8545static bool
8546lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
8547 struct lpfc_wcqe_complete *wcqe)
8548{
8549 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
8550 struct lpfc_iocbq *cmdiocbq;
8551 struct lpfc_iocbq *irspiocbq;
8552 unsigned long iflags;
8553 bool workposted = false;
8554
8555 spin_lock_irqsave(&phba->hbalock, iflags);
8556 pring->stats.iocb_event++;
8557 /* Look up the ELS command IOCB and create pseudo response IOCB */
8558 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8559 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8560 spin_unlock_irqrestore(&phba->hbalock, iflags);
8561
8562 if (unlikely(!cmdiocbq)) {
8563 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8564 "0386 ELS complete with no corresponding "
8565 "cmdiocb: iotag (%d)\n",
8566 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8567 return workposted;
8568 }
8569
8570 /* Fake the irspiocbq and copy necessary response information */
8571 irspiocbq = lpfc_sli_get_iocbq(phba);
8572 if (!irspiocbq) {
8573 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8574 "0387 Failed to allocate an iocbq\n");
8575 return workposted;
8576 }
8577 lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
8578
8579 /* Add the irspiocb to the response IOCB work list */
8580 spin_lock_irqsave(&phba->hbalock, iflags);
8581 list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue);
8582 /* Indicate ELS ring attention */
8583 phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING));
8584 spin_unlock_irqrestore(&phba->hbalock, iflags);
8585 workposted = true;
8586
8587 return workposted;
8588}
8589
8590/**
8591 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
8592 * @phba: Pointer to HBA context object.
8593 * @wcqe: Pointer to work-queue completion queue entry.
8594 *
 8595 * This routine handles a slow-path WQ entry consumed event by invoking the
8596 * proper WQ release routine to the slow-path WQ.
8597 **/
8598static void
8599lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
8600 struct lpfc_wcqe_release *wcqe)
8601{
8602 /* Check for the slow-path ELS work queue */
8603 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
8604 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
8605 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
8606 else
8607 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8608 "2579 Slow-path wqe consume event carries "
 8609 "mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
8610 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
8611 phba->sli4_hba.els_wq->queue_id);
8612}
8613
8614/**
8615 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
8616 * @phba: Pointer to HBA context object.
8617 * @cq: Pointer to a WQ completion queue.
8618 * @wcqe: Pointer to work-queue completion queue entry.
8619 *
8620 * This routine handles an XRI abort event.
8621 *
8622 * Return: true if work posted to worker thread, otherwise false.
8623 **/
8624static bool
8625lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
8626 struct lpfc_queue *cq,
8627 struct sli4_wcqe_xri_aborted *wcqe)
8628{
8629 bool workposted = false;
8630 struct lpfc_cq_event *cq_event;
8631 unsigned long iflags;
8632
8633 /* Allocate a new internal CQ_EVENT entry */
8634 cq_event = lpfc_sli4_cq_event_alloc(phba);
8635 if (!cq_event) {
8636 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8637 "0602 Failed to allocate CQ_EVENT entry\n");
8638 return false;
8639 }
8640
8641 /* Move the CQE into the proper xri abort event list */
8642 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
8643 switch (cq->subtype) {
8644 case LPFC_FCP:
8645 spin_lock_irqsave(&phba->hbalock, iflags);
8646 list_add_tail(&cq_event->list,
8647 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
8648 /* Set the fcp xri abort event flag */
8649 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
8650 spin_unlock_irqrestore(&phba->hbalock, iflags);
8651 workposted = true;
8652 break;
8653 case LPFC_ELS:
8654 spin_lock_irqsave(&phba->hbalock, iflags);
8655 list_add_tail(&cq_event->list,
8656 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
8657 /* Set the els xri abort event flag */
8658 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
8659 spin_unlock_irqrestore(&phba->hbalock, iflags);
8660 workposted = true;
8661 break;
8662 default:
8663 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8664 "0603 Invalid work queue CQE subtype (x%x)\n",
8665 cq->subtype);
8666 workposted = false;
8667 break;
8668 }
8669 return workposted;
8670}
8671
8672/**
8673 * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry
8674 * @phba: Pointer to HBA context object.
8675 * @cq: Pointer to the completion queue.
8676 * @wcqe: Pointer to a completion queue entry.
8677 *
 8678 * This routine processes a slow-path work-queue completion queue entry.
8679 *
8680 * Return: true if work posted to worker thread, otherwise false.
8681 **/
8682static bool
8683lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8684 struct lpfc_cqe *cqe)
8685{
8686 struct lpfc_wcqe_complete wcqe;
8687 bool workposted = false;
8688
8689 /* Copy the work queue CQE and convert endian order if needed */
8690 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
8691
8692 /* Check and process for different type of WCQE and dispatch */
8693 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
8694 case CQE_CODE_COMPL_WQE:
8695 /* Process the WQ complete event */
8696 workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
8697 (struct lpfc_wcqe_complete *)&wcqe);
8698 break;
8699 case CQE_CODE_RELEASE_WQE:
8700 /* Process the WQ release event */
8701 lpfc_sli4_sp_handle_rel_wcqe(phba,
8702 (struct lpfc_wcqe_release *)&wcqe);
8703 break;
8704 case CQE_CODE_XRI_ABORTED:
8705 /* Process the WQ XRI abort event */
8706 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8707 (struct sli4_wcqe_xri_aborted *)&wcqe);
8708 break;
8709 default:
8710 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8711 "0388 Not a valid WCQE code: x%x\n",
8712 bf_get(lpfc_wcqe_c_code, &wcqe));
8713 break;
8714 }
8715 return workposted;
8716}
8717
8718/**
8719 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
8720 * @phba: Pointer to HBA context object.
8721 * @rcqe: Pointer to receive-queue completion queue entry.
8722 *
 8723 * This routine processes a receive-queue completion queue entry.
8724 *
8725 * Return: true if work posted to worker thread, otherwise false.
8726 **/
8727static bool
8728lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8729{
8730 struct lpfc_rcqe rcqe;
8731 bool workposted = false;
8732 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
8733 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
8734 struct hbq_dmabuf *dma_buf;
8735 uint32_t status;
8736 unsigned long iflags;
8737
8738 /* Copy the receive queue CQE and convert endian order if needed */
8739 lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
8740 lpfc_sli4_rq_release(hrq, drq);
8741 if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
8742 goto out;
8743 if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
8744 goto out;
8745
8746 status = bf_get(lpfc_rcqe_status, &rcqe);
8747 switch (status) {
8748 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
8749 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8750 "2537 Receive Frame Truncated!!\n");
8751 case FC_STATUS_RQ_SUCCESS:
8752 spin_lock_irqsave(&phba->hbalock, iflags);
8753 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
8754 if (!dma_buf) {
8755 spin_unlock_irqrestore(&phba->hbalock, iflags);
8756 goto out;
8757 }
8758 memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe));
 8759 /* save off the frame for the worker thread to process */
8760 list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list);
8761 /* Frame received */
8762 phba->hba_flag |= HBA_RECEIVE_BUFFER;
8763 spin_unlock_irqrestore(&phba->hbalock, iflags);
8764 workposted = true;
8765 break;
8766 case FC_STATUS_INSUFF_BUF_NEED_BUF:
8767 case FC_STATUS_INSUFF_BUF_FRM_DISC:
8768 /* Post more buffers if possible */
8769 spin_lock_irqsave(&phba->hbalock, iflags);
8770 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
8771 spin_unlock_irqrestore(&phba->hbalock, iflags);
8772 workposted = true;
8773 break;
8774 }
8775out:
8776 return workposted;
8777
8778}
8779
8780/**
8781 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
8782 * @phba: Pointer to HBA context object.
8783 * @eqe: Pointer to fast-path event queue entry.
8784 *
 8785 * This routine processes an event queue entry from the slow-path event queue.
 8786 * It checks the MajorCode and MinorCode to determine whether this is a
 8787 * completion event on a completion queue; if not, an error is logged and the
 8788 * routine returns. Otherwise, it will get to the corresponding completion
8789 * queue and process all the entries on that completion queue, rearm the
8790 * completion queue, and then return.
8791 *
8792 **/
8793static void
8794lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
8795{
8796 struct lpfc_queue *cq = NULL, *childq, *speq;
8797 struct lpfc_cqe *cqe;
8798 bool workposted = false;
8799 int ecount = 0;
8800 uint16_t cqid;
8801
8802 if (bf_get(lpfc_eqe_major_code, eqe) != 0 ||
8803 bf_get(lpfc_eqe_minor_code, eqe) != 0) {
8804 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8805 "0359 Not a valid slow-path completion "
8806 "event: majorcode=x%x, minorcode=x%x\n",
8807 bf_get(lpfc_eqe_major_code, eqe),
8808 bf_get(lpfc_eqe_minor_code, eqe));
8809 return;
8810 }
8811
8812 /* Get the reference to the corresponding CQ */
8813 cqid = bf_get(lpfc_eqe_resource_id, eqe);
8814
8815 /* Search for completion queue pointer matching this cqid */
8816 speq = phba->sli4_hba.sp_eq;
8817 list_for_each_entry(childq, &speq->child_list, list) {
8818 if (childq->queue_id == cqid) {
8819 cq = childq;
8820 break;
8821 }
8822 }
8823 if (unlikely(!cq)) {
8824 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8825 "0365 Slow-path CQ identifier (%d) does "
8826 "not exist\n", cqid);
8827 return;
8828 }
8829
8830 /* Process all the entries to the CQ */
8831 switch (cq->type) {
8832 case LPFC_MCQ:
8833 while ((cqe = lpfc_sli4_cq_get(cq))) {
8834 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
8835 if (!(++ecount % LPFC_GET_QE_REL_INT))
8836 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8837 }
8838 break;
8839 case LPFC_WCQ:
8840 while ((cqe = lpfc_sli4_cq_get(cq))) {
8841 workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe);
8842 if (!(++ecount % LPFC_GET_QE_REL_INT))
8843 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8844 }
8845 break;
8846 case LPFC_RCQ:
8847 while ((cqe = lpfc_sli4_cq_get(cq))) {
8848 workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
8849 if (!(++ecount % LPFC_GET_QE_REL_INT))
8850 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8851 }
8852 break;
8853 default:
8854 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8855 "0370 Invalid completion queue type (%d)\n",
8856 cq->type);
8857 return;
8858 }
8859
8860 /* Catch the no cq entry condition, log an error */
8861 if (unlikely(ecount == 0))
8862 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8863 "0371 No entry from the CQ: identifier "
8864 "(x%x), type (%d)\n", cq->queue_id, cq->type);
8865
 8866 /* In any case, flush and re-arm the CQ */
8867 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
8868
8869 /* wake up worker thread if there are works to be done */
8870 if (workposted)
8871 lpfc_worker_wake_up(phba);
8872}
8873
8874/**
8875 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
 8876 * @wcqe: Pointer to work-queue completion queue entry.
8877 *
 8878 * This routine processes a fast-path work queue completion entry from the
 8879 * fast-path event queue for FCP command response completion.
8880 **/
8881static void
8882lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
8883 struct lpfc_wcqe_complete *wcqe)
8884{
8885 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
8886 struct lpfc_iocbq *cmdiocbq;
8887 struct lpfc_iocbq irspiocbq;
8888 unsigned long iflags;
8889
8890 spin_lock_irqsave(&phba->hbalock, iflags);
8891 pring->stats.iocb_event++;
8892 spin_unlock_irqrestore(&phba->hbalock, iflags);
8893
8894 /* Check for response status */
8895 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
8896 /* If resource errors reported from HBA, reduce queue
8897 * depth of the SCSI device.
8898 */
8899 if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
8900 IOSTAT_LOCAL_REJECT) &&
8901 (wcqe->parameter == IOERR_NO_RESOURCES)) {
8902 phba->lpfc_rampdown_queue_depth(phba);
8903 }
8904 /* Log the error status */
8905 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8906 "0373 FCP complete error: status=x%x, "
8907 "hw_status=x%x, total_data_specified=%d, "
8908 "parameter=x%x, word3=x%x\n",
8909 bf_get(lpfc_wcqe_c_status, wcqe),
8910 bf_get(lpfc_wcqe_c_hw_status, wcqe),
8911 wcqe->total_data_placed, wcqe->parameter,
8912 wcqe->word3);
8913 }
8914
8915 /* Look up the FCP command IOCB and create pseudo response IOCB */
8916 spin_lock_irqsave(&phba->hbalock, iflags);
8917 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8918 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8919 spin_unlock_irqrestore(&phba->hbalock, iflags);
8920 if (unlikely(!cmdiocbq)) {
8921 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8922 "0374 FCP complete with no corresponding "
8923 "cmdiocb: iotag (%d)\n",
8924 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8925 return;
8926 }
8927 if (unlikely(!cmdiocbq->iocb_cmpl)) {
8928 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8929 "0375 FCP cmdiocb not callback function "
8930 "iotag: (%d)\n",
8931 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8932 return;
8933 }
8934
8935 /* Fake the irspiocb and copy necessary response information */
8936 lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe);
8937
8938 /* Pass the cmd_iocb and the rsp state to the upper layer */
8939 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
8940}
8941
8942/**
8943 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
8944 * @phba: Pointer to HBA context object.
8945 * @cq: Pointer to completion queue.
8946 * @wcqe: Pointer to work-queue completion queue entry.
8947 *
 8948 * This routine handles a fast-path WQ entry consumed event by invoking the
 8949 * proper WQ release routine for the matching fast-path WQ.
8950 **/
8951static void
8952lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8953 struct lpfc_wcqe_release *wcqe)
8954{
8955 struct lpfc_queue *childwq;
8956 bool wqid_matched = false;
8957 uint16_t fcp_wqid;
8958
8959 /* Check for fast-path FCP work queue release */
8960 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
8961 list_for_each_entry(childwq, &cq->child_list, list) {
8962 if (childwq->queue_id == fcp_wqid) {
8963 lpfc_sli4_wq_release(childwq,
8964 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
8965 wqid_matched = true;
8966 break;
8967 }
8968 }
8969 /* Report warning log message if no match found */
8970 if (wqid_matched != true)
8971 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8972 "2580 Fast-path wqe consume event carries "
 8973 "mismatched qid: wcqe-qid=x%x\n", fcp_wqid);
8974}
8975
8976/**
8977 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
8978 * @cq: Pointer to the completion queue.
 8979 * @cqe: Pointer to the completion queue entry.
8980 *
 8981 * This routine processes a fast-path work queue completion entry from the
 8982 * fast-path event queue for FCP command response completion.
8983 **/
8984static int
8985lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8986 struct lpfc_cqe *cqe)
8987{
8988 struct lpfc_wcqe_release wcqe;
8989 bool workposted = false;
8990
8991 /* Copy the work queue CQE and convert endian order if needed */
8992 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
8993
8994 /* Check and process for different type of WCQE and dispatch */
8995 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
8996 case CQE_CODE_COMPL_WQE:
8997 /* Process the WQ complete event */
8998 lpfc_sli4_fp_handle_fcp_wcqe(phba,
8999 (struct lpfc_wcqe_complete *)&wcqe);
9000 break;
9001 case CQE_CODE_RELEASE_WQE:
9002 /* Process the WQ release event */
9003 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
9004 (struct lpfc_wcqe_release *)&wcqe);
9005 break;
9006 case CQE_CODE_XRI_ABORTED:
9007 /* Process the WQ XRI abort event */
9008 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
9009 (struct sli4_wcqe_xri_aborted *)&wcqe);
9010 break;
9011 default:
9012 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9013 "0144 Not a valid WCQE code: x%x\n",
9014 bf_get(lpfc_wcqe_c_code, &wcqe));
9015 break;
9016 }
9017 return workposted;
9018}
9019
9020/**
9021 * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
9022 * @phba: Pointer to HBA context object.
9023 * @eqe: Pointer to fast-path event queue entry.
9024 *
 9025 * This routine processes an event queue entry from the fast-path event queue.
 9026 * It checks the MajorCode and MinorCode to determine whether this is a
 9027 * completion event on a completion queue; if not, an error is logged and the
 9028 * routine returns. Otherwise, it will get to the corresponding completion
9029 * queue and process all the entries on the completion queue, rearm the
9030 * completion queue, and then return.
9031 **/
9032static void
9033lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
9034 uint32_t fcp_cqidx)
9035{
9036 struct lpfc_queue *cq;
9037 struct lpfc_cqe *cqe;
9038 bool workposted = false;
9039 uint16_t cqid;
9040 int ecount = 0;
9041
9042 if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) ||
9043 unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
9044 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9045 "0366 Not a valid fast-path completion "
9046 "event: majorcode=x%x, minorcode=x%x\n",
9047 bf_get(lpfc_eqe_major_code, eqe),
9048 bf_get(lpfc_eqe_minor_code, eqe));
9049 return;
9050 }
9051
9052 cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
9053 if (unlikely(!cq)) {
9054 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9055 "0367 Fast-path completion queue does not "
9056 "exist\n");
9057 return;
9058 }
9059
9060 /* Get the reference to the corresponding CQ */
9061 cqid = bf_get(lpfc_eqe_resource_id, eqe);
9062 if (unlikely(cqid != cq->queue_id)) {
9063 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 9064 "0368 Mismatched fast-path completion "
9065 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
9066 cqid, cq->queue_id);
9067 return;
9068 }
9069
9070 /* Process all the entries to the CQ */
9071 while ((cqe = lpfc_sli4_cq_get(cq))) {
9072 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
9073 if (!(++ecount % LPFC_GET_QE_REL_INT))
9074 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
9075 }
9076
9077 /* Catch the no cq entry condition */
9078 if (unlikely(ecount == 0))
9079 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9080 "0369 No entry from fast-path completion "
9081 "queue fcpcqid=%d\n", cq->queue_id);
9082
 9083 /* In any case, flush and re-arm the CQ */
9084 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
9085
9086 /* wake up worker thread if there are works to be done */
9087 if (workposted)
9088 lpfc_worker_wake_up(phba);
9089}
9090
9091static void
9092lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
9093{
9094 struct lpfc_eqe *eqe;
9095
9096 /* walk all the EQ entries and drop on the floor */
9097 while ((eqe = lpfc_sli4_eq_get(eq)))
9098 ;
9099
9100 /* Clear and re-arm the EQ */
9101 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
9102}
9103
9104/**
9105 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
9106 * @irq: Interrupt number.
9107 * @dev_id: The device context pointer.
9108 *
9109 * This function is directly called from the PCI layer as an interrupt
9110 * service routine when device with SLI-4 interface spec is enabled with
9111 * MSI-X multi-message interrupt mode and there are slow-path events in
9112 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
9113 * interrupt mode, this function is called as part of the device-level
9114 * interrupt handler. When the PCI slot is in error recovery or the HBA is
9115 * undergoing initialization, the interrupt handler will not process the
9116 * interrupt. The link attention and ELS ring attention events are handled
9117 * by the worker thread. The interrupt handler signals the worker thread
9118 * and returns for these events. This function is called without any lock
9119 * held. It gets the hbalock to access and update SLI data structures.
9120 *
9121 * This function returns IRQ_HANDLED when interrupt is handled else it
9122 * returns IRQ_NONE.
9123 **/
9124irqreturn_t
9125lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
9126{
9127 struct lpfc_hba *phba;
9128 struct lpfc_queue *speq;
9129 struct lpfc_eqe *eqe;
9130 unsigned long iflag;
9131 int ecount = 0;
9132
9133 /*
9134 * Get the driver's phba structure from the dev_id
9135 */
9136 phba = (struct lpfc_hba *)dev_id;
9137
9138 if (unlikely(!phba))
9139 return IRQ_NONE;
9140
9141 /* Get to the EQ struct associated with this vector */
9142 speq = phba->sli4_hba.sp_eq;
9143
9144 /* Check device state for handling interrupt */
9145 if (unlikely(lpfc_intr_state_check(phba))) {
9146 /* Check again for link_state with lock held */
9147 spin_lock_irqsave(&phba->hbalock, iflag);
9148 if (phba->link_state < LPFC_LINK_DOWN)
9149 /* Flush, clear interrupt, and rearm the EQ */
9150 lpfc_sli4_eq_flush(phba, speq);
9151 spin_unlock_irqrestore(&phba->hbalock, iflag);
9152 return IRQ_NONE;
9153 }
9154
9155 /*
 9156 * Process all the events on the slow-path EQ
9157 */
9158 while ((eqe = lpfc_sli4_eq_get(speq))) {
9159 lpfc_sli4_sp_handle_eqe(phba, eqe);
9160 if (!(++ecount % LPFC_GET_QE_REL_INT))
9161 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
9162 }
9163
9164 /* Always clear and re-arm the slow-path EQ */
9165 lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
9166
9167 /* Catch the no cq entry condition */
9168 if (unlikely(ecount == 0)) {
9169 if (phba->intr_type == MSIX)
 9170 /* MSI-X vector is not shared, so no EQE is unexpected; log it */
9171 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9172 "0357 MSI-X interrupt with no EQE\n");
9173 else
 9174 /* Shared (non MSI-X) interrupt with no EQE: not ours */
9175 return IRQ_NONE;
9176 }
9177
9178 return IRQ_HANDLED;
9179} /* lpfc_sli4_sp_intr_handler */
9180
9181/**
9182 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
9183 * @irq: Interrupt number.
9184 * @dev_id: The device context pointer.
9185 *
9186 * This function is directly called from the PCI layer as an interrupt
9187 * service routine when device with SLI-4 interface spec is enabled with
9188 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
9189 * ring event in the HBA. However, when the device is enabled with either
9190 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
9191 * device-level interrupt handler. When the PCI slot is in error recovery
9192 * or the HBA is undergoing initialization, the interrupt handler will not
 9193 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 9194 * the interrupt context. This function is called without any lock held.
 9195 * It gets the hbalock to access and update SLI data structures. Note that
 9196 * the FCP EQs map one-to-one onto the FCP CQs, so the FCP EQ index is
 9197 * equal to the corresponding FCP CQ index.
9198 *
9199 * This function returns IRQ_HANDLED when interrupt is handled else it
9200 * returns IRQ_NONE.
9201 **/
9202irqreturn_t
9203lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
9204{
9205 struct lpfc_hba *phba;
9206 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
9207 struct lpfc_queue *fpeq;
9208 struct lpfc_eqe *eqe;
9209 unsigned long iflag;
9210 int ecount = 0;
9211 uint32_t fcp_eqidx;
9212
9213 /* Get the driver's phba structure from the dev_id */
9214 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
9215 phba = fcp_eq_hdl->phba;
9216 fcp_eqidx = fcp_eq_hdl->idx;
9217
9218 if (unlikely(!phba))
9219 return IRQ_NONE;
9220
9221 /* Get to the EQ struct associated with this vector */
9222 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
9223
9224 /* Check device state for handling interrupt */
9225 if (unlikely(lpfc_intr_state_check(phba))) {
9226 /* Check again for link_state with lock held */
9227 spin_lock_irqsave(&phba->hbalock, iflag);
9228 if (phba->link_state < LPFC_LINK_DOWN)
9229 /* Flush, clear interrupt, and rearm the EQ */
9230 lpfc_sli4_eq_flush(phba, fpeq);
9231 spin_unlock_irqrestore(&phba->hbalock, iflag);
9232 return IRQ_NONE;
9233 }
9234
9235 /*
 9236 * Process all the events on the FCP fast-path EQ
9237 */
9238 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
9239 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
9240 if (!(++ecount % LPFC_GET_QE_REL_INT))
9241 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
9242 }
9243
9244 /* Always clear and re-arm the fast-path EQ */
9245 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
9246
9247 if (unlikely(ecount == 0)) {
9248 if (phba->intr_type == MSIX)
 9249 /* MSI-X vector is not shared, so no EQE is unexpected; log it */
9250 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9251 "0358 MSI-X interrupt with no EQE\n");
9252 else
 9253 /* Shared (non MSI-X) interrupt with no EQE: not ours */
9254 return IRQ_NONE;
9255 }
9256
9257 return IRQ_HANDLED;
9258} /* lpfc_sli4_fp_intr_handler */
9259
9260/**
9261 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
9262 * @irq: Interrupt number.
9263 * @dev_id: The device context pointer.
9264 *
9265 * This function is the device-level interrupt handler to device with SLI-4
9266 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
9267 * interrupt mode is enabled and there is an event in the HBA which requires
9268 * driver attention. This function invokes the slow-path interrupt attention
9269 * handling function and fast-path interrupt attention handling function in
9270 * turn to process the relevant HBA attention events. This function is called
9271 * without any lock held. It gets the hbalock to access and update SLI data
9272 * structures.
9273 *
9274 * This function returns IRQ_HANDLED when interrupt is handled, else it
9275 * returns IRQ_NONE.
9276 **/
9277irqreturn_t
9278lpfc_sli4_intr_handler(int irq, void *dev_id)
9279{
9280 struct lpfc_hba *phba;
9281 irqreturn_t sp_irq_rc, fp_irq_rc;
9282 bool fp_handled = false;
9283 uint32_t fcp_eqidx;
9284
9285 /* Get the driver's phba structure from the dev_id */
9286 phba = (struct lpfc_hba *)dev_id;
9287
9288 if (unlikely(!phba))
9289 return IRQ_NONE;
9290
9291 /*
9292 * Invokes slow-path host attention interrupt handling as appropriate.
9293 */
9294 sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
9295
9296 /*
9297 * Invoke fast-path host attention interrupt handling as appropriate.
9298 */
9299 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
9300 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
9301 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
9302 if (fp_irq_rc == IRQ_HANDLED)
9303 fp_handled |= true;
9304 }
9305
9306 return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc;
9307} /* lpfc_sli4_intr_handler */
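
A minimal sketch of how the SLI-4 handlers above could be hooked up to the kernel's request_irq() API when MSI-X is enabled. The helper name, vector variables, and IRQ name strings are illustrative assumptions; only lpfc_sli4_sp_intr_handler(), lpfc_sli4_fp_intr_handler(), and phba->sli4_hba.fcp_eq_hdl[] come from the code above, so this is not the driver's actual setup path.

#include <linux/interrupt.h>

/* Sketch only: sp_vec, fp_vecs, and the "lpfc-sli4-*" names are assumptions */
static int example_sli4_request_msix(struct lpfc_hba *phba, int sp_vec,
				     const int *fp_vecs, int fp_count)
{
	int rc, idx;

	/* Slow-path EQ vector: dev_id is the phba itself */
	rc = request_irq(sp_vec, lpfc_sli4_sp_intr_handler, 0,
			 "lpfc-sli4-sp", phba);
	if (rc)
		return rc;

	/* One vector per fast-path FCP EQ: dev_id is the per-EQ handle,
	 * whose phba and idx fields are assumed to be filled in earlier.
	 */
	for (idx = 0; idx < fp_count; idx++) {
		rc = request_irq(fp_vecs[idx], lpfc_sli4_fp_intr_handler, 0,
				 "lpfc-sli4-fp",
				 &phba->sli4_hba.fcp_eq_hdl[idx]);
		if (rc)
			return rc;
	}
	return 0;
}
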
9308
9309/**
9310 * lpfc_sli4_queue_free - free a queue structure and associated memory
9311 * @queue: The queue structure to free.
9312 *
 9313 * This function frees a queue structure and the DMAable memory used for
9314 * the host resident queue. This function must be called after destroying the
9315 * queue on the HBA.
9316 **/
9317void
9318lpfc_sli4_queue_free(struct lpfc_queue *queue)
9319{
9320 struct lpfc_dmabuf *dmabuf;
9321
9322 if (!queue)
9323 return;
9324
9325 while (!list_empty(&queue->page_list)) {
9326 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
9327 list);
9328 dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE,
9329 dmabuf->virt, dmabuf->phys);
9330 kfree(dmabuf);
9331 }
9332 kfree(queue);
9333 return;
9334}
9335
9336/**
9337 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
9338 * @phba: The HBA that this queue is being created on.
9339 * @entry_size: The size of each queue entry for this queue.
 9340 * @entry_count: The number of entries that this queue will handle.
9341 *
9342 * This function allocates a queue structure and the DMAable memory used for
9343 * the host resident queue. This function must be called before creating the
9344 * queue on the HBA.
9345 **/
9346struct lpfc_queue *
9347lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
9348 uint32_t entry_count)
9349{
9350 struct lpfc_queue *queue;
9351 struct lpfc_dmabuf *dmabuf;
9352 int x, total_qe_count;
9353 void *dma_pointer;
9354
9355
9356 queue = kzalloc(sizeof(struct lpfc_queue) +
9357 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
9358 if (!queue)
9359 return NULL;
9360 queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE;
9361 INIT_LIST_HEAD(&queue->list);
9362 INIT_LIST_HEAD(&queue->page_list);
9363 INIT_LIST_HEAD(&queue->child_list);
9364 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
9365 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9366 if (!dmabuf)
9367 goto out_fail;
9368 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
9369 PAGE_SIZE, &dmabuf->phys,
9370 GFP_KERNEL);
9371 if (!dmabuf->virt) {
9372 kfree(dmabuf);
9373 goto out_fail;
9374 }
9375 memset(dmabuf->virt, 0, PAGE_SIZE);
9376 dmabuf->buffer_tag = x;
9377 list_add_tail(&dmabuf->list, &queue->page_list);
9378 /* initialize queue's entry array */
9379 dma_pointer = dmabuf->virt;
9380 for (; total_qe_count < entry_count &&
9381 dma_pointer < (PAGE_SIZE + dmabuf->virt);
9382 total_qe_count++, dma_pointer += entry_size) {
9383 queue->qe[total_qe_count].address = dma_pointer;
9384 }
9385 }
9386 queue->entry_size = entry_size;
9387 queue->entry_count = entry_count;
9388 queue->phba = phba;
9389
9390 return queue;
9391out_fail:
9392 lpfc_sli4_queue_free(queue);
9393 return NULL;
9394}
9395
9396/**
9397 * lpfc_eq_create - Create an Event Queue on the HBA
9398 * @phba: HBA structure that indicates port to create a queue on.
9399 * @eq: The queue structure to use to create the event queue.
9400 * @imax: The maximum interrupt per second limit.
9401 *
9402 * This function creates an event queue, as detailed in @eq, on a port,
9403 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
9404 *
9405 * The @phba struct is used to send mailbox command to HBA. The @eq struct
9406 * is used to get the entry count and entry size that are necessary to
9407 * determine the number of pages to allocate and use for this queue. This
9408 * function will send the EQ_CREATE mailbox command to the HBA to setup the
 9409 * event queue. This function is synchronous; it waits for the mailbox
 9410 * command to finish before returning.
9411 *
 9412 * On success this function will return zero. If unable to allocate enough
 9413 * memory this function will return -ENOMEM. If the queue create mailbox command
 9414 * fails this function will return -ENXIO.
9415 **/
9416uint32_t
9417lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
9418{
9419 struct lpfc_mbx_eq_create *eq_create;
9420 LPFC_MBOXQ_t *mbox;
9421 int rc, length, status = 0;
9422 struct lpfc_dmabuf *dmabuf;
9423 uint32_t shdr_status, shdr_add_status;
9424 union lpfc_sli4_cfg_shdr *shdr;
9425 uint16_t dmult;
9426
9427 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9428 if (!mbox)
9429 return -ENOMEM;
9430 length = (sizeof(struct lpfc_mbx_eq_create) -
9431 sizeof(struct lpfc_sli4_cfg_mhdr));
9432 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9433 LPFC_MBOX_OPCODE_EQ_CREATE,
9434 length, LPFC_SLI4_MBX_EMBED);
9435 eq_create = &mbox->u.mqe.un.eq_create;
9436 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
9437 eq->page_count);
9438 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
9439 LPFC_EQE_SIZE);
9440 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
 9441 /* Calculate delay multiplier from maximum interrupts per second */
9442 dmult = LPFC_DMULT_CONST/imax - 1;
9443 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
9444 dmult);
9445 switch (eq->entry_count) {
9446 default:
9447 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9448 "0360 Unsupported EQ count. (%d)\n",
9449 eq->entry_count);
9450 if (eq->entry_count < 256)
9451 return -EINVAL;
9452 /* otherwise default to smallest count (drop through) */
9453 case 256:
9454 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9455 LPFC_EQ_CNT_256);
9456 break;
9457 case 512:
9458 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9459 LPFC_EQ_CNT_512);
9460 break;
9461 case 1024:
9462 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9463 LPFC_EQ_CNT_1024);
9464 break;
9465 case 2048:
9466 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9467 LPFC_EQ_CNT_2048);
9468 break;
9469 case 4096:
9470 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9471 LPFC_EQ_CNT_4096);
9472 break;
9473 }
9474 list_for_each_entry(dmabuf, &eq->page_list, list) {
9475 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9476 putPaddrLow(dmabuf->phys);
9477 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9478 putPaddrHigh(dmabuf->phys);
9479 }
9480 mbox->vport = phba->pport;
9481 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9482 mbox->context1 = NULL;
9483 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9484 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
9485 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9486 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9487 if (shdr_status || shdr_add_status || rc) {
9488 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9489 "2500 EQ_CREATE mailbox failed with "
9490 "status x%x add_status x%x, mbx status x%x\n",
9491 shdr_status, shdr_add_status, rc);
9492 status = -ENXIO;
9493 }
9494 eq->type = LPFC_EQ;
9495 eq->subtype = LPFC_NONE;
9496 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
9497 if (eq->queue_id == 0xFFFF)
9498 status = -ENXIO;
9499 eq->host_index = 0;
9500 eq->hba_index = 0;
9501
9502 if (rc != MBX_TIMEOUT)
9503 mempool_free(mbox, phba->mbox_mem_pool);
9504 return status;
9505}
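
A hedged usage sketch pairing lpfc_sli4_queue_alloc() with lpfc_eq_create() as documented above. The 64-byte entry size, 1024-entry count, and 10000 interrupts-per-second cap are placeholder values rather than the driver's real configuration; the helper name is hypothetical.

/* Sketch only: sizes, counts, and the interrupt cap below are placeholders */
static struct lpfc_queue *example_sli4_setup_eq(struct lpfc_hba *phba)
{
	struct lpfc_queue *eq;

	/* Allocate host-resident pages and the queue bookkeeping structure */
	eq = lpfc_sli4_queue_alloc(phba, 64, 1024);
	if (!eq)
		return NULL;

	/* Send EQ_CREATE to the HBA; a non-zero return means it failed */
	if (lpfc_eq_create(phba, eq, 10000)) {
		lpfc_sli4_queue_free(eq);
		return NULL;
	}
	return eq;
}
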
9506
9507/**
9508 * lpfc_cq_create - Create a Completion Queue on the HBA
9509 * @phba: HBA structure that indicates port to create a queue on.
9510 * @cq: The queue structure to use to create the completion queue.
9511 * @eq: The event queue to bind this completion queue to.
9512 *
 9513 * This function creates a completion queue, as detailed in @cq, on a port,
9514 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
9515 *
9516 * The @phba struct is used to send mailbox command to HBA. The @cq struct
9517 * is used to get the entry count and entry size that are necessary to
9518 * determine the number of pages to allocate and use for this queue. The @eq
9519 * is used to indicate which event queue to bind this completion queue to. This
9520  * function will send the CQ_CREATE mailbox command to the HBA to set up the
9521  * completion queue. This function is synchronous and will wait for the mailbox
9522  * command to finish before continuing.
9523 *
9524 * On success this function will return a zero. If unable to allocate enough
9525 * memory this function will return ENOMEM. If the queue create mailbox command
9526 * fails this function will return ENXIO.
9527 **/
9528uint32_t
9529lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
9530 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
9531{
9532 struct lpfc_mbx_cq_create *cq_create;
9533 struct lpfc_dmabuf *dmabuf;
9534 LPFC_MBOXQ_t *mbox;
9535 int rc, length, status = 0;
9536 uint32_t shdr_status, shdr_add_status;
9537 union lpfc_sli4_cfg_shdr *shdr;
9538
9539 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9540 if (!mbox)
9541 return -ENOMEM;
9542 length = (sizeof(struct lpfc_mbx_cq_create) -
9543 sizeof(struct lpfc_sli4_cfg_mhdr));
9544 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9545 LPFC_MBOX_OPCODE_CQ_CREATE,
9546 length, LPFC_SLI4_MBX_EMBED);
9547 cq_create = &mbox->u.mqe.un.cq_create;
9548 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
9549 cq->page_count);
9550 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
9551 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
9552 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id);
9553 switch (cq->entry_count) {
9554 default:
9555 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9556 "0361 Unsupported CQ count. (%d)\n",
9557 cq->entry_count);
9558 if (cq->entry_count < 256)
9559 return -EINVAL;
9560 /* otherwise default to smallest count (drop through) */
9561 case 256:
9562 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9563 LPFC_CQ_CNT_256);
9564 break;
9565 case 512:
9566 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9567 LPFC_CQ_CNT_512);
9568 break;
9569 case 1024:
9570 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9571 LPFC_CQ_CNT_1024);
9572 break;
9573 }
9574 list_for_each_entry(dmabuf, &cq->page_list, list) {
9575 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9576 putPaddrLow(dmabuf->phys);
9577 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9578 putPaddrHigh(dmabuf->phys);
9579 }
9580 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9581
9582 /* The IOCTL status is embedded in the mailbox subheader. */
9583 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
9584 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9585 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9586 if (shdr_status || shdr_add_status || rc) {
9587 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9588 "2501 CQ_CREATE mailbox failed with "
9589 "status x%x add_status x%x, mbx status x%x\n",
9590 shdr_status, shdr_add_status, rc);
9591 status = -ENXIO;
9592 goto out;
9593 }
9594 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
9595 if (cq->queue_id == 0xFFFF) {
9596 status = -ENXIO;
9597 goto out;
9598 }
9599 /* link the cq onto the parent eq child list */
9600 list_add_tail(&cq->list, &eq->child_list);
9601 /* Set up completion queue's type and subtype */
9602 cq->type = type;
9603 cq->subtype = subtype;
9604 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
9605 cq->host_index = 0;
9606 cq->hba_index = 0;
9607out:
9608
9609 if (rc != MBX_TIMEOUT)
9610 mempool_free(mbox, phba->mbox_mem_pool);
9611 return status;
9612}
9613
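/*
 * Illustrative sketch (not part of the driver): creating a completion
 * queue and binding it to an already-created event queue.  The queue
 * structures are assumed to be allocated by the caller; @type and
 * @subtype are passed straight through to lpfc_cq_create(), so no
 * particular constants are assumed here.
 */
static uint32_t lpfc_example_setup_cq(struct lpfc_hba *phba,
				      struct lpfc_queue *cq,
				      struct lpfc_queue *eq,
				      uint32_t type, uint32_t subtype)
{
	/* The EQ must exist first; its queue_id is written into the
	 * CQ_CREATE request so the port knows which EQ this CQ reports
	 * events to.
	 */
	if (!eq || eq->queue_id == 0xFFFF)
		return -ENXIO;

	/* On success the CQ ends up on eq->child_list and cq->queue_id
	 * becomes valid for use by MQ/WQ/RQ create requests.
	 */
	return lpfc_cq_create(phba, cq, eq, type, subtype);
}
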
9614/**
9615 * lpfc_mq_create - Create a mailbox Queue on the HBA
9616 * @phba: HBA structure that indicates port to create a queue on.
9617 * @mq: The queue structure to use to create the mailbox queue.
9618 *
9619 * This function creates a mailbox queue, as detailed in @mq, on a port,
9620 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
9621 *
9622  * The @phba struct is used to send the mailbox command to the HBA. The @mq struct
9623 * is used to get the entry count and entry size that are necessary to
9624 * determine the number of pages to allocate and use for this queue. This
9625  * function will send the MQ_CREATE mailbox command to the HBA to set up the
9626  * mailbox queue. This function is synchronous and will wait for the mailbox
9627  * command to finish before continuing.
9628 *
9629 * On success this function will return a zero. If unable to allocate enough
9630 * memory this function will return ENOMEM. If the queue create mailbox command
9631 * fails this function will return ENXIO.
9632 **/
9633uint32_t
9634lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
9635 struct lpfc_queue *cq, uint32_t subtype)
9636{
9637 struct lpfc_mbx_mq_create *mq_create;
9638 struct lpfc_dmabuf *dmabuf;
9639 LPFC_MBOXQ_t *mbox;
9640 int rc, length, status = 0;
9641 uint32_t shdr_status, shdr_add_status;
9642 union lpfc_sli4_cfg_shdr *shdr;
9643
9644 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9645 if (!mbox)
9646 return -ENOMEM;
9647 length = (sizeof(struct lpfc_mbx_mq_create) -
9648 sizeof(struct lpfc_sli4_cfg_mhdr));
9649 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9650 LPFC_MBOX_OPCODE_MQ_CREATE,
9651 length, LPFC_SLI4_MBX_EMBED);
9652 mq_create = &mbox->u.mqe.un.mq_create;
9653 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
9654 mq->page_count);
9655 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
9656 cq->queue_id);
9657 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
9658 switch (mq->entry_count) {
9659 default:
9660 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9661 "0362 Unsupported MQ count. (%d)\n",
9662 mq->entry_count);
9663 if (mq->entry_count < 16)
9664 return -EINVAL;
9665 /* otherwise default to smallest count (drop through) */
9666 case 16:
9667 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9668 LPFC_MQ_CNT_16);
9669 break;
9670 case 32:
9671 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9672 LPFC_MQ_CNT_32);
9673 break;
9674 case 64:
9675 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9676 LPFC_MQ_CNT_64);
9677 break;
9678 case 128:
9679 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9680 LPFC_MQ_CNT_128);
9681 break;
9682 }
9683 list_for_each_entry(dmabuf, &mq->page_list, list) {
9684 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9685 putPaddrLow(dmabuf->phys);
9686 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9687 putPaddrHigh(dmabuf->phys);
9688 }
9689 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9690 /* The IOCTL status is embedded in the mailbox subheader. */
9691 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
9692 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9693 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9694 if (shdr_status || shdr_add_status || rc) {
9695 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9696 "2502 MQ_CREATE mailbox failed with "
9697 "status x%x add_status x%x, mbx status x%x\n",
9698 shdr_status, shdr_add_status, rc);
9699 status = -ENXIO;
9700 goto out;
9701 }
9702 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
9703 if (mq->queue_id == 0xFFFF) {
9704 status = -ENXIO;
9705 goto out;
9706 }
9707 mq->type = LPFC_MQ;
9708 mq->subtype = subtype;
9709 mq->host_index = 0;
9710 mq->hba_index = 0;
9711
9712 /* link the mq onto the parent cq child list */
9713 list_add_tail(&mq->list, &cq->child_list);
9714out:
9715 if (rc != MBX_TIMEOUT)
9716 mempool_free(mbox, phba->mbox_mem_pool);
9717 return status;
9718}
9719
9720/**
9721 * lpfc_wq_create - Create a Work Queue on the HBA
9722 * @phba: HBA structure that indicates port to create a queue on.
9723 * @wq: The queue structure to use to create the work queue.
9724 * @cq: The completion queue to bind this work queue to.
9725 * @subtype: The subtype of the work queue indicating its functionality.
9726 *
9727 * This function creates a work queue, as detailed in @wq, on a port, described
9728 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
9729 *
9730  * The @phba struct is used to send the mailbox command to the HBA. The @wq struct
9731 * is used to get the entry count and entry size that are necessary to
9732 * determine the number of pages to allocate and use for this queue. The @cq
9733 * is used to indicate which completion queue to bind this work queue to. This
9734  * function will send the WQ_CREATE mailbox command to the HBA to set up the
9735  * work queue. This function is synchronous and will wait for the mailbox
9736  * command to finish before continuing.
9737 *
9738 * On success this function will return a zero. If unable to allocate enough
9739 * memory this function will return ENOMEM. If the queue create mailbox command
9740 * fails this function will return ENXIO.
9741 **/
9742uint32_t
9743lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
9744 struct lpfc_queue *cq, uint32_t subtype)
9745{
9746 struct lpfc_mbx_wq_create *wq_create;
9747 struct lpfc_dmabuf *dmabuf;
9748 LPFC_MBOXQ_t *mbox;
9749 int rc, length, status = 0;
9750 uint32_t shdr_status, shdr_add_status;
9751 union lpfc_sli4_cfg_shdr *shdr;
9752
9753 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9754 if (!mbox)
9755 return -ENOMEM;
9756 length = (sizeof(struct lpfc_mbx_wq_create) -
9757 sizeof(struct lpfc_sli4_cfg_mhdr));
9758 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9759 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
9760 length, LPFC_SLI4_MBX_EMBED);
9761 wq_create = &mbox->u.mqe.un.wq_create;
9762 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
9763 wq->page_count);
9764 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
9765 cq->queue_id);
9766 list_for_each_entry(dmabuf, &wq->page_list, list) {
9767 wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9768 putPaddrLow(dmabuf->phys);
9769 wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9770 putPaddrHigh(dmabuf->phys);
9771 }
9772 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9773 /* The IOCTL status is embedded in the mailbox subheader. */
9774 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
9775 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9776 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9777 if (shdr_status || shdr_add_status || rc) {
9778 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9779 "2503 WQ_CREATE mailbox failed with "
9780 "status x%x add_status x%x, mbx status x%x\n",
9781 shdr_status, shdr_add_status, rc);
9782 status = -ENXIO;
9783 goto out;
9784 }
9785 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
9786 if (wq->queue_id == 0xFFFF) {
9787 status = -ENXIO;
9788 goto out;
9789 }
9790 wq->type = LPFC_WQ;
9791 wq->subtype = subtype;
9792 wq->host_index = 0;
9793 wq->hba_index = 0;
9794
9795 /* link the wq onto the parent cq child list */
9796 list_add_tail(&wq->list, &cq->child_list);
9797out:
9798 if (rc != MBX_TIMEOUT)
9799 mempool_free(mbox, phba->mbox_mem_pool);
9800 return status;
9801}
9802
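/*
 * Illustrative sketch (not part of the driver): the creation order the
 * queue hierarchy implies.  A work queue needs the queue_id of its
 * parent completion queue, which in turn needs the queue_id of its
 * parent event queue, so the creates must run in EQ -> CQ -> WQ order.
 * The subtype value is a pass-through; no constant is assumed.
 */
static uint32_t lpfc_example_setup_wq(struct lpfc_hba *phba,
				      struct lpfc_queue *wq,
				      struct lpfc_queue *cq,
				      uint32_t subtype)
{
	if (!cq || cq->queue_id == 0xFFFF)
		return -ENXIO;	/* parent CQ must be created first */
	return lpfc_wq_create(phba, wq, cq, subtype);
}
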
9803/**
9804 * lpfc_rq_create - Create a Receive Queue on the HBA
9805 * @phba: HBA structure that indicates port to create a queue on.
9806 * @hrq: The queue structure to use to create the header receive queue.
9807 * @drq: The queue structure to use to create the data receive queue.
9808  * @cq: The completion queue to bind this receive queue pair to.
9809 *
9810  * This function creates a receive buffer queue pair, as detailed in @hrq and
9811 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
9812 * to the HBA.
9813 *
9814  * The @phba struct is used to send the mailbox command to the HBA. The @drq and @hrq
9815  * structs are used to get the entry count that is necessary to determine the
9816  * number of pages to use for this queue. The @cq is used to indicate which
9817  * completion queue received buffers posted to these queues are bound to.
9818  * This function will send the RQ_CREATE mailbox command to the HBA to set up the
9819  * receive queue pair. This function is synchronous and will wait for the
9820  * mailbox command to finish before continuing.
9821 *
9822 * On success this function will return a zero. If unable to allocate enough
9823 * memory this function will return ENOMEM. If the queue create mailbox command
9824 * fails this function will return ENXIO.
9825 **/
9826uint32_t
9827lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
9828 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
9829{
9830 struct lpfc_mbx_rq_create *rq_create;
9831 struct lpfc_dmabuf *dmabuf;
9832 LPFC_MBOXQ_t *mbox;
9833 int rc, length, status = 0;
9834 uint32_t shdr_status, shdr_add_status;
9835 union lpfc_sli4_cfg_shdr *shdr;
9836
9837 if (hrq->entry_count != drq->entry_count)
9838 return -EINVAL;
9839 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9840 if (!mbox)
9841 return -ENOMEM;
9842 length = (sizeof(struct lpfc_mbx_rq_create) -
9843 sizeof(struct lpfc_sli4_cfg_mhdr));
9844 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9845 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
9846 length, LPFC_SLI4_MBX_EMBED);
9847 rq_create = &mbox->u.mqe.un.rq_create;
9848 switch (hrq->entry_count) {
9849 default:
9850 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9851 "2535 Unsupported RQ count. (%d)\n",
9852 hrq->entry_count);
9853 if (hrq->entry_count < 512)
9854 return -EINVAL;
9855 /* otherwise default to smallest count (drop through) */
9856 case 512:
9857 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9858 LPFC_RQ_RING_SIZE_512);
9859 break;
9860 case 1024:
9861 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9862 LPFC_RQ_RING_SIZE_1024);
9863 break;
9864 case 2048:
9865 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9866 LPFC_RQ_RING_SIZE_2048);
9867 break;
9868 case 4096:
9869 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9870 LPFC_RQ_RING_SIZE_4096);
9871 break;
9872 }
9873 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
9874 cq->queue_id);
9875 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
9876 hrq->page_count);
9877 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
9878 LPFC_HDR_BUF_SIZE);
9879 list_for_each_entry(dmabuf, &hrq->page_list, list) {
9880 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9881 putPaddrLow(dmabuf->phys);
9882 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9883 putPaddrHigh(dmabuf->phys);
9884 }
9885 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9886 /* The IOCTL status is embedded in the mailbox subheader. */
9887 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
9888 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9889 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9890 if (shdr_status || shdr_add_status || rc) {
9891 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9892 "2504 RQ_CREATE mailbox failed with "
9893 "status x%x add_status x%x, mbx status x%x\n",
9894 shdr_status, shdr_add_status, rc);
9895 status = -ENXIO;
9896 goto out;
9897 }
9898 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
9899 if (hrq->queue_id == 0xFFFF) {
9900 status = -ENXIO;
9901 goto out;
9902 }
9903 hrq->type = LPFC_HRQ;
9904 hrq->subtype = subtype;
9905 hrq->host_index = 0;
9906 hrq->hba_index = 0;
9907
9908 /* now create the data queue */
9909 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9910 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
9911 length, LPFC_SLI4_MBX_EMBED);
9912 switch (drq->entry_count) {
9913 default:
9914 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9915 "2536 Unsupported RQ count. (%d)\n",
9916 drq->entry_count);
9917 if (drq->entry_count < 512)
9918 return -EINVAL;
9919 /* otherwise default to smallest count (drop through) */
9920 case 512:
9921 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9922 LPFC_RQ_RING_SIZE_512);
9923 break;
9924 case 1024:
9925 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9926 LPFC_RQ_RING_SIZE_1024);
9927 break;
9928 case 2048:
9929 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9930 LPFC_RQ_RING_SIZE_2048);
9931 break;
9932 case 4096:
9933 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9934 LPFC_RQ_RING_SIZE_4096);
9935 break;
9936 }
9937 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
9938 cq->queue_id);
9939 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
9940 drq->page_count);
9941 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
9942 LPFC_DATA_BUF_SIZE);
9943 list_for_each_entry(dmabuf, &drq->page_list, list) {
9944 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9945 putPaddrLow(dmabuf->phys);
9946 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9947 putPaddrHigh(dmabuf->phys);
9948 }
9949 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9950 /* The IOCTL status is embedded in the mailbox subheader. */
9951 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
9952 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9953 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9954 if (shdr_status || shdr_add_status || rc) {
9955 status = -ENXIO;
9956 goto out;
9957 }
9958 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
9959 if (drq->queue_id == 0xFFFF) {
9960 status = -ENXIO;
9961 goto out;
9962 }
9963 drq->type = LPFC_DRQ;
9964 drq->subtype = subtype;
9965 drq->host_index = 0;
9966 drq->hba_index = 0;
9967
9968 /* link the header and data RQs onto the parent cq child list */
9969 list_add_tail(&hrq->list, &cq->child_list);
9970 list_add_tail(&drq->list, &cq->child_list);
9971
9972out:
9973 if (rc != MBX_TIMEOUT)
9974 mempool_free(mbox, phba->mbox_mem_pool);
9975 return status;
9976}
9977
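/*
 * Illustrative sketch (not part of the driver): creating the
 * header/data receive queue pair.  lpfc_rq_create() rejects the pair
 * unless both queues were allocated with the same entry_count, so the
 * caller is expected to size them together.  The subtype is passed
 * through unchanged.
 */
static uint32_t lpfc_example_setup_rq_pair(struct lpfc_hba *phba,
					   struct lpfc_queue *hrq,
					   struct lpfc_queue *drq,
					   struct lpfc_queue *cq,
					   uint32_t subtype)
{
	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;	/* mirrors the check in lpfc_rq_create() */
	/* The header RQ receives LPFC_HDR_BUF_SIZE buffers and the data
	 * RQ receives LPFC_DATA_BUF_SIZE buffers; both are bound to @cq.
	 */
	return lpfc_rq_create(phba, hrq, drq, cq, subtype);
}
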
9978/**
9979 * lpfc_eq_destroy - Destroy an event Queue on the HBA
9980 * @eq: The queue structure associated with the queue to destroy.
9981 *
9982  * This function destroys a queue, as detailed in @eq, by sending a mailbox
9983 * command, specific to the type of queue, to the HBA.
9984 *
9985 * The @eq struct is used to get the queue ID of the queue to destroy.
9986 *
9987 * On success this function will return a zero. If the queue destroy mailbox
9988 * command fails this function will return ENXIO.
9989 **/
9990uint32_t
9991lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
9992{
9993 LPFC_MBOXQ_t *mbox;
9994 int rc, length, status = 0;
9995 uint32_t shdr_status, shdr_add_status;
9996 union lpfc_sli4_cfg_shdr *shdr;
9997
9998 if (!eq)
9999 return -ENODEV;
10000 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
10001 if (!mbox)
10002 return -ENOMEM;
10003 length = (sizeof(struct lpfc_mbx_eq_destroy) -
10004 sizeof(struct lpfc_sli4_cfg_mhdr));
10005 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
10006 LPFC_MBOX_OPCODE_EQ_DESTROY,
10007 length, LPFC_SLI4_MBX_EMBED);
10008 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
10009 eq->queue_id);
10010 mbox->vport = eq->phba->pport;
10011 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10012
10013 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
10014 /* The IOCTL status is embedded in the mailbox subheader. */
10015 shdr = (union lpfc_sli4_cfg_shdr *)
10016 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
10017 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10018 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10019 if (shdr_status || shdr_add_status || rc) {
10020 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10021 "2505 EQ_DESTROY mailbox failed with "
10022 "status x%x add_status x%x, mbx status x%x\n",
10023 shdr_status, shdr_add_status, rc);
10024 status = -ENXIO;
10025 }
10026
10027 /* Remove eq from any list */
10028 list_del_init(&eq->list);
10029 if (rc != MBX_TIMEOUT)
10030 mempool_free(mbox, eq->phba->mbox_mem_pool);
10031 return status;
10032}
10033
10034/**
10035 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
10036 * @cq: The queue structure associated with the queue to destroy.
10037 *
10038  * This function destroys a queue, as detailed in @cq, by sending a mailbox
10039 * command, specific to the type of queue, to the HBA.
10040 *
10041 * The @cq struct is used to get the queue ID of the queue to destroy.
10042 *
10043 * On success this function will return a zero. If the queue destroy mailbox
10044 * command fails this function will return ENXIO.
10045 **/
10046uint32_t
10047lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
10048{
10049 LPFC_MBOXQ_t *mbox;
10050 int rc, length, status = 0;
10051 uint32_t shdr_status, shdr_add_status;
10052 union lpfc_sli4_cfg_shdr *shdr;
10053
10054 if (!cq)
10055 return -ENODEV;
10056 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
10057 if (!mbox)
10058 return -ENOMEM;
10059 length = (sizeof(struct lpfc_mbx_cq_destroy) -
10060 sizeof(struct lpfc_sli4_cfg_mhdr));
10061 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
10062 LPFC_MBOX_OPCODE_CQ_DESTROY,
10063 length, LPFC_SLI4_MBX_EMBED);
10064 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
10065 cq->queue_id);
10066 mbox->vport = cq->phba->pport;
10067 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10068 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
10069 /* The IOCTL status is embedded in the mailbox subheader. */
10070 shdr = (union lpfc_sli4_cfg_shdr *)
10071 		&mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
10072 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10073 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10074 if (shdr_status || shdr_add_status || rc) {
10075 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10076 "2506 CQ_DESTROY mailbox failed with "
10077 "status x%x add_status x%x, mbx status x%x\n",
10078 shdr_status, shdr_add_status, rc);
10079 status = -ENXIO;
10080 }
10081 /* Remove cq from any list */
10082 list_del_init(&cq->list);
10083 if (rc != MBX_TIMEOUT)
10084 mempool_free(mbox, cq->phba->mbox_mem_pool);
10085 return status;
10086}
10087
10088/**
10089 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
10090  * @mq: The queue structure associated with the queue to destroy.
10091 *
10092  * This function destroys a queue, as detailed in @mq, by sending a mailbox
10093 * command, specific to the type of queue, to the HBA.
10094 *
10095 * The @mq struct is used to get the queue ID of the queue to destroy.
10096 *
10097 * On success this function will return a zero. If the queue destroy mailbox
10098 * command fails this function will return ENXIO.
10099 **/
10100uint32_t
10101lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
10102{
10103 LPFC_MBOXQ_t *mbox;
10104 int rc, length, status = 0;
10105 uint32_t shdr_status, shdr_add_status;
10106 union lpfc_sli4_cfg_shdr *shdr;
10107
10108 if (!mq)
10109 return -ENODEV;
10110 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
10111 if (!mbox)
10112 return -ENOMEM;
10113 length = (sizeof(struct lpfc_mbx_mq_destroy) -
10114 sizeof(struct lpfc_sli4_cfg_mhdr));
10115 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
10116 LPFC_MBOX_OPCODE_MQ_DESTROY,
10117 length, LPFC_SLI4_MBX_EMBED);
10118 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
10119 mq->queue_id);
10120 mbox->vport = mq->phba->pport;
10121 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10122 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
10123 /* The IOCTL status is embedded in the mailbox subheader. */
10124 shdr = (union lpfc_sli4_cfg_shdr *)
10125 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
10126 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10127 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10128 if (shdr_status || shdr_add_status || rc) {
10129 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10130 "2507 MQ_DESTROY mailbox failed with "
10131 "status x%x add_status x%x, mbx status x%x\n",
10132 shdr_status, shdr_add_status, rc);
10133 status = -ENXIO;
10134 }
10135 /* Remove mq from any list */
10136 list_del_init(&mq->list);
10137 if (rc != MBX_TIMEOUT)
10138 mempool_free(mbox, mq->phba->mbox_mem_pool);
10139 return status;
10140}
10141
10142/**
10143 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
10144 * @wq: The queue structure associated with the queue to destroy.
10145 *
10146  * This function destroys a queue, as detailed in @wq, by sending a mailbox
10147 * command, specific to the type of queue, to the HBA.
10148 *
10149 * The @wq struct is used to get the queue ID of the queue to destroy.
10150 *
10151 * On success this function will return a zero. If the queue destroy mailbox
10152 * command fails this function will return ENXIO.
10153 **/
10154uint32_t
10155lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
10156{
10157 LPFC_MBOXQ_t *mbox;
10158 int rc, length, status = 0;
10159 uint32_t shdr_status, shdr_add_status;
10160 union lpfc_sli4_cfg_shdr *shdr;
10161
10162 if (!wq)
10163 return -ENODEV;
10164 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
10165 if (!mbox)
10166 return -ENOMEM;
10167 length = (sizeof(struct lpfc_mbx_wq_destroy) -
10168 sizeof(struct lpfc_sli4_cfg_mhdr));
10169 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10170 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
10171 length, LPFC_SLI4_MBX_EMBED);
10172 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
10173 wq->queue_id);
10174 mbox->vport = wq->phba->pport;
10175 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10176 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
10177 shdr = (union lpfc_sli4_cfg_shdr *)
10178 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
10179 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10180 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10181 if (shdr_status || shdr_add_status || rc) {
10182 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10183 "2508 WQ_DESTROY mailbox failed with "
10184 "status x%x add_status x%x, mbx status x%x\n",
10185 shdr_status, shdr_add_status, rc);
10186 status = -ENXIO;
10187 }
10188 /* Remove wq from any list */
10189 list_del_init(&wq->list);
10190 if (rc != MBX_TIMEOUT)
10191 mempool_free(mbox, wq->phba->mbox_mem_pool);
10192 return status;
10193}
10194
10195/**
10196 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
10197  * @hrq, @drq: The header and data receive queue structures to destroy.
10198  *
10199  * This function destroys the receive queue pair, as detailed in @hrq and @drq,
10200  * by sending a mailbox command, specific to the type of queue, to the HBA.
10201  *
10202  * The @hrq and @drq structs are used to get the queue IDs of the queues to destroy.
10203 *
10204 * On success this function will return a zero. If the queue destroy mailbox
10205 * command fails this function will return ENXIO.
10206 **/
10207uint32_t
10208lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
10209 struct lpfc_queue *drq)
10210{
10211 LPFC_MBOXQ_t *mbox;
10212 int rc, length, status = 0;
10213 uint32_t shdr_status, shdr_add_status;
10214 union lpfc_sli4_cfg_shdr *shdr;
10215
10216 if (!hrq || !drq)
10217 return -ENODEV;
10218 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
10219 if (!mbox)
10220 return -ENOMEM;
10221 length = (sizeof(struct lpfc_mbx_rq_destroy) -
10222 sizeof(struct mbox_header));
10223 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10224 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
10225 length, LPFC_SLI4_MBX_EMBED);
10226 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
10227 hrq->queue_id);
10228 mbox->vport = hrq->phba->pport;
10229 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10230 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
10231 /* The IOCTL status is embedded in the mailbox subheader. */
10232 shdr = (union lpfc_sli4_cfg_shdr *)
10233 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
10234 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10235 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10236 if (shdr_status || shdr_add_status || rc) {
10237 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10238 "2509 RQ_DESTROY mailbox failed with "
10239 "status x%x add_status x%x, mbx status x%x\n",
10240 shdr_status, shdr_add_status, rc);
10241 if (rc != MBX_TIMEOUT)
10242 mempool_free(mbox, hrq->phba->mbox_mem_pool);
10243 return -ENXIO;
10244 }
10245 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
10246 drq->queue_id);
10247 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
10248 shdr = (union lpfc_sli4_cfg_shdr *)
10249 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
10250 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10251 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10252 if (shdr_status || shdr_add_status || rc) {
10253 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10254 "2510 RQ_DESTROY mailbox failed with "
10255 "status x%x add_status x%x, mbx status x%x\n",
10256 shdr_status, shdr_add_status, rc);
10257 status = -ENXIO;
10258 }
10259 list_del_init(&hrq->list);
10260 list_del_init(&drq->list);
10261 if (rc != MBX_TIMEOUT)
10262 mempool_free(mbox, hrq->phba->mbox_mem_pool);
10263 return status;
10264}
10265
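/*
 * Illustrative sketch (not part of the driver): tearing a queue
 * hierarchy down in the reverse of the order it was created.  Child
 * queues (here the WQ and the RQ pair) are destroyed before their
 * parent CQ, and the CQ before its parent EQ, so each *_destroy
 * routine can unlink the queue from its parent's child_list cleanly.
 * All queue pointers are assumed to have been created earlier by the
 * corresponding *_create calls.
 */
static void lpfc_example_teardown(struct lpfc_hba *phba,
				  struct lpfc_queue *wq,
				  struct lpfc_queue *hrq,
				  struct lpfc_queue *drq,
				  struct lpfc_queue *cq,
				  struct lpfc_queue *eq)
{
	(void) lpfc_wq_destroy(phba, wq);
	(void) lpfc_rq_destroy(phba, hrq, drq);
	(void) lpfc_cq_destroy(phba, cq);
	(void) lpfc_eq_destroy(phba, eq);
}
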
10266/**
10267 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
10268  * @phba: pointer to lpfc hba data structure.
10269 * @pdma_phys_addr0: Physical address of the 1st SGL page.
10270 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
10271 * @xritag: the xritag that ties this io to the SGL pages.
10272 *
10273 * This routine will post the sgl pages for the IO that has the xritag
10274 * that is in the iocbq structure. The xritag is assigned during iocbq
10275 * creation and persists for as long as the driver is loaded.
10276  * If the caller has fewer than 256 scatter gather segments to map then
10277  * pdma_phys_addr1 should be 0.
10278  * If the caller needs to map more than 256 scatter gather segments then
10279  * pdma_phys_addr1 should be a valid physical address.
10280  * Physical addresses for SGLs must be 64 byte aligned.
10281  * If you are going to map two SGLs then the first one must have 256 entries
10282  * and the second SGL can have between 1 and 256 entries.
10283 *
10284 * Return codes:
10285 * 0 - Success
10286 * -ENXIO, -ENOMEM - Failure
10287 **/
10288int
10289lpfc_sli4_post_sgl(struct lpfc_hba *phba,
10290 dma_addr_t pdma_phys_addr0,
10291 dma_addr_t pdma_phys_addr1,
10292 uint16_t xritag)
10293{
10294 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
10295 LPFC_MBOXQ_t *mbox;
10296 int rc;
10297 uint32_t shdr_status, shdr_add_status;
10298 union lpfc_sli4_cfg_shdr *shdr;
10299
10300 if (xritag == NO_XRI) {
10301 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10302 "0364 Invalid param:\n");
10303 return -EINVAL;
10304 }
10305
10306 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10307 if (!mbox)
10308 return -ENOMEM;
10309
10310 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10311 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
10312 sizeof(struct lpfc_mbx_post_sgl_pages) -
10313 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
10314
10315 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
10316 &mbox->u.mqe.un.post_sgl_pages;
10317 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
10318 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
10319
10320 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
10321 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
10322 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
10323 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
10324
10325 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
10326 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
10327 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
10328 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
10329 if (!phba->sli4_hba.intr_enable)
10330 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10331 else
10332 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10333 /* The IOCTL status is embedded in the mailbox subheader. */
10334 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
10335 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10336 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10337 if (rc != MBX_TIMEOUT)
10338 mempool_free(mbox, phba->mbox_mem_pool);
10339 if (shdr_status || shdr_add_status || rc) {
10340 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10341 "2511 POST_SGL mailbox failed with "
10342 "status x%x add_status x%x, mbx status x%x\n",
10343 shdr_status, shdr_add_status, rc);
10344 rc = -ENXIO;
10345 }
10346 	return rc;
10347}
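
/*
 * Illustrative sketch (not part of the driver): posting the SGL for an
 * XRI whose scatter gather list fits in a single 64-byte-aligned page
 * (256 entries or fewer), so the second page address is passed as 0 as
 * described in the comment above.  @sgl_phys is assumed to be the DMA
 * address of that page.
 */
static int lpfc_example_post_one_sgl(struct lpfc_hba *phba,
				     dma_addr_t sgl_phys, uint16_t xritag)
{
	if (xritag == NO_XRI)
		return -EINVAL;
	return lpfc_sli4_post_sgl(phba, sgl_phys, 0, xritag);
}
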
10348/**
10349  * lpfc_sli4_remove_all_sgl_pages - Remove all posted SGL pages from the HBA
10350  * @phba: pointer to lpfc hba data structure.
10351 *
10352 * This routine will remove all of the sgl pages registered with the hba.
10353 *
10354 * Return codes:
10355 * 0 - Success
10356 * -ENXIO, -ENOMEM - Failure
10357 **/
10358int
10359lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba)
10360{
10361 LPFC_MBOXQ_t *mbox;
10362 int rc;
10363 uint32_t shdr_status, shdr_add_status;
10364 union lpfc_sli4_cfg_shdr *shdr;
10365
10366 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10367 if (!mbox)
10368 return -ENOMEM;
10369
10370 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10371 LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0,
10372 LPFC_SLI4_MBX_EMBED);
10373 if (!phba->sli4_hba.intr_enable)
10374 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10375 else
10376 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10377 /* The IOCTL status is embedded in the mailbox subheader. */
10378 shdr = (union lpfc_sli4_cfg_shdr *)
10379 &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
10380 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10381 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10382 if (rc != MBX_TIMEOUT)
10383 mempool_free(mbox, phba->mbox_mem_pool);
10384 if (shdr_status || shdr_add_status || rc) {
10385 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10386 "2512 REMOVE_ALL_SGL_PAGES mailbox failed with "
10387 "status x%x add_status x%x, mbx status x%x\n",
10388 shdr_status, shdr_add_status, rc);
10389 rc = -ENXIO;
10390 }
10391 return rc;
10392}
10393
10394/**
10395 * lpfc_sli4_next_xritag - Get an xritag for the io
10396 * @phba: Pointer to HBA context object.
10397 *
10398 * This function gets an xritag for the iocb. If there is no unused xritag
10399 * it will return 0xffff.
10400  * The function returns the allocated xritag if successful, otherwise it
10401  * returns 0xffff, which is not a valid xritag.
10402 * The caller is not required to hold any lock.
10403 **/
10404uint16_t
10405lpfc_sli4_next_xritag(struct lpfc_hba *phba)
10406{
10407 uint16_t xritag;
10408
10409 spin_lock_irq(&phba->hbalock);
10410 xritag = phba->sli4_hba.next_xri;
10411 if ((xritag != (uint16_t) -1) && xritag <
10412 (phba->sli4_hba.max_cfg_param.max_xri
10413 + phba->sli4_hba.max_cfg_param.xri_base)) {
10414 phba->sli4_hba.next_xri++;
10415 phba->sli4_hba.max_cfg_param.xri_used++;
10416 spin_unlock_irq(&phba->hbalock);
10417 return xritag;
10418 }
10419 spin_unlock_irq(&phba->hbalock);
10420
10421 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10422 			"2004 Failed to allocate XRI. Last XRITAG is %d"
10423 " Max XRI is %d, Used XRI is %d\n",
10424 phba->sli4_hba.next_xri,
10425 phba->sli4_hba.max_cfg_param.max_xri,
10426 phba->sli4_hba.max_cfg_param.xri_used);
10427 return -1;
10428}
10429
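/*
 * Illustrative sketch (not part of the driver): consuming an xritag.
 * On exhaustion lpfc_sli4_next_xritag() returns -1, which as a
 * uint16_t value is 0xffff, so a caller checks for that sentinel
 * before using the tag.
 */
static int lpfc_example_take_xritag(struct lpfc_hba *phba, uint16_t *tag)
{
	uint16_t xritag = lpfc_sli4_next_xritag(phba);

	if (xritag == 0xffff)
		return -ENOMEM;	/* no XRI resources left */
	*tag = xritag;
	return 0;
}
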
10430/**
10431 * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware.
10432 * @phba: pointer to lpfc hba data structure.
10433 *
10434 * This routine is invoked to post a block of driver's sgl pages to the
10435 * HBA using non-embedded mailbox command. No Lock is held. This routine
10436 * is only called when the driver is loading and after all IO has been
10437 * stopped.
10438 **/
10439int
10440lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
10441{
10442 struct lpfc_sglq *sglq_entry;
10443 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
10444 struct sgl_page_pairs *sgl_pg_pairs;
10445 void *viraddr;
10446 LPFC_MBOXQ_t *mbox;
10447 uint32_t reqlen, alloclen, pg_pairs;
10448 uint32_t mbox_tmo;
10449 uint16_t xritag_start = 0;
10450 int els_xri_cnt, rc = 0;
10451 uint32_t shdr_status, shdr_add_status;
10452 union lpfc_sli4_cfg_shdr *shdr;
10453
10454 /* The number of sgls to be posted */
10455 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
10456
10457 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
10458 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
10459 if (reqlen > PAGE_SIZE) {
10460 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10461 "2559 Block sgl registration required DMA "
10462 				"size (%d) greater than a page\n", reqlen);
10463 return -ENOMEM;
10464 }
10465 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10466 if (!mbox) {
10467 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10468 "2560 Failed to allocate mbox cmd memory\n");
10469 return -ENOMEM;
10470 }
10471
10472 /* Allocate DMA memory and set up the non-embedded mailbox command */
10473 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10474 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
10475 LPFC_SLI4_MBX_NEMBED);
10476
10477 if (alloclen < reqlen) {
10478 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10479 "0285 Allocated DMA memory size (%d) is "
10480 "less than the requested DMA memory "
10481 "size (%d)\n", alloclen, reqlen);
10482 lpfc_sli4_mbox_cmd_free(phba, mbox);
10483 return -ENOMEM;
10484 }
10485
10486 /* Get the first SGE entry from the non-embedded DMA memory */
10487 if (unlikely(!mbox->sge_array)) {
10488 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10489 "2525 Failed to get the non-embedded SGE "
10490 "virtual address\n");
10491 lpfc_sli4_mbox_cmd_free(phba, mbox);
10492 return -ENOMEM;
10493 }
10494 viraddr = mbox->sge_array->addr[0];
10495
10496 /* Set up the SGL pages in the non-embedded DMA pages */
10497 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
10498 sgl_pg_pairs = &sgl->sgl_pg_pairs;
10499
10500 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
10501 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
10502 /* Set up the sge entry */
10503 sgl_pg_pairs->sgl_pg0_addr_lo =
10504 cpu_to_le32(putPaddrLow(sglq_entry->phys));
10505 sgl_pg_pairs->sgl_pg0_addr_hi =
10506 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
10507 sgl_pg_pairs->sgl_pg1_addr_lo =
10508 cpu_to_le32(putPaddrLow(0));
10509 sgl_pg_pairs->sgl_pg1_addr_hi =
10510 cpu_to_le32(putPaddrHigh(0));
10511 /* Keep the first xritag on the list */
10512 if (pg_pairs == 0)
10513 xritag_start = sglq_entry->sli4_xritag;
10514 sgl_pg_pairs++;
10515 }
10516 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
10517 pg_pairs = (pg_pairs > 0) ? (pg_pairs - 1) : pg_pairs;
10518 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
10519 /* Perform endian conversion if necessary */
10520 sgl->word0 = cpu_to_le32(sgl->word0);
10521
10522 if (!phba->sli4_hba.intr_enable)
10523 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10524 else {
10525 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
10526 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
10527 }
10528 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
10529 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10530 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10531 if (rc != MBX_TIMEOUT)
10532 lpfc_sli4_mbox_cmd_free(phba, mbox);
10533 if (shdr_status || shdr_add_status || rc) {
10534 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10535 "2513 POST_SGL_BLOCK mailbox command failed "
10536 "status x%x add_status x%x mbx status x%x\n",
10537 shdr_status, shdr_add_status, rc);
10538 rc = -ENXIO;
10539 }
10540 return rc;
10541}
10542
10543/**
10544 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
10545 * @phba: pointer to lpfc hba data structure.
10546 * @sblist: pointer to scsi buffer list.
10547 * @count: number of scsi buffers on the list.
10548 *
10549 * This routine is invoked to post a block of @count scsi sgl pages from a
10550 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
10551 * No Lock is held.
10552 *
10553 **/
10554int
10555lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
10556 int cnt)
10557{
10558 struct lpfc_scsi_buf *psb;
10559 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
10560 struct sgl_page_pairs *sgl_pg_pairs;
10561 void *viraddr;
10562 LPFC_MBOXQ_t *mbox;
10563 uint32_t reqlen, alloclen, pg_pairs;
10564 uint32_t mbox_tmo;
10565 uint16_t xritag_start = 0;
10566 int rc = 0;
10567 uint32_t shdr_status, shdr_add_status;
10568 dma_addr_t pdma_phys_bpl1;
10569 union lpfc_sli4_cfg_shdr *shdr;
10570
10571 /* Calculate the requested length of the dma memory */
10572 reqlen = cnt * sizeof(struct sgl_page_pairs) +
10573 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
10574 if (reqlen > PAGE_SIZE) {
10575 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10576 "0217 Block sgl registration required DMA "
10577 				"size (%d) greater than a page\n", reqlen);
10578 return -ENOMEM;
10579 }
10580 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10581 if (!mbox) {
10582 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10583 "0283 Failed to allocate mbox cmd memory\n");
10584 return -ENOMEM;
10585 }
10586
10587 /* Allocate DMA memory and set up the non-embedded mailbox command */
10588 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10589 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
10590 LPFC_SLI4_MBX_NEMBED);
10591
10592 if (alloclen < reqlen) {
10593 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10594 "2561 Allocated DMA memory size (%d) is "
10595 "less than the requested DMA memory "
10596 "size (%d)\n", alloclen, reqlen);
10597 lpfc_sli4_mbox_cmd_free(phba, mbox);
10598 return -ENOMEM;
10599 }
10600
10601 /* Get the first SGE entry from the non-embedded DMA memory */
10602 if (unlikely(!mbox->sge_array)) {
10603 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10604 "2565 Failed to get the non-embedded SGE "
10605 "virtual address\n");
10606 lpfc_sli4_mbox_cmd_free(phba, mbox);
10607 return -ENOMEM;
10608 }
10609 viraddr = mbox->sge_array->addr[0];
10610
10611 /* Set up the SGL pages in the non-embedded DMA pages */
10612 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
10613 sgl_pg_pairs = &sgl->sgl_pg_pairs;
10614
10615 pg_pairs = 0;
10616 list_for_each_entry(psb, sblist, list) {
10617 /* Set up the sge entry */
10618 sgl_pg_pairs->sgl_pg0_addr_lo =
10619 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
10620 sgl_pg_pairs->sgl_pg0_addr_hi =
10621 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
10622 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
10623 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
10624 else
10625 pdma_phys_bpl1 = 0;
10626 sgl_pg_pairs->sgl_pg1_addr_lo =
10627 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
10628 sgl_pg_pairs->sgl_pg1_addr_hi =
10629 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
10630 /* Keep the first xritag on the list */
10631 if (pg_pairs == 0)
10632 xritag_start = psb->cur_iocbq.sli4_xritag;
10633 sgl_pg_pairs++;
10634 pg_pairs++;
10635 }
10636 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
10637 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
10638 /* Perform endian conversion if necessary */
10639 sgl->word0 = cpu_to_le32(sgl->word0);
10640
10641 if (!phba->sli4_hba.intr_enable)
10642 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10643 else {
10644 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
10645 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
10646 }
10647 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
10648 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10649 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10650 if (rc != MBX_TIMEOUT)
10651 lpfc_sli4_mbox_cmd_free(phba, mbox);
10652 if (shdr_status || shdr_add_status || rc) {
10653 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10654 "2564 POST_SGL_BLOCK mailbox command failed "
10655 "status x%x add_status x%x mbx status x%x\n",
10656 shdr_status, shdr_add_status, rc);
10657 rc = -ENXIO;
10658 }
10659 return rc;
10660}
10661
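/*
 * Illustrative sketch (not part of the driver): the reqlen check above
 * bounds how many SCSI buffers one non-embedded POST_SGL_PAGES command
 * can carry in a single page of DMA memory.  This helper simply
 * inverts that formula; it is an example of the arithmetic, not an
 * exported driver interface.
 */
static inline uint32_t lpfc_example_max_sgl_block_cnt(void)
{
	return (PAGE_SIZE - sizeof(union lpfc_sli4_cfg_shdr) -
		sizeof(uint32_t)) / sizeof(struct sgl_page_pairs);
}
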
10662/**
10663 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
10664 * @phba: pointer to lpfc_hba struct that the frame was received on
10665 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10666 *
10667 * This function checks the fields in the @fc_hdr to see if the FC frame is a
10668 * valid type of frame that the LPFC driver will handle. This function will
10669 * return a zero if the frame is a valid frame or a non zero value when the
10670 * frame does not pass the check.
10671 **/
10672static int
10673lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
10674{
10675 char *rctl_names[] = FC_RCTL_NAMES_INIT;
10676 char *type_names[] = FC_TYPE_NAMES_INIT;
10677 struct fc_vft_header *fc_vft_hdr;
10678
10679 switch (fc_hdr->fh_r_ctl) {
10680 case FC_RCTL_DD_UNCAT: /* uncategorized information */
10681 case FC_RCTL_DD_SOL_DATA: /* solicited data */
10682 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
10683 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
10684 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
10685 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
10686 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
10687 case FC_RCTL_DD_CMD_STATUS: /* command status */
10688 case FC_RCTL_ELS_REQ: /* extended link services request */
10689 case FC_RCTL_ELS_REP: /* extended link services reply */
10690 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
10691 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
10692 case FC_RCTL_BA_NOP: /* basic link service NOP */
10693 case FC_RCTL_BA_ABTS: /* basic link service abort */
10694 case FC_RCTL_BA_RMC: /* remove connection */
10695 case FC_RCTL_BA_ACC: /* basic accept */
10696 case FC_RCTL_BA_RJT: /* basic reject */
10697 case FC_RCTL_BA_PRMT:
10698 case FC_RCTL_ACK_1: /* acknowledge_1 */
10699 case FC_RCTL_ACK_0: /* acknowledge_0 */
10700 case FC_RCTL_P_RJT: /* port reject */
10701 case FC_RCTL_F_RJT: /* fabric reject */
10702 case FC_RCTL_P_BSY: /* port busy */
10703 case FC_RCTL_F_BSY: /* fabric busy to data frame */
10704 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
10705 case FC_RCTL_LCR: /* link credit reset */
10706 case FC_RCTL_END: /* end */
10707 break;
10708 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
10709 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
10710 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
10711 return lpfc_fc_frame_check(phba, fc_hdr);
10712 default:
10713 goto drop;
10714 }
10715 switch (fc_hdr->fh_type) {
10716 case FC_TYPE_BLS:
10717 case FC_TYPE_ELS:
10718 case FC_TYPE_FCP:
10719 case FC_TYPE_CT:
10720 break;
10721 case FC_TYPE_IP:
10722 case FC_TYPE_ILS:
10723 default:
10724 goto drop;
10725 }
10726 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10727 "2538 Received frame rctl:%s type:%s\n",
10728 rctl_names[fc_hdr->fh_r_ctl],
10729 type_names[fc_hdr->fh_type]);
10730 return 0;
10731drop:
10732 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
10733 "2539 Dropped frame rctl:%s type:%s\n",
10734 rctl_names[fc_hdr->fh_r_ctl],
10735 type_names[fc_hdr->fh_type]);
10736 return 1;
10737}
10738
10739/**
10740 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
10741 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10742 *
10743 * This function processes the FC header to retrieve the VFI from the VF
10744 * header, if one exists. This function will return the VFI if one exists
10745 * or 0 if no VSAN Header exists.
10746 **/
10747static uint32_t
10748lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
10749{
10750 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
10751
10752 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
10753 return 0;
10754 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
10755}
10756
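/*
 * Illustrative sketch (not part of the driver): assembling a 24-bit FC
 * address from the three big-endian bytes in the frame header.  This
 * mirrors the open-coded expression used for the D_ID in
 * lpfc_fc_frame_to_vport() below and for the S_ID in lpfc_prep_seq();
 * e.g. bytes {0xFF, 0xFF, 0xFE} yield the well-known fabric F_Port
 * address 0xFFFFFE.
 */
static inline uint32_t lpfc_example_fc_hdr_did(struct fc_frame_header *fc_hdr)
{
	return (fc_hdr->fh_d_id[0] << 16 |
		fc_hdr->fh_d_id[1] << 8 |
		fc_hdr->fh_d_id[2]);
}
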
10757/**
10758 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
10759 * @phba: Pointer to the HBA structure to search for the vport on
10760 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10761 * @fcfi: The FC Fabric ID that the frame came from
10762 *
10763 * This function searches the @phba for a vport that matches the content of the
10764 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
10765 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
10766 * returns the matching vport pointer or NULL if unable to match frame to a
10767 * vport.
10768 **/
10769static struct lpfc_vport *
10770lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
10771 uint16_t fcfi)
10772{
10773 struct lpfc_vport **vports;
10774 struct lpfc_vport *vport = NULL;
10775 int i;
10776 uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
10777 fc_hdr->fh_d_id[1] << 8 |
10778 fc_hdr->fh_d_id[2]);
10779
10780 vports = lpfc_create_vport_work_array(phba);
10781 if (vports != NULL)
10782 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
10783 if (phba->fcf.fcfi == fcfi &&
10784 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
10785 vports[i]->fc_myDID == did) {
10786 vport = vports[i];
10787 break;
10788 }
10789 }
10790 lpfc_destroy_vport_work_array(phba, vports);
10791 return vport;
10792}
10793
10794/**
10795 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
10796 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
10797 *
10798 * This function searches through the existing incomplete sequences that have
10799 * been sent to this @vport. If the frame matches one of the incomplete
10800 * sequences then the dbuf in the @dmabuf is added to the list of frames that
10801 * make up that sequence. If no sequence is found that matches this frame then
10802  * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
10803 * This function returns a pointer to the first dmabuf in the sequence list that
10804 * the frame was linked to.
10805 **/
10806static struct hbq_dmabuf *
10807lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10808{
10809 struct fc_frame_header *new_hdr;
10810 struct fc_frame_header *temp_hdr;
10811 struct lpfc_dmabuf *d_buf;
10812 struct lpfc_dmabuf *h_buf;
10813 struct hbq_dmabuf *seq_dmabuf = NULL;
10814 struct hbq_dmabuf *temp_dmabuf = NULL;
10815
10816 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10817 /* Use the hdr_buf to find the sequence that this frame belongs to */
10818 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
10819 temp_hdr = (struct fc_frame_header *)h_buf->virt;
10820 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
10821 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
10822 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
10823 continue;
10824 /* found a pending sequence that matches this frame */
10825 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
10826 break;
10827 }
10828 if (!seq_dmabuf) {
10829 /*
10830 * This indicates first frame received for this sequence.
10831 * Queue the buffer on the vport's rcv_buffer_list.
10832 */
10833 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
10834 return dmabuf;
10835 }
10836 temp_hdr = seq_dmabuf->hbuf.virt;
10837 if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
10838 list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list);
10839 return dmabuf;
10840 }
10841 /* find the correct place in the sequence to insert this frame */
10842 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
10843 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
10844 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
10845 /*
10846 * If the frame's sequence count is greater than the frame on
10847 * the list then insert the frame right after this frame
10848 */
10849 if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) {
10850 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
10851 return seq_dmabuf;
10852 }
10853 }
10854 return NULL;
10855}
10856
10857/**
10858 * lpfc_seq_complete - Indicates if a sequence is complete
10859 * @dmabuf: pointer to a dmabuf that describes the FC sequence
10860 *
10861 * This function checks the sequence, starting with the frame described by
10862 * @dmabuf, to see if all the frames associated with this sequence are present.
10863 * the frames associated with this sequence are linked to the @dmabuf using the
10864  * dbuf list. This function looks for three major things. 1) That the first frame
10865  * has a sequence count of zero. 2) That a frame has the last frame of sequence
10866  * bit set. 3) That there are no holes in the sequence count. The function will
10867 * return 1 when the sequence is complete, otherwise it will return 0.
10868 **/
10869static int
10870lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
10871{
10872 struct fc_frame_header *hdr;
10873 struct lpfc_dmabuf *d_buf;
10874 struct hbq_dmabuf *seq_dmabuf;
10875 uint32_t fctl;
10876 int seq_count = 0;
10877
10878 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10879 	/* make sure first frame of sequence has a sequence count of zero */
10880 if (hdr->fh_seq_cnt != seq_count)
10881 return 0;
10882 fctl = (hdr->fh_f_ctl[0] << 16 |
10883 hdr->fh_f_ctl[1] << 8 |
10884 hdr->fh_f_ctl[2]);
10885 /* If last frame of sequence we can return success. */
10886 if (fctl & FC_FC_END_SEQ)
10887 return 1;
10888 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
10889 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
10890 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10891 /* If there is a hole in the sequence count then fail. */
10892 if (++seq_count != hdr->fh_seq_cnt)
10893 return 0;
10894 fctl = (hdr->fh_f_ctl[0] << 16 |
10895 hdr->fh_f_ctl[1] << 8 |
10896 hdr->fh_f_ctl[2]);
10897 /* If last frame of sequence we can return success. */
10898 if (fctl & FC_FC_END_SEQ)
10899 return 1;
10900 }
10901 return 0;
10902}
10903
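/*
 * Worked example for lpfc_seq_complete() (illustration only): frames
 * carrying SEQ_CNT values 0, 1, 2 with FC_FC_END_SEQ set in the f_ctl
 * of the SEQ_CNT 2 frame are considered a complete sequence (return 1).
 * Frames with SEQ_CNT 0, 2, 3 are considered incomplete (return 0)
 * because of the hole at SEQ_CNT 1, as are frames 0 and 1 alone when
 * neither carries FC_FC_END_SEQ.
 */
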
10904/**
10905 * lpfc_prep_seq - Prep sequence for ULP processing
10906 * @vport: Pointer to the vport on which this sequence was received
10907 * @dmabuf: pointer to a dmabuf that describes the FC sequence
10908 *
10909 * This function takes a sequence, described by a list of frames, and creates
10910 * a list of iocbq structures to describe the sequence. This iocbq list will be
10911 * used to issue to the generic unsolicited sequence handler. This routine
10912 * returns a pointer to the first iocbq in the list. If the function is unable
10913  * to allocate an iocbq then it will throw out the received frames that could
10914  * not be described and return a pointer to the first iocbq. If unable to
10915 * allocate any iocbqs (including the first) this function will return NULL.
10916 **/
10917static struct lpfc_iocbq *
10918lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10919{
10920 struct lpfc_dmabuf *d_buf, *n_buf;
10921 struct lpfc_iocbq *first_iocbq, *iocbq;
10922 struct fc_frame_header *fc_hdr;
10923 uint32_t sid;
10924
10925 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10926 /* remove from receive buffer list */
10927 list_del_init(&seq_dmabuf->hbuf.list);
10928 /* get the Remote Port's SID */
10929 sid = (fc_hdr->fh_s_id[0] << 16 |
10930 fc_hdr->fh_s_id[1] << 8 |
10931 fc_hdr->fh_s_id[2]);
10932 /* Get an iocbq struct to fill in. */
10933 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
10934 if (first_iocbq) {
10935 /* Initialize the first IOCB. */
10936 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
10937 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
10938 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
10939 first_iocbq->iocb.unsli3.rcvsli3.vpi =
10940 vport->vpi + vport->phba->vpi_base;
10941 /* put the first buffer into the first IOCBq */
10942 first_iocbq->context2 = &seq_dmabuf->dbuf;
10943 first_iocbq->context3 = NULL;
10944 first_iocbq->iocb.ulpBdeCount = 1;
10945 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
10946 LPFC_DATA_BUF_SIZE;
10947 first_iocbq->iocb.un.rcvels.remoteID = sid;
10948 }
10949 iocbq = first_iocbq;
10950 /*
10951 * Each IOCBq can have two Buffers assigned, so go through the list
10952 * of buffers for this sequence and save two buffers in each IOCBq
10953 */
10954 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
10955 if (!iocbq) {
10956 lpfc_in_buf_free(vport->phba, d_buf);
10957 continue;
10958 }
10959 if (!iocbq->context3) {
10960 iocbq->context3 = d_buf;
10961 iocbq->iocb.ulpBdeCount++;
10962 iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
10963 LPFC_DATA_BUF_SIZE;
10964 } else {
10965 iocbq = lpfc_sli_get_iocbq(vport->phba);
10966 if (!iocbq) {
10967 if (first_iocbq) {
10968 first_iocbq->iocb.ulpStatus =
10969 IOSTAT_FCP_RSP_ERROR;
10970 first_iocbq->iocb.un.ulpWord[4] =
10971 IOERR_NO_RESOURCES;
10972 }
10973 lpfc_in_buf_free(vport->phba, d_buf);
10974 continue;
10975 }
10976 iocbq->context2 = d_buf;
10977 iocbq->context3 = NULL;
10978 iocbq->iocb.ulpBdeCount = 1;
10979 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
10980 LPFC_DATA_BUF_SIZE;
10981 iocbq->iocb.un.rcvels.remoteID = sid;
10982 list_add_tail(&iocbq->list, &first_iocbq->list);
10983 }
10984 }
10985 return first_iocbq;
10986}
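/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * consuming the iocbq chain built by lpfc_prep_seq().  Continuation
 * iocbqs are linked onto the first iocbq's list head, so the whole
 * sequence can be visited with list_for_each_entry(); each iocbq
 * describes at most two buffers (ulpBdeCount).
 */
static int lpfc_example_count_seq_buffers(struct lpfc_iocbq *first_iocbq)
{
	struct lpfc_iocbq *iocbq;
	int bufcnt = first_iocbq->iocb.ulpBdeCount;

	list_for_each_entry(iocbq, &first_iocbq->list, list)
		bufcnt += iocbq->iocb.ulpBdeCount;
	return bufcnt;
}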
10987
10988/**
10989 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
10990 * @phba: Pointer to HBA context object.
10991 *
10992 * This function is called with no lock held. It processes all the received
10993 * buffers and hands a sequence to the upper layer once a received buffer
10994 * indicates that it is the final frame in that sequence. The interrupt
10995 * service routine processes received buffers in interrupt context, adds the
10996 * received dma buffers to the rb_pend_list queue, and signals the worker thread.
10997 * The worker thread calls lpfc_sli4_handle_received_buffer, which in turn calls
10998 * the appropriate receive function when the final frame in a sequence arrives.
10999 **/
11000int
11001lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
11002{
11003 LIST_HEAD(cmplq);
11004 struct hbq_dmabuf *dmabuf, *seq_dmabuf;
11005 struct fc_frame_header *fc_hdr;
11006 struct lpfc_vport *vport;
11007 uint32_t fcfi;
11008 struct lpfc_iocbq *iocbq;
11009
11010 /* Clear hba flag and get all received buffers into the cmplq */
11011 spin_lock_irq(&phba->hbalock);
11012 phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
11013 list_splice_init(&phba->rb_pend_list, &cmplq);
11014 spin_unlock_irq(&phba->hbalock);
11015
11016 /* Process each received buffer */
11017 while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) {
11018 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
11019		/* check to see if this is a valid type of frame */
11020 if (lpfc_fc_frame_check(phba, fc_hdr)) {
11021 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11022 continue;
11023 }
11024 fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe);
11025 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
11026 if (!vport) {
11027 /* throw out the frame */
11028 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11029 continue;
11030 }
11031 /* Link this frame */
11032 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
11033 if (!seq_dmabuf) {
11034 /* unable to add frame to vport - throw it out */
11035 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11036 continue;
11037 }
11038 /* If not last frame in sequence continue processing frames. */
11039 if (!lpfc_seq_complete(seq_dmabuf)) {
11040 /*
11041			 * When saving off frames, post a new buffer and mark
11042			 * this frame to be freed when its sequence completes.
11043			 */
11044 lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
11045 dmabuf->tag = -1;
11046 continue;
11047 }
11048 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
11049 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
11050 if (!lpfc_complete_unsol_iocb(phba,
11051 &phba->sli.ring[LPFC_ELS_RING],
11052 iocbq, fc_hdr->fh_r_ctl,
11053 fc_hdr->fh_type))
11054 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11055 "2540 Ring %d handler: unexpected Rctl "
11056 "x%x Type x%x received\n",
11057 LPFC_ELS_RING,
11058 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
11059	}
11060 return 0;
11061}
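/*
 * Illustrative sketch (generic pattern with hypothetical names): the
 * splice-under-lock idiom used by lpfc_sli4_handle_received_buffer().
 * The pending list is moved onto a local list while the lock is held,
 * then each element is handled with the lock dropped so the interrupt
 * handler can keep queueing new buffers.
 */
struct example_item {
	struct list_head list;
};

static void example_drain_pending(struct list_head *pend_list,
				  spinlock_t *lock)
{
	LIST_HEAD(local);
	struct example_item *item, *next;

	spin_lock_irq(lock);
	list_splice_init(pend_list, &local);
	spin_unlock_irq(lock);

	list_for_each_entry_safe(item, next, &local, list) {
		list_del_init(&item->list);
		/* process 'item' without holding the lock */
	}
}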
11062
11063/**
11064 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
11065 * @phba: pointer to lpfc hba data structure.
11066 *
11067 * This routine is invoked to post rpi header templates to the
11068 * HBA consistent with the SLI-4 interface spec. This routine
11069 * posts each PAGE_SIZE rpi header memory region to the port; a region
11070 * holds up to 64 rpi context headers.
11071 *
11072 * This routine does not require any locks. Its use is expected at
11073 * driver load or during reset recovery, when driver execution is
11074 * sequential.
11075 *
11076 * Return codes
11077 * 	0 - successful
11078 * EIO - The mailbox failed to complete successfully.
11079 * When this error occurs, the driver is not guaranteed
11080 * to have any rpi regions posted to the device and
11081 * must either attempt to repost the regions or take a
11082 * fatal error.
11083 **/
11084int
11085lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
11086{
11087 struct lpfc_rpi_hdr *rpi_page;
11088 uint32_t rc = 0;
11089
11090 /* Post all rpi memory regions to the port. */
11091 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
11092 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
11093 if (rc != MBX_SUCCESS) {
11094 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11095 "2008 Error %d posting all rpi "
11096 "headers\n", rc);
11097 rc = -EIO;
11098 break;
11099 }
11100 }
11101
11102 return rc;
11103}
11104
11105/**
11106 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
11107 * @phba: pointer to lpfc hba data structure.
11108 * @rpi_page: pointer to the rpi memory region.
11109 *
11110 * This routine is invoked to post a single rpi header to the
11111 * HBA consistent with the SLI-4 interface spec. This memory region
11112 * maps up to 64 rpi context regions.
11113 *
11114 * Return codes
11115 * 	0 - successful
11116 * ENOMEM - No available memory
11117 * EIO - The mailbox failed to complete successfully.
11118 **/
11119int
11120lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
11121{
11122 LPFC_MBOXQ_t *mboxq;
11123 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
11124 uint32_t rc = 0;
11125 uint32_t mbox_tmo;
11126 uint32_t shdr_status, shdr_add_status;
11127 union lpfc_sli4_cfg_shdr *shdr;
11128
11129 /* The port is notified of the header region via a mailbox command. */
11130 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11131 if (!mboxq) {
11132 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11133 "2001 Unable to allocate memory for issuing "
11134 "SLI_CONFIG_SPECIAL mailbox command\n");
11135 return -ENOMEM;
11136 }
11137
11138	/* Post the rpi header memory region to the port. */
11139 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
11140 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
11141 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11142 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
11143 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
11144 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
11145 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
11146 hdr_tmpl, rpi_page->page_count);
11147 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
11148 rpi_page->start_rpi);
11149 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
11150 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
11151 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11152 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
11153 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11154 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11155 if (rc != MBX_TIMEOUT)
11156 mempool_free(mboxq, phba->mbox_mem_pool);
11157 if (shdr_status || shdr_add_status || rc) {
11158 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11159 "2514 POST_RPI_HDR mailbox failed with "
11160 "status x%x add_status x%x, mbx status x%x\n",
11161 shdr_status, shdr_add_status, rc);
11162 rc = -ENXIO;
11163 }
11164 return rc;
11165}
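/*
 * Illustrative sketch (assumption about the putPaddrLow/putPaddrHigh
 * helpers used above): the 64-bit DMA address of the rpi header page is
 * split into two 32-bit words before being written into the mailbox
 * payload.
 */
static inline void example_split_dma_addr(dma_addr_t phys,
					  uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(phys & 0xffffffffULL);
	*hi = (uint32_t)(((uint64_t)phys) >> 32);
}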
11166
11167/**
11168 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
11169 * @phba: pointer to lpfc hba data structure.
11170 *
11171 * This routine is invoked to allocate an available rpi from the driver's
11172 * rpi bitmap, consistent with the SLI-4 interface spec. If the number of
11173 * remaining rpis drops below the low water mark, another rpi header page
11174 * is created and posted to the port.
11175 *
11176 * Returns
11177 * 	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
11178 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
11179 **/
11180int
11181lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
11182{
11183 int rpi;
11184 uint16_t max_rpi, rpi_base, rpi_limit;
11185 uint16_t rpi_remaining;
11186 struct lpfc_rpi_hdr *rpi_hdr;
11187
11188 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
11189 rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
11190 rpi_limit = phba->sli4_hba.next_rpi;
11191
11192 /*
11193 * The valid rpi range is not guaranteed to be zero-based. Start
11194 * the search at the rpi_base as reported by the port.
11195 */
11196 spin_lock_irq(&phba->hbalock);
11197 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
11198 if (rpi >= rpi_limit || rpi < rpi_base)
11199 rpi = LPFC_RPI_ALLOC_ERROR;
11200 else {
11201 set_bit(rpi, phba->sli4_hba.rpi_bmask);
11202 phba->sli4_hba.max_cfg_param.rpi_used++;
11203 phba->sli4_hba.rpi_count++;
11204 }
11205
11206 /*
11207	 * Don't try to allocate more rpi header regions if the device's limit
11208	 * on available rpis has already been exhausted.
11209 */
11210 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
11211 (phba->sli4_hba.rpi_count >= max_rpi)) {
11212 spin_unlock_irq(&phba->hbalock);
11213 return rpi;
11214 }
11215
11216 /*
11217 * If the driver is running low on rpi resources, allocate another
11218 * page now. Note that the next_rpi value is used because
11219	 * it reflects how many rpis have been made available so far, whereas
11220	 * max_rpi notes the maximum number supported by the device.
11221 */
11222 rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
11223 phba->sli4_hba.rpi_count;
11224 spin_unlock_irq(&phba->hbalock);
11225 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
11226 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
11227 if (!rpi_hdr) {
11228 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11229 "2002 Error Could not grow rpi "
11230 "count\n");
11231 } else {
11232 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
11233 }
11234 }
11235
11236 return rpi;
11237}
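/*
 * Illustrative sketch (hypothetical helper): the bitmap allocation
 * pattern used by lpfc_sli4_alloc_rpi().  A free identifier is located
 * with find_next_zero_bit() and claimed with set_bit() while a spinlock
 * serializes allocators; -1 signals exhaustion in this sketch.
 */
static int example_alloc_id(unsigned long *bmask, unsigned int base,
			    unsigned int limit, spinlock_t *lock)
{
	int id;

	spin_lock_irq(lock);
	id = find_next_zero_bit(bmask, limit, base);
	if (id >= limit) {
		spin_unlock_irq(lock);
		return -1;
	}
	set_bit(id, bmask);
	spin_unlock_irq(lock);
	return id;
}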
11238
11239/**
11240 * lpfc_sli4_free_rpi - Release an rpi for reuse.
11241 * @phba: pointer to lpfc hba data structure.
11242 *
11243 * This routine is invoked to release an rpi to the pool of
11244 * available rpis maintained by the driver.
11245 **/
11246void
11247lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
11248{
11249 spin_lock_irq(&phba->hbalock);
11250 clear_bit(rpi, phba->sli4_hba.rpi_bmask);
11251 phba->sli4_hba.rpi_count--;
11252 phba->sli4_hba.max_cfg_param.rpi_used--;
11253 spin_unlock_irq(&phba->hbalock);
11254}
11255
11256/**
11257 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
11258 * @phba: pointer to lpfc hba data structure.
11259 *
11260 * This routine is invoked to free the bitmask memory region used to
11261 * track rpi allocation.
11262 **/
11263void
11264lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
11265{
11266 kfree(phba->sli4_hba.rpi_bmask);
11267}
11268
11269/**
11270 * lpfc_sli4_resume_rpi - Resume the rpi assigned to a remote node
11271 * @ndlp: pointer to the lpfc nodelist entry whose rpi is resumed.
11272 *
11273 * This routine is invoked to issue a RESUME_RPI mailbox command to the
11274 * port for the rpi associated with @ndlp.
11275 **/
11276int
11277lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
11278{
11279 LPFC_MBOXQ_t *mboxq;
11280 struct lpfc_hba *phba = ndlp->phba;
11281 int rc;
11282
11283	/* Resuming the rpi requires a mailbox command to the port. */
11284 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11285 if (!mboxq)
11286 return -ENOMEM;
11287
11288	/* Issue the RESUME_RPI mailbox command. */
11289 lpfc_resume_rpi(mboxq, ndlp);
11290 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11291 if (rc == MBX_NOT_FINISHED) {
11292 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11293 "2010 Resume RPI Mailbox failed "
11294 "status %d, mbxStatus x%x\n", rc,
11295 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
11296 mempool_free(mboxq, phba->mbox_mem_pool);
11297 return -EIO;
11298 }
11299 return 0;
11300}
11301
11302/**
11303 * lpfc_sli4_init_vpi - Initialize a vpi with the port
11304 * @phba: pointer to lpfc hba data structure.
11305 * @vpi: vpi value to activate with the port.
11306 *
11307 * This routine is invoked to activate a vpi with the
11308 * port when the host intends to use vports with a
11309 * nonzero vpi.
11310 *
11311 * Returns:
11312 * 0 success
11313 * 	negative error value otherwise
11314 **/
11315int
11316lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
11317{
11318 LPFC_MBOXQ_t *mboxq;
11319 int rc = 0;
11320 uint32_t mbox_tmo;
11321
11322 if (vpi == 0)
11323 return -EINVAL;
11324 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11325 if (!mboxq)
11326 return -ENOMEM;
11327 lpfc_init_vpi(mboxq, vpi);
11328 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
11329 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11330 if (rc != MBX_TIMEOUT)
11331 mempool_free(mboxq, phba->mbox_mem_pool);
11332 if (rc != MBX_SUCCESS) {
11333 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11334 "2022 INIT VPI Mailbox failed "
11335 "status %d, mbxStatus x%x\n", rc,
11336 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
11337 rc = -EIO;
11338 }
11339 return rc;
11340}
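/*
 * Illustrative usage sketch (hypothetical helper; mirrors the
 * lpfc_vport_create() change further down in this patch): on SLI-4
 * ports a nonzero vpi must be activated with INIT_VPI before a vport
 * can use it, and is released again if activation fails.
 */
static int example_activate_vpi(struct lpfc_hba *phba, uint16_t vpi)
{
	int rc;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return 0;	/* only SLI-4 requires INIT_VPI */

	rc = lpfc_sli4_init_vpi(phba, vpi);
	if (rc)
		/* the caller would free the vpi and fail the create */
		return rc;
	return 0;
}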
11341
11342/**
11343 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
11344 * @phba: pointer to lpfc hba data structure.
11345 * @mboxq: Pointer to mailbox object.
11346 *
11347 * This is the completion handler for the ADD_FCF_RECORD nonembedded mailbox
11348 * command. It checks the mailbox sub-header status and then frees the
11349 * nonembedded mailbox resources.
11350 **/
11351static void
11352lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
11353{
11354 void *virt_addr;
11355 union lpfc_sli4_cfg_shdr *shdr;
11356 uint32_t shdr_status, shdr_add_status;
11357
11358 virt_addr = mboxq->sge_array->addr[0];
11359 /* The IOCTL status is embedded in the mailbox subheader. */
11360 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
11361 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11362 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11363
11364 if ((shdr_status || shdr_add_status) &&
11365 (shdr_status != STATUS_FCF_IN_USE))
11366 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11367 "2558 ADD_FCF_RECORD mailbox failed with "
11368 "status x%x add_status x%x\n",
11369 shdr_status, shdr_add_status);
11370
11371 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11372}
11373
11374/**
11375 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
11376 * @phba: pointer to lpfc hba data structure.
11377 * @fcf_record: pointer to the initialized fcf record to add.
11378 *
11379 * This routine is invoked to manually add a single FCF record. The caller
11380 * must pass a completely initialized FCF_Record. This routine takes
11381 * care of the nonembedded mailbox operations.
11382 **/
11383int
11384lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
11385{
11386 int rc = 0;
11387 LPFC_MBOXQ_t *mboxq;
11388 uint8_t *bytep;
11389 void *virt_addr;
11390 dma_addr_t phys_addr;
11391 struct lpfc_mbx_sge sge;
11392 uint32_t alloc_len, req_len;
11393 uint32_t fcfindex;
11394
11395 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11396 if (!mboxq) {
11397 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11398 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
11399 return -ENOMEM;
11400 }
11401
11402 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
11403 sizeof(uint32_t);
11404
11405 /* Allocate DMA memory and set up the non-embedded mailbox command */
11406 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11407 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
11408 req_len, LPFC_SLI4_MBX_NEMBED);
11409 if (alloc_len < req_len) {
11410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11411 "2523 Allocated DMA memory size (x%x) is "
11412 "less than the requested DMA memory "
11413 "size (x%x)\n", alloc_len, req_len);
11414 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11415 return -ENOMEM;
11416 }
11417
11418 /*
11419 * Get the first SGE entry from the non-embedded DMA memory. This
11420 * routine only uses a single SGE.
11421 */
11422 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11423 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11424 if (unlikely(!mboxq->sge_array)) {
11425 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11426 "2526 Failed to get the non-embedded SGE "
11427 "virtual address\n");
11428 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11429 return -ENOMEM;
11430 }
11431 virt_addr = mboxq->sge_array->addr[0];
11432 /*
11433 * Configure the FCF record for FCFI 0. This is the driver's
11434 * hardcoded default and gets used in nonFIP mode.
11435 */
11436 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
11437 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
11438 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
11439
11440 /*
11441 * Copy the fcf_index and the FCF Record Data. The data starts after
11442 * the FCoE header plus word10. The data copy needs to be endian
11443 * correct.
11444 */
11445 bytep += sizeof(uint32_t);
11446 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
11447 mboxq->vport = phba->pport;
11448 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
11449 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11450 if (rc == MBX_NOT_FINISHED) {
11451 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11452 "2515 ADD_FCF_RECORD mailbox failed with "
11453 "status 0x%x\n", rc);
11454 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11455 rc = -EIO;
11456 } else
11457 rc = 0;
11458
11459 return rc;
11460}
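/*
 * Illustrative sketch (hypothetical helper, simplified): the
 * nonembedded SLI4_CONFIG payload layout assumed by the copies above.
 * The caller-supplied record is placed after the config sub-header and
 * a one-word fcf index, matching the two lpfc_sli_pcimem_bcopy() calls
 * in lpfc_sli4_add_fcf_record().  The driver uses lpfc_sli_pcimem_bcopy()
 * so the copy is endian-correct; plain memcpy here only shows the layout.
 */
static void example_fill_add_fcf_payload(void *virt_addr,
					 struct fcf_record *fcf_record,
					 uint32_t fcfindex)
{
	uint8_t *bytep = virt_addr;

	bytep += sizeof(union lpfc_sli4_cfg_shdr);
	memcpy(bytep, &fcfindex, sizeof(uint32_t));	/* fcf index word */
	bytep += sizeof(uint32_t);
	memcpy(bytep, fcf_record, sizeof(struct fcf_record)); /* record */
}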
11461
11462/**
11463 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
11464 * @phba: pointer to lpfc hba data structure.
11465 * @fcf_record: pointer to the fcf record to write the default data.
11466 * @fcf_index: FCF table entry index.
11467 *
11468 * This routine is invoked to build the driver's default FCF record. The
11469 * values used are hardcoded. This routine handles memory initialization.
11470 *
11471 **/
11472void
11473lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
11474 struct fcf_record *fcf_record,
11475 uint16_t fcf_index)
11476{
11477 memset(fcf_record, 0, sizeof(struct fcf_record));
11478 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
11479 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
11480 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
11481 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
11482 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
11483 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
11484 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
11485 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
11486 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
11487 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
11488 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
11489 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
11490 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
11491 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
11492 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
11493 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
11494 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
11495 /* Set the VLAN bit map */
11496 if (phba->valid_vlan) {
11497 fcf_record->vlan_bitmap[phba->vlan_id / 8]
11498 = 1 << (phba->vlan_id % 8);
11499 }
11500}
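/*
 * Illustrative sketch (hypothetical helper): the VLAN bitmap math used
 * above.  Each VLAN id maps to a single bit: byte index vlan_id / 8,
 * bit position vlan_id % 8.  The driver assigns rather than ORs because
 * the record was just zeroed; OR-ing is the general form.
 */
static inline void example_mark_vlan(uint8_t *vlan_bitmap, uint16_t vlan_id)
{
	vlan_bitmap[vlan_id / 8] |= 1 << (vlan_id % 8);
}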
11501
11502/**
11503 * lpfc_sli4_read_fcf_record - Read an FCF record from the port.
11504 * @phba: pointer to lpfc hba data structure.
11505 * @fcf_index: FCF table entry offset.
11506 *
11507 * This routine is invoked to read the FCF record at the given @fcf_index
11508 * from the port via a nonembedded READ_FCF_TABLE mailbox command.
11509 **/
11510int
11511lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11512{
11513 int rc = 0, error;
11514 LPFC_MBOXQ_t *mboxq;
11515 void *virt_addr;
11516 dma_addr_t phys_addr;
11517 uint8_t *bytep;
11518 struct lpfc_mbx_sge sge;
11519 uint32_t alloc_len, req_len;
11520 struct lpfc_mbx_read_fcf_tbl *read_fcf;
11521
11522 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11523 if (!mboxq) {
11524 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11525 "2000 Failed to allocate mbox for "
11526 "READ_FCF cmd\n");
11527 return -ENOMEM;
11528 }
11529
11530 req_len = sizeof(struct fcf_record) +
11531 sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
11532
11533 /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
11534 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11535 LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
11536 LPFC_SLI4_MBX_NEMBED);
11537
11538 if (alloc_len < req_len) {
11539 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11540 "0291 Allocated DMA memory size (x%x) is "
11541 "less than the requested DMA memory "
11542 "size (x%x)\n", alloc_len, req_len);
11543 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11544 return -ENOMEM;
11545 }
11546
11547 /* Get the first SGE entry from the non-embedded DMA memory. This
11548 * routine only uses a single SGE.
11549 */
11550 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11551 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11552 if (unlikely(!mboxq->sge_array)) {
11553 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11554 "2527 Failed to get the non-embedded SGE "
11555 "virtual address\n");
11556 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11557 return -ENOMEM;
11558 }
11559 virt_addr = mboxq->sge_array->addr[0];
11560 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
11561
11562 /* Set up command fields */
11563 bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
11564 /* Perform necessary endian conversion */
11565 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
11566 lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
11567 mboxq->vport = phba->pport;
11568 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
11569 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11570 if (rc == MBX_NOT_FINISHED) {
11571 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11572 error = -EIO;
11573 } else
11574 error = 0;
11575 return error;
11576}
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 883938652a6a..3c53316cf6d0 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -29,13 +29,23 @@ typedef enum _lpfc_ctx_cmd {
29 LPFC_CTX_HOST 29 LPFC_CTX_HOST
30} lpfc_ctx_cmd; 30} lpfc_ctx_cmd;
31 31
32/* This structure is used to carry the needed response IOCB states */
33struct lpfc_sli4_rspiocb_info {
34 uint8_t hw_status;
35 uint8_t bfield;
36#define LPFC_XB 0x1
37#define LPFC_PV 0x2
38 uint8_t priority;
39 uint8_t reserved;
40};
41
32/* This structure is used to handle IOCB requests / responses */ 42/* This structure is used to handle IOCB requests / responses */
33struct lpfc_iocbq { 43struct lpfc_iocbq {
34 /* lpfc_iocbqs are used in double linked lists */ 44 /* lpfc_iocbqs are used in double linked lists */
35 struct list_head list; 45 struct list_head list;
36 struct list_head clist; 46 struct list_head clist;
37 uint16_t iotag; /* pre-assigned IO tag */ 47 uint16_t iotag; /* pre-assigned IO tag */
38 uint16_t rsvd1; 48 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
39 49
40 IOCB_t iocb; /* IOCB cmd */ 50 IOCB_t iocb; /* IOCB cmd */
41 uint8_t retry; /* retry counter for IOCB cmd - if needed */ 51 uint8_t retry; /* retry counter for IOCB cmd - if needed */
@@ -46,6 +56,7 @@ struct lpfc_iocbq {
46#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ 56#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
47#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ 57#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
48#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ 58#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */
59#define LPFC_FIP_ELS 0x40
49 60
50 uint8_t abort_count; 61 uint8_t abort_count;
51 uint8_t rsvd2; 62 uint8_t rsvd2;
@@ -65,7 +76,7 @@ struct lpfc_iocbq {
65 struct lpfc_iocbq *); 76 struct lpfc_iocbq *);
66 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 77 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
67 struct lpfc_iocbq *); 78 struct lpfc_iocbq *);
68 79 struct lpfc_sli4_rspiocb_info sli4_info;
69}; 80};
70 81
71#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */ 82#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
@@ -81,14 +92,18 @@ struct lpfc_iocbq {
81typedef struct lpfcMboxq { 92typedef struct lpfcMboxq {
82 /* MBOXQs are used in single linked lists */ 93 /* MBOXQs are used in single linked lists */
83 struct list_head list; /* ptr to next mailbox command */ 94 struct list_head list; /* ptr to next mailbox command */
84 MAILBOX_t mb; /* Mailbox cmd */ 95 union {
85 struct lpfc_vport *vport;/* virutal port pointer */ 96 MAILBOX_t mb; /* Mailbox cmd */
97 struct lpfc_mqe mqe;
98 } u;
99 struct lpfc_vport *vport;/* virtual port pointer */
86 void *context1; /* caller context information */ 100 void *context1; /* caller context information */
87 void *context2; /* caller context information */ 101 void *context2; /* caller context information */
88 102
89 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *); 103 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
90 uint8_t mbox_flag; 104 uint8_t mbox_flag;
91 105 struct lpfc_mcqe mcqe;
106 struct lpfc_mbx_nembed_sge_virt *sge_array;
92} LPFC_MBOXQ_t; 107} LPFC_MBOXQ_t;
93 108
94#define MBX_POLL 1 /* poll mailbox till command done, then 109#define MBX_POLL 1 /* poll mailbox till command done, then
@@ -230,10 +245,11 @@ struct lpfc_sli {
230 245
231 /* Additional sli_flags */ 246 /* Additional sli_flags */
232#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */ 247#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */
233#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */ 248#define LPFC_SLI_ACTIVE 0x200 /* SLI in firmware is active */
234#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */ 249#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
235#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */ 250#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
236#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */ 251#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
252#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
237 253
238 struct lpfc_sli_ring ring[LPFC_MAX_RING]; 254 struct lpfc_sli_ring ring[LPFC_MAX_RING];
239 int fcp_ring; /* ring used for FCP initiator commands */ 255 int fcp_ring; /* ring used for FCP initiator commands */
@@ -261,6 +277,8 @@ struct lpfc_sli {
261 277
262#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox 278#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
263 command */ 279 command */
280#define LPFC_MBOX_SLI4_CONFIG_TMO 60 /* Sec tmo for outstanding mbox
281 command */
264#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write 282#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write
265 * or erase cmds. This is especially 283 * or erase cmds. This is especially
266 * long because of the potential of 284 * long because of the potential of
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
new file mode 100644
index 000000000000..3b276b47d18f
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -0,0 +1,467 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20
21#define LPFC_ACTIVE_MBOX_WAIT_CNT 100
22#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
23#define LPFC_GET_QE_REL_INT 32
24#define LPFC_RPI_LOW_WATER_MARK 10
25/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
26#define LPFC_NEMBED_MBOX_SGL_CNT 254
27
28/* Multi-queue arrangement for fast-path FCP work queues */
29#define LPFC_FN_EQN_MAX 8
30#define LPFC_SP_EQN_DEF 1
31#define LPFC_FP_EQN_DEF 1
32#define LPFC_FP_EQN_MIN 1
33#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
34
35#define LPFC_FN_WQN_MAX 32
36#define LPFC_SP_WQN_DEF 1
37#define LPFC_FP_WQN_DEF 4
38#define LPFC_FP_WQN_MIN 1
39#define LPFC_FP_WQN_MAX (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF)
40
41/*
42 * Provide the default FCF Record attributes used by the driver
43 * when nonFIP mode is configured and there is no other default
44 * FCF Record attributes.
45 */
46#define LPFC_FCOE_FCF_DEF_INDEX 0
47#define LPFC_FCOE_FCF_GET_FIRST 0xFFFF
48#define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF
49
50/* First 3 bytes of default FCF MAC are specified by FC_MAP */
51#define LPFC_FCOE_FCF_MAC3 0xFF
52#define LPFC_FCOE_FCF_MAC4 0xFF
53#define LPFC_FCOE_FCF_MAC5 0xFE
54#define LPFC_FCOE_FCF_MAP0 0x0E
55#define LPFC_FCOE_FCF_MAP1 0xFC
56#define LPFC_FCOE_FCF_MAP2 0x00
57#define LPFC_FCOE_MAX_RCV_SIZE 0x5AC
58#define LPFC_FCOE_FKA_ADV_PER 0
59#define LPFC_FCOE_FIP_PRIORITY 0x80
60
61enum lpfc_sli4_queue_type {
62 LPFC_EQ,
63 LPFC_GCQ,
64 LPFC_MCQ,
65 LPFC_WCQ,
66 LPFC_RCQ,
67 LPFC_MQ,
68 LPFC_WQ,
69 LPFC_HRQ,
70 LPFC_DRQ
71};
72
73/* The queue sub-type defines the functional purpose of the queue */
74enum lpfc_sli4_queue_subtype {
75 LPFC_NONE,
76 LPFC_MBOX,
77 LPFC_FCP,
78 LPFC_ELS,
79 LPFC_USOL
80};
81
82union sli4_qe {
83 void *address;
84 struct lpfc_eqe *eqe;
85 struct lpfc_cqe *cqe;
86 struct lpfc_mcqe *mcqe;
87 struct lpfc_wcqe_complete *wcqe_complete;
88 struct lpfc_wcqe_release *wcqe_release;
89 struct sli4_wcqe_xri_aborted *wcqe_xri_aborted;
90 struct lpfc_rcqe_complete *rcqe_complete;
91 struct lpfc_mqe *mqe;
92 union lpfc_wqe *wqe;
93 struct lpfc_rqe *rqe;
94};
95
96struct lpfc_queue {
97 struct list_head list;
98 enum lpfc_sli4_queue_type type;
99 enum lpfc_sli4_queue_subtype subtype;
100 struct lpfc_hba *phba;
101 struct list_head child_list;
102 uint32_t entry_count; /* Number of entries to support on the queue */
103 uint32_t entry_size; /* Size of each queue entry. */
104 uint32_t queue_id; /* Queue ID assigned by the hardware */
105 struct list_head page_list;
106 uint32_t page_count; /* Number of pages allocated for this queue */
107
108 uint32_t host_index; /* The host's index for putting or getting */
109 uint32_t hba_index; /* The last known hba index for get or put */
110 union sli4_qe qe[1]; /* array to index entries (must be last) */
111};
112
113struct lpfc_cq_event {
114 struct list_head list;
115 union {
116 struct lpfc_mcqe mcqe_cmpl;
117 struct lpfc_acqe_link acqe_link;
118 struct lpfc_acqe_fcoe acqe_fcoe;
119 struct lpfc_acqe_dcbx acqe_dcbx;
120 struct lpfc_rcqe rcqe_cmpl;
121 struct sli4_wcqe_xri_aborted wcqe_axri;
122 } cqe;
123};
124
125struct lpfc_sli4_link {
126 uint8_t speed;
127 uint8_t duplex;
128 uint8_t status;
129 uint8_t physical;
130 uint8_t fault;
131};
132
133struct lpfc_fcf {
134 uint8_t fabric_name[8];
135 uint8_t mac_addr[6];
136 uint16_t fcf_indx;
137 uint16_t fcfi;
138 uint32_t fcf_flag;
139#define FCF_AVAILABLE 0x01 /* FCF available for discovery */
140#define FCF_REGISTERED 0x02 /* FCF registered with FW */
141#define FCF_DISCOVERED 0x04 /* FCF discovery started */
142#define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */
143#define FCF_IN_USE		0x10 /* At least one discovery completed */
144#define FCF_VALID_VLAN 0x20 /* Use the vlan id specified */
145 uint32_t priority;
146 uint32_t addr_mode;
147 uint16_t vlan_id;
148};
149
150#define LPFC_REGION23_SIGNATURE "RG23"
151#define LPFC_REGION23_VERSION 1
152#define LPFC_REGION23_LAST_REC 0xff
153struct lpfc_fip_param_hdr {
154 uint8_t type;
155#define FCOE_PARAM_TYPE 0xA0
156 uint8_t length;
157#define FCOE_PARAM_LENGTH 2
158 uint8_t parm_version;
159#define FIPP_VERSION 0x01
160 uint8_t parm_flags;
161#define lpfc_fip_param_hdr_fipp_mode_SHIFT 6
162#define lpfc_fip_param_hdr_fipp_mode_MASK 0x3
163#define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags
164#define FIPP_MODE_ON 0x2
165#define FIPP_MODE_OFF 0x0
166#define FIPP_VLAN_VALID 0x1
167};
168
169struct lpfc_fcoe_params {
170 uint8_t fc_map[3];
171 uint8_t reserved1;
172 uint16_t vlan_tag;
173 uint8_t reserved[2];
174};
175
176struct lpfc_fcf_conn_hdr {
177 uint8_t type;
178#define FCOE_CONN_TBL_TYPE 0xA1
179 uint8_t length; /* words */
180 uint8_t reserved[2];
181};
182
183struct lpfc_fcf_conn_rec {
184 uint16_t flags;
185#define FCFCNCT_VALID 0x0001
186#define FCFCNCT_BOOT 0x0002
187#define FCFCNCT_PRIMARY 0x0004 /* if not set, Secondary */
188#define FCFCNCT_FBNM_VALID 0x0008
189#define FCFCNCT_SWNM_VALID 0x0010
190#define FCFCNCT_VLAN_VALID 0x0020
191#define FCFCNCT_AM_VALID 0x0040
192#define FCFCNCT_AM_PREFERRED 0x0080 /* if not set, AM Required */
193#define FCFCNCT_AM_SPMA 0x0100 /* if not set, FPMA */
194
195 uint16_t vlan_tag;
196 uint8_t fabric_name[8];
197 uint8_t switch_name[8];
198};
199
200struct lpfc_fcf_conn_entry {
201 struct list_head list;
202 struct lpfc_fcf_conn_rec conn_rec;
203};
204
205/*
206 * Define the host's bootstrap mailbox. This structure contains
207 * the member attributes needed to create, use, and destroy the
208 * bootstrap mailbox region.
209 *
210 * The macro definitions for the bmbx data structure are defined
211 * in lpfc_hw4.h with the register definition.
212 */
213struct lpfc_bmbx {
214 struct lpfc_dmabuf *dmabuf;
215 struct dma_address dma_address;
216 void *avirt;
217 dma_addr_t aphys;
218 uint32_t bmbx_size;
219};
220
221#define LPFC_EQE_SIZE		LPFC_EQE_SIZE_4B
222
223#define LPFC_EQE_SIZE_4B 4
224#define LPFC_EQE_SIZE_16B 16
225#define LPFC_CQE_SIZE 16
226#define LPFC_WQE_SIZE 64
227#define LPFC_MQE_SIZE 256
228#define LPFC_RQE_SIZE 8
229
230#define LPFC_EQE_DEF_COUNT 1024
231#define LPFC_CQE_DEF_COUNT 256
232#define LPFC_WQE_DEF_COUNT 256
233#define LPFC_MQE_DEF_COUNT 16
234#define LPFC_RQE_DEF_COUNT 512
235
236#define LPFC_QUEUE_NOARM false
237#define LPFC_QUEUE_REARM true
238
239
240/*
241 * SLI4 CT field defines
242 */
243#define SLI4_CT_RPI 0
244#define SLI4_CT_VPI 1
245#define SLI4_CT_VFI 2
246#define SLI4_CT_FCFI 3
247
248#define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000
249
250/*
251 * SLI4 specific data structures
252 */
253struct lpfc_max_cfg_param {
254 uint16_t max_xri;
255 uint16_t xri_base;
256 uint16_t xri_used;
257 uint16_t max_rpi;
258 uint16_t rpi_base;
259 uint16_t rpi_used;
260 uint16_t max_vpi;
261 uint16_t vpi_base;
262 uint16_t vpi_used;
263 uint16_t max_vfi;
264 uint16_t vfi_base;
265 uint16_t vfi_used;
266 uint16_t max_fcfi;
267 uint16_t fcfi_base;
268 uint16_t fcfi_used;
269 uint16_t max_eq;
270 uint16_t max_rq;
271 uint16_t max_cq;
272 uint16_t max_wq;
273};
274
275struct lpfc_hba;
276/* SLI4 HBA multi-fcp queue handler struct */
277struct lpfc_fcp_eq_hdl {
278 uint32_t idx;
279 struct lpfc_hba *phba;
280};
281
282/* SLI4 HBA data structure entries */
283struct lpfc_sli4_hba {
284 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
285 PCI BAR0, config space registers */
286 void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for
287 PCI BAR1, control registers */
288 void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for
289 PCI BAR2, doorbell registers */
290 /* BAR0 PCI config space register memory map */
291 void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */
292 void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */
293 void __iomem *ONLINE0regaddr; /* Address to components of internal UE */
294 void __iomem *ONLINE1regaddr; /* Address to components of internal UE */
295#define LPFC_ONLINE_NERR 0xFFFFFFFF
296 void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */
297 /* BAR1 FCoE function CSR register memory map */
298 void __iomem *STAregaddr; /* Address to HST_STATE register */
299 void __iomem *ISRregaddr; /* Address to HST_ISR register */
300 void __iomem *IMRregaddr; /* Address to HST_IMR register */
301 void __iomem *ISCRregaddr; /* Address to HST_ISCR register */
302 /* BAR2 VF-0 doorbell register memory map */
303 void __iomem *RQDBregaddr; /* Address to RQ_DOORBELL register */
304 void __iomem *WQDBregaddr; /* Address to WQ_DOORBELL register */
305 void __iomem *EQCQDBregaddr; /* Address to EQCQ_DOORBELL register */
306 void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */
307 void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */
308
309 struct msix_entry *msix_entries;
310 uint32_t cfg_eqn;
311 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
312 /* Pointers to the constructed SLI4 queues */
313 struct lpfc_queue **fp_eq; /* Fast-path event queue */
314 struct lpfc_queue *sp_eq; /* Slow-path event queue */
315 struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
316 struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
317 struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
318 struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
319 struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
320 struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
321 struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
322 struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
323 struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */
324
325 /* Setup information for various queue parameters */
326 int eq_esize;
327 int eq_ecount;
328 int cq_esize;
329 int cq_ecount;
330 int wq_esize;
331 int wq_ecount;
332 int mq_esize;
333 int mq_ecount;
334 int rq_esize;
335 int rq_ecount;
336#define LPFC_SP_EQ_MAX_INTR_SEC 10000
337#define LPFC_FP_EQ_MAX_INTR_SEC 10000
338
339 uint32_t intr_enable;
340 struct lpfc_bmbx bmbx;
341 struct lpfc_max_cfg_param max_cfg_param;
342 uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
343 uint16_t next_rpi;
344 uint16_t scsi_xri_max;
345 uint16_t scsi_xri_cnt;
346 struct list_head lpfc_free_sgl_list;
347 struct list_head lpfc_sgl_list;
348 struct lpfc_sglq **lpfc_els_sgl_array;
349 struct list_head lpfc_abts_els_sgl_list;
350 struct lpfc_scsi_buf **lpfc_scsi_psb_array;
351 struct list_head lpfc_abts_scsi_buf_list;
352 uint32_t total_sglq_bufs;
353 struct lpfc_sglq **lpfc_sglq_active_list;
354 struct list_head lpfc_rpi_hdr_list;
355 unsigned long *rpi_bmask;
356 uint16_t rpi_count;
357 struct lpfc_sli4_flags sli4_flags;
358 struct list_head sp_rspiocb_work_queue;
359 struct list_head sp_cqe_event_pool;
360 struct list_head sp_asynce_work_queue;
361 struct list_head sp_fcp_xri_aborted_work_queue;
362 struct list_head sp_els_xri_aborted_work_queue;
363 struct list_head sp_unsol_work_queue;
364 struct lpfc_sli4_link link_state;
365 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
366 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
367};
368
369enum lpfc_sge_type {
370 GEN_BUFF_TYPE,
371 SCSI_BUFF_TYPE
372};
373
374struct lpfc_sglq {
375 /* lpfc_sglqs are used in double linked lists */
376 struct list_head list;
377 struct list_head clist;
378 enum lpfc_sge_type buff_type; /* is this a scsi sgl */
379 uint16_t iotag; /* pre-assigned IO tag */
380 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
381 struct sli4_sge *sgl; /* pre-assigned SGL */
382 void *virt; /* virtual address. */
383 dma_addr_t phys; /* physical address */
384};
385
386struct lpfc_rpi_hdr {
387 struct list_head list;
388 uint32_t len;
389 struct lpfc_dmabuf *dmabuf;
390 uint32_t page_count;
391 uint32_t start_rpi;
392};
393
394/*
395 * SLI4 specific function prototypes
396 */
397int lpfc_pci_function_reset(struct lpfc_hba *);
398int lpfc_sli4_hba_setup(struct lpfc_hba *);
399int lpfc_sli4_hba_down(struct lpfc_hba *);
400int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
401 uint8_t, uint32_t, bool);
402void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
403void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
404void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
405 struct lpfc_mbx_sge *);
406
407void lpfc_sli4_hba_reset(struct lpfc_hba *);
408struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
409 uint32_t);
410void lpfc_sli4_queue_free(struct lpfc_queue *);
411uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
412uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
413 struct lpfc_queue *, uint32_t, uint32_t);
414uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
415 struct lpfc_queue *, uint32_t);
416uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
417 struct lpfc_queue *, uint32_t);
418uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
419 struct lpfc_queue *, struct lpfc_queue *, uint32_t);
420uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
421uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
422uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
423uint32_t lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
424uint32_t lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
425 struct lpfc_queue *);
426int lpfc_sli4_queue_setup(struct lpfc_hba *);
427void lpfc_sli4_queue_unset(struct lpfc_hba *);
428int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
429int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
430int lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *);
431uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
432int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
433int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba);
434int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
435struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
436struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
437void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
438void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
439int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *);
440int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *);
441int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
442struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
443void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
444int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
445void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
446void lpfc_sli4_remove_rpis(struct lpfc_hba *);
447void lpfc_sli4_async_event_proc(struct lpfc_hba *);
448int lpfc_sli4_resume_rpi(struct lpfc_nodelist *);
449void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
450void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
451void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
452 struct sli4_wcqe_xri_aborted *);
453void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
454 struct sli4_wcqe_xri_aborted *);
455int lpfc_sli4_brdreset(struct lpfc_hba *);
456int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
457void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
458int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
459int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
460uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
461uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
462void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
463int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t);
464void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *);
465int lpfc_sli4_post_status_check(struct lpfc_hba *);
466uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
467
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e599519e3078..41094e02304b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.1" 21#define LPFC_DRIVER_VERSION "8.3.3"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 917ad56b0aff..e0b49922193e 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -32,8 +32,10 @@
32#include <scsi/scsi_device.h> 32#include <scsi/scsi_device.h>
33#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
34#include <scsi/scsi_transport_fc.h> 34#include <scsi/scsi_transport_fc.h>
35#include "lpfc_hw4.h"
35#include "lpfc_hw.h" 36#include "lpfc_hw.h"
36#include "lpfc_sli.h" 37#include "lpfc_sli.h"
38#include "lpfc_sli4.h"
37#include "lpfc_nl.h" 39#include "lpfc_nl.h"
38#include "lpfc_disc.h" 40#include "lpfc_disc.h"
39#include "lpfc_scsi.h" 41#include "lpfc_scsi.h"
@@ -89,6 +91,8 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
89 vpi = 0; 91 vpi = 0;
90 else 92 else
91 set_bit(vpi, phba->vpi_bmask); 93 set_bit(vpi, phba->vpi_bmask);
94 if (phba->sli_rev == LPFC_SLI_REV4)
95 phba->sli4_hba.max_cfg_param.vpi_used++;
92 spin_unlock_irq(&phba->hbalock); 96 spin_unlock_irq(&phba->hbalock);
93 return vpi; 97 return vpi;
94} 98}
@@ -96,8 +100,12 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
96static void 100static void
97lpfc_free_vpi(struct lpfc_hba *phba, int vpi) 101lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
98{ 102{
103 if (vpi == 0)
104 return;
99 spin_lock_irq(&phba->hbalock); 105 spin_lock_irq(&phba->hbalock);
100 clear_bit(vpi, phba->vpi_bmask); 106 clear_bit(vpi, phba->vpi_bmask);
107 if (phba->sli_rev == LPFC_SLI_REV4)
108 phba->sli4_hba.max_cfg_param.vpi_used--;
101 spin_unlock_irq(&phba->hbalock); 109 spin_unlock_irq(&phba->hbalock);
102} 110}
103 111
@@ -113,7 +121,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
113 if (!pmb) { 121 if (!pmb) {
114 return -ENOMEM; 122 return -ENOMEM;
115 } 123 }
116 mb = &pmb->mb; 124 mb = &pmb->u.mb;
117 125
118 lpfc_read_sparam(phba, pmb, vport->vpi); 126 lpfc_read_sparam(phba, pmb, vport->vpi);
119 /* 127 /*
@@ -243,23 +251,22 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
243 (vport->fc_flag & wait_flags) || 251 (vport->fc_flag & wait_flags) ||
244 ((vport->port_state > LPFC_VPORT_FAILED) && 252 ((vport->port_state > LPFC_VPORT_FAILED) &&
245 (vport->port_state < LPFC_VPORT_READY))) { 253 (vport->port_state < LPFC_VPORT_READY))) {
246 lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, 254 lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
247 "1833 Vport discovery quiesce Wait:" 255 "1833 Vport discovery quiesce Wait:"
248 " vpi x%x state x%x fc_flags x%x" 256 " state x%x fc_flags x%x"
249 " num_nodes x%x, waiting 1000 msecs" 257 " num_nodes x%x, waiting 1000 msecs"
250 " total wait msecs x%x\n", 258 " total wait msecs x%x\n",
251 vport->vpi, vport->port_state, 259 vport->port_state, vport->fc_flag,
252 vport->fc_flag, vport->num_disc_nodes, 260 vport->num_disc_nodes,
253 jiffies_to_msecs(jiffies - start_time)); 261 jiffies_to_msecs(jiffies - start_time));
254 msleep(1000); 262 msleep(1000);
255 } else { 263 } else {
256 /* Base case. Wait variants satisfied. Break out */ 264 /* Base case. Wait variants satisfied. Break out */
257 lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, 265 lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
258 "1834 Vport discovery quiesced:" 266 "1834 Vport discovery quiesced:"
259 " vpi x%x state x%x fc_flags x%x" 267 " state x%x fc_flags x%x"
260 " wait msecs x%x\n", 268 " wait msecs x%x\n",
261 vport->vpi, vport->port_state, 269 vport->port_state, vport->fc_flag,
262 vport->fc_flag,
263 jiffies_to_msecs(jiffies 270 jiffies_to_msecs(jiffies
264 - start_time)); 271 - start_time));
265 break; 272 break;
@@ -267,12 +274,10 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
267 } 274 }
268 275
269 if (time_after(jiffies, wait_time_max)) 276 if (time_after(jiffies, wait_time_max))
270 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 277 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
271 "1835 Vport discovery quiesce failed:" 278 "1835 Vport discovery quiesce failed:"
272 " vpi x%x state x%x fc_flags x%x" 279 " state x%x fc_flags x%x wait msecs x%x\n",
273 " wait msecs x%x\n", 280 vport->port_state, vport->fc_flag,
274 vport->vpi, vport->port_state,
275 vport->fc_flag,
276 jiffies_to_msecs(jiffies - start_time)); 281 jiffies_to_msecs(jiffies - start_time));
277} 282}
278 283
@@ -308,6 +313,21 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
308 goto error_out; 313 goto error_out;
309 } 314 }
310 315
316 /*
317 * In SLI4, the vpi must be activated before it can be used
318 * by the port.
319 */
320 if (phba->sli_rev == LPFC_SLI_REV4) {
321 rc = lpfc_sli4_init_vpi(phba, vpi);
322 if (rc) {
323 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
324 "1838 Failed to INIT_VPI on vpi %d "
325 "status %d\n", vpi, rc);
326 rc = VPORT_NORESOURCES;
327 lpfc_free_vpi(phba, vpi);
328 goto error_out;
329 }
330 }
311 331
312 /* Assign an unused board number */ 332 /* Assign an unused board number */
313 if ((instance = lpfc_get_instance()) < 0) { 333 if ((instance = lpfc_get_instance()) < 0) {
@@ -535,6 +555,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
535 "physical host\n"); 555 "physical host\n");
536 return VPORT_ERROR; 556 return VPORT_ERROR;
537 } 557 }
558
559 /* If the vport is a static vport fail the deletion. */
560 if ((vport->vport_flag & STATIC_VPORT) &&
561 !(phba->pport->load_flag & FC_UNLOADING)) {
562 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
563 "1837 vport_delete failed: Cannot delete "
564 "static vport.\n");
565 return VPORT_ERROR;
566 }
567
538 /* 568 /*
539 * If we are not unloading the driver then prevent the vport_delete 569 * If we are not unloading the driver then prevent the vport_delete
540 * from happening until after this vport's discovery is finished. 570 * from happening until after this vport's discovery is finished.
@@ -665,8 +695,6 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
665 } 695 }
666 vport->unreg_vpi_cmpl = VPORT_INVAL; 696 vport->unreg_vpi_cmpl = VPORT_INVAL;
667 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); 697 timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
668 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
669 goto skip_logo;
670 if (!lpfc_issue_els_npiv_logo(vport, ndlp)) 698 if (!lpfc_issue_els_npiv_logo(vport, ndlp))
671 while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout) 699 while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
672 timeout = schedule_timeout(timeout); 700 timeout = schedule_timeout(timeout);
@@ -710,7 +738,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
710 struct lpfc_vport *port_iterator; 738 struct lpfc_vport *port_iterator;
711 struct lpfc_vport **vports; 739 struct lpfc_vport **vports;
712 int index = 0; 740 int index = 0;
713 vports = kzalloc((phba->max_vpi + 1) * sizeof(struct lpfc_vport *), 741 vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *),
714 GFP_KERNEL); 742 GFP_KERNEL);
715 if (vports == NULL) 743 if (vports == NULL)
716 return NULL; 744 return NULL;
@@ -734,7 +762,7 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
734 int i; 762 int i;
735 if (vports == NULL) 763 if (vports == NULL)
736 return; 764 return;
737 for (i=0; vports[i] != NULL && i <= phba->max_vpi; i++) 765 for (i = 0; vports[i] != NULL && i <= phba->max_vports; i++)
738 scsi_host_put(lpfc_shost_from_vport(vports[i])); 766 scsi_host_put(lpfc_shost_from_vport(vports[i]));
739 kfree(vports); 767 kfree(vports);
740} 768}
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 795201fa0b48..512c2cc1a33f 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -469,7 +469,7 @@ typedef struct {
469 u8 type; /* Type of the device */ 469 u8 type; /* Type of the device */
470 u8 cur_status; /* current status of the device */ 470 u8 cur_status; /* current status of the device */
471 u8 tag_depth; /* Level of tagging */ 471 u8 tag_depth; /* Level of tagging */
472 u8 sync_neg; /* sync negotiation - ENABLE or DISBALE */ 472 u8 sync_neg; /* sync negotiation - ENABLE or DISABLE */
473 u32 size; /* configurable size in terms of 512 byte 473 u32 size; /* configurable size in terms of 512 byte
474 blocks */ 474 blocks */
475}__attribute__ ((packed)) phys_drv; 475}__attribute__ ((packed)) phys_drv;
diff --git a/drivers/scsi/megaraid/mbox_defs.h b/drivers/scsi/megaraid/mbox_defs.h
index 170399ef06f4..b25b74764ec3 100644
--- a/drivers/scsi/megaraid/mbox_defs.h
+++ b/drivers/scsi/megaraid/mbox_defs.h
@@ -686,7 +686,7 @@ typedef struct {
686 * @type : Type of the device 686 * @type : Type of the device
687 * @cur_status : current status of the device 687 * @cur_status : current status of the device
688 * @tag_depth : Level of tagging 688 * @tag_depth : Level of tagging
689 * @sync_neg : sync negotiation - ENABLE or DISBALE 689 * @sync_neg : sync negotiation - ENABLE or DISABLE
690 * @size : configurable size in terms of 512 byte 690 * @size : configurable size in terms of 512 byte
691 */ 691 */
692typedef struct { 692typedef struct {
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 36b1d1052ba1..286c185fa9e4 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -61,6 +61,7 @@
61#include <scsi/scsi_tcq.h> 61#include <scsi/scsi_tcq.h>
62#include <scsi/scsi_transport_sas.h> 62#include <scsi/scsi_transport_sas.h>
63#include <scsi/scsi_dbg.h> 63#include <scsi/scsi_dbg.h>
64#include <scsi/scsi_eh.h>
64 65
65#include "mpt2sas_debug.h" 66#include "mpt2sas_debug.h"
66 67
@@ -68,10 +69,10 @@
68#define MPT2SAS_DRIVER_NAME "mpt2sas" 69#define MPT2SAS_DRIVER_NAME "mpt2sas"
69#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" 70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
70#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" 71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
71#define MPT2SAS_DRIVER_VERSION "01.100.02.00" 72#define MPT2SAS_DRIVER_VERSION "01.100.03.00"
72#define MPT2SAS_MAJOR_VERSION 01 73#define MPT2SAS_MAJOR_VERSION 01
73#define MPT2SAS_MINOR_VERSION 100 74#define MPT2SAS_MINOR_VERSION 100
74#define MPT2SAS_BUILD_VERSION 02 75#define MPT2SAS_BUILD_VERSION 03
75#define MPT2SAS_RELEASE_VERSION 00 76#define MPT2SAS_RELEASE_VERSION 00
76 77
77/* 78/*
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index ba6ab170bdf0..14e473d1fa7b 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -473,7 +473,7 @@ _ctl_poll(struct file *filep, poll_table *wait)
473} 473}
474 474
475/** 475/**
476 * _ctl_do_task_abort - assign an active smid to the abort_task 476 * _ctl_set_task_mid - assign an active smid to tm request
477 * @ioc: per adapter object 477 * @ioc: per adapter object
478 * @karg - (struct mpt2_ioctl_command) 478 * @karg - (struct mpt2_ioctl_command)
479 * @tm_request - pointer to mf from user space 479 * @tm_request - pointer to mf from user space
@@ -482,7 +482,7 @@ _ctl_poll(struct file *filep, poll_table *wait)
482 * during failure, the reply frame is filled. 482 * during failure, the reply frame is filled.
483 */ 483 */
484static int 484static int
485_ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg, 485_ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
486 Mpi2SCSITaskManagementRequest_t *tm_request) 486 Mpi2SCSITaskManagementRequest_t *tm_request)
487{ 487{
488 u8 found = 0; 488 u8 found = 0;
@@ -494,6 +494,14 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
494 Mpi2SCSITaskManagementReply_t *tm_reply; 494 Mpi2SCSITaskManagementReply_t *tm_reply;
495 u32 sz; 495 u32 sz;
496 u32 lun; 496 u32 lun;
497 char *desc = NULL;
498
499 if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
500 desc = "abort_task";
501 else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
502 desc = "query_task";
503 else
504 return 0;
497 505
498 lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN); 506 lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
499 507
@@ -517,13 +525,13 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
517 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 525 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
518 526
519 if (!found) { 527 if (!found) {
520 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: " 528 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
521 "DevHandle(0x%04x), lun(%d), no active mid!!\n", ioc->name, 529 "handle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
522 tm_request->DevHandle, lun)); 530 desc, tm_request->DevHandle, lun));
523 tm_reply = ioc->ctl_cmds.reply; 531 tm_reply = ioc->ctl_cmds.reply;
524 tm_reply->DevHandle = tm_request->DevHandle; 532 tm_reply->DevHandle = tm_request->DevHandle;
525 tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 533 tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
526 tm_reply->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK; 534 tm_reply->TaskType = tm_request->TaskType;
527 tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4; 535 tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
528 tm_reply->VP_ID = tm_request->VP_ID; 536 tm_reply->VP_ID = tm_request->VP_ID;
529 tm_reply->VF_ID = tm_request->VF_ID; 537 tm_reply->VF_ID = tm_request->VF_ID;
@@ -535,9 +543,9 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
535 return 1; 543 return 1;
536 } 544 }
537 545
538 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: " 546 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
539 "DevHandle(0x%04x), lun(%d), smid(%d)\n", ioc->name, 547 "handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
540 tm_request->DevHandle, lun, tm_request->TaskMID)); 548 desc, tm_request->DevHandle, lun, tm_request->TaskMID));
541 return 0; 549 return 0;
542} 550}
543 551
@@ -739,8 +747,10 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
739 (Mpi2SCSITaskManagementRequest_t *)mpi_request; 747 (Mpi2SCSITaskManagementRequest_t *)mpi_request;
740 748
741 if (tm_request->TaskType == 749 if (tm_request->TaskType ==
742 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { 750 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
743 if (_ctl_do_task_abort(ioc, &karg, tm_request)) { 751 tm_request->TaskType ==
752 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
753 if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
744 mpt2sas_base_free_smid(ioc, smid); 754 mpt2sas_base_free_smid(ioc, smid);
745 goto out; 755 goto out;
 746 }						756 }
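
Note on the mpt2sas_ctl.c change: _ctl_do_task_abort() is renamed to _ctl_set_task_mid() and widened so that the ioctl passthrough path looks up an active TaskMID for QUERY_TASK as well as ABORT_TASK, synthesizing a reply frame and returning 1 when no matching command is outstanding. A minimal sketch of the TaskType gate the helper now applies; the helper name tm_type_needs_active_mid() is illustrative only and is not part of the patch:

    /* Sketch only: the TaskTypes for which _ctl_set_task_mid() must find
     * an active TaskMID in ioc->scsi_lookup before the TM frame is sent. */
    static inline int tm_type_needs_active_mid(u8 task_type)
    {
            return task_type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
                   task_type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK;
    }
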
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index e3a7967259e7..2a01a5f2a84d 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -197,12 +197,12 @@ static struct pci_device_id scsih_pci_table[] = {
197MODULE_DEVICE_TABLE(pci, scsih_pci_table); 197MODULE_DEVICE_TABLE(pci, scsih_pci_table);
198 198
199/** 199/**
200 * scsih_set_debug_level - global setting of ioc->logging_level. 200 * _scsih_set_debug_level - global setting of ioc->logging_level.
201 * 201 *
202 * Note: The logging levels are defined in mpt2sas_debug.h. 202 * Note: The logging levels are defined in mpt2sas_debug.h.
203 */ 203 */
204static int 204static int
205scsih_set_debug_level(const char *val, struct kernel_param *kp) 205_scsih_set_debug_level(const char *val, struct kernel_param *kp)
206{ 206{
207 int ret = param_set_int(val, kp); 207 int ret = param_set_int(val, kp);
208 struct MPT2SAS_ADAPTER *ioc; 208 struct MPT2SAS_ADAPTER *ioc;
@@ -215,7 +215,7 @@ scsih_set_debug_level(const char *val, struct kernel_param *kp)
215 ioc->logging_level = logging_level; 215 ioc->logging_level = logging_level;
216 return 0; 216 return 0;
217} 217}
218module_param_call(logging_level, scsih_set_debug_level, param_get_int, 218module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
219 &logging_level, 0644); 219 &logging_level, 0644);
220 220
221/** 221/**
@@ -884,6 +884,41 @@ _scsih_scsi_lookup_find_by_target(struct MPT2SAS_ADAPTER *ioc, int id,
884} 884}
885 885
886/** 886/**
887 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
888 * @ioc: per adapter object
889 * @id: target id
890 * @lun: lun number
891 * @channel: channel
892 * Context: This function will acquire ioc->scsi_lookup_lock.
893 *
894 * This will search for a matching channel:id:lun in the scsi_lookup array,
895 * returning 1 if found.
896 */
897static u8
898_scsih_scsi_lookup_find_by_lun(struct MPT2SAS_ADAPTER *ioc, int id,
899 unsigned int lun, int channel)
900{
901 u8 found;
902 unsigned long flags;
903 int i;
904
905 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
906 found = 0;
907 for (i = 0 ; i < ioc->request_depth; i++) {
908 if (ioc->scsi_lookup[i].scmd &&
909 (ioc->scsi_lookup[i].scmd->device->id == id &&
910 ioc->scsi_lookup[i].scmd->device->channel == channel &&
911 ioc->scsi_lookup[i].scmd->device->lun == lun)) {
912 found = 1;
913 goto out;
914 }
915 }
916 out:
917 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
918 return found;
919}
920
921/**
887 * _scsih_get_chain_buffer_dma - obtain block of chains (dma address) 922 * _scsih_get_chain_buffer_dma - obtain block of chains (dma address)
888 * @ioc: per adapter object 923 * @ioc: per adapter object
889 * @smid: system request message index 924 * @smid: system request message index
@@ -1047,14 +1082,14 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
1047} 1082}
1048 1083
1049/** 1084/**
1050 * scsih_change_queue_depth - setting device queue depth 1085 * _scsih_change_queue_depth - setting device queue depth
1051 * @sdev: scsi device struct 1086 * @sdev: scsi device struct
1052 * @qdepth: requested queue depth 1087 * @qdepth: requested queue depth
1053 * 1088 *
1054 * Returns queue depth. 1089 * Returns queue depth.
1055 */ 1090 */
1056static int 1091static int
1057scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) 1092_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1058{ 1093{
1059 struct Scsi_Host *shost = sdev->host; 1094 struct Scsi_Host *shost = sdev->host;
1060 int max_depth; 1095 int max_depth;
@@ -1079,14 +1114,14 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1079} 1114}
1080 1115
1081/** 1116/**
1082 * scsih_change_queue_type - changing device queue tag type 1117 * _scsih_change_queue_type - changing device queue tag type
1083 * @sdev: scsi device struct 1118 * @sdev: scsi device struct
1084 * @tag_type: requested tag type 1119 * @tag_type: requested tag type
1085 * 1120 *
1086 * Returns queue tag type. 1121 * Returns queue tag type.
1087 */ 1122 */
1088static int 1123static int
1089scsih_change_queue_type(struct scsi_device *sdev, int tag_type) 1124_scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
1090{ 1125{
1091 if (sdev->tagged_supported) { 1126 if (sdev->tagged_supported) {
1092 scsi_set_tag_type(sdev, tag_type); 1127 scsi_set_tag_type(sdev, tag_type);
@@ -1101,14 +1136,14 @@ scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
1101} 1136}
1102 1137
1103/** 1138/**
1104 * scsih_target_alloc - target add routine 1139 * _scsih_target_alloc - target add routine
1105 * @starget: scsi target struct 1140 * @starget: scsi target struct
1106 * 1141 *
1107 * Returns 0 if ok. Any other return is assumed to be an error and 1142 * Returns 0 if ok. Any other return is assumed to be an error and
1108 * the device is ignored. 1143 * the device is ignored.
1109 */ 1144 */
1110static int 1145static int
1111scsih_target_alloc(struct scsi_target *starget) 1146_scsih_target_alloc(struct scsi_target *starget)
1112{ 1147{
1113 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 1148 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1114 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1149 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1163,13 +1198,13 @@ scsih_target_alloc(struct scsi_target *starget)
1163} 1198}
1164 1199
1165/** 1200/**
1166 * scsih_target_destroy - target destroy routine 1201 * _scsih_target_destroy - target destroy routine
1167 * @starget: scsi target struct 1202 * @starget: scsi target struct
1168 * 1203 *
1169 * Returns nothing. 1204 * Returns nothing.
1170 */ 1205 */
1171static void 1206static void
1172scsih_target_destroy(struct scsi_target *starget) 1207_scsih_target_destroy(struct scsi_target *starget)
1173{ 1208{
1174 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 1209 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1175 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1210 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1212,14 +1247,14 @@ scsih_target_destroy(struct scsi_target *starget)
1212} 1247}
1213 1248
1214/** 1249/**
1215 * scsih_slave_alloc - device add routine 1250 * _scsih_slave_alloc - device add routine
1216 * @sdev: scsi device struct 1251 * @sdev: scsi device struct
1217 * 1252 *
1218 * Returns 0 if ok. Any other return is assumed to be an error and 1253 * Returns 0 if ok. Any other return is assumed to be an error and
1219 * the device is ignored. 1254 * the device is ignored.
1220 */ 1255 */
1221static int 1256static int
1222scsih_slave_alloc(struct scsi_device *sdev) 1257_scsih_slave_alloc(struct scsi_device *sdev)
1223{ 1258{
1224 struct Scsi_Host *shost; 1259 struct Scsi_Host *shost;
1225 struct MPT2SAS_ADAPTER *ioc; 1260 struct MPT2SAS_ADAPTER *ioc;
@@ -1273,13 +1308,13 @@ scsih_slave_alloc(struct scsi_device *sdev)
1273} 1308}
1274 1309
1275/** 1310/**
1276 * scsih_slave_destroy - device destroy routine 1311 * _scsih_slave_destroy - device destroy routine
1277 * @sdev: scsi device struct 1312 * @sdev: scsi device struct
1278 * 1313 *
1279 * Returns nothing. 1314 * Returns nothing.
1280 */ 1315 */
1281static void 1316static void
1282scsih_slave_destroy(struct scsi_device *sdev) 1317_scsih_slave_destroy(struct scsi_device *sdev)
1283{ 1318{
1284 struct MPT2SAS_TARGET *sas_target_priv_data; 1319 struct MPT2SAS_TARGET *sas_target_priv_data;
1285 struct scsi_target *starget; 1320 struct scsi_target *starget;
@@ -1295,13 +1330,13 @@ scsih_slave_destroy(struct scsi_device *sdev)
1295} 1330}
1296 1331
1297/** 1332/**
1298 * scsih_display_sata_capabilities - sata capabilities 1333 * _scsih_display_sata_capabilities - sata capabilities
1299 * @ioc: per adapter object 1334 * @ioc: per adapter object
1300 * @sas_device: the sas_device object 1335 * @sas_device: the sas_device object
1301 * @sdev: scsi device struct 1336 * @sdev: scsi device struct
1302 */ 1337 */
1303static void 1338static void
1304scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc, 1339_scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
1305 struct _sas_device *sas_device, struct scsi_device *sdev) 1340 struct _sas_device *sas_device, struct scsi_device *sdev)
1306{ 1341{
1307 Mpi2ConfigReply_t mpi_reply; 1342 Mpi2ConfigReply_t mpi_reply;
@@ -1401,14 +1436,14 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
1401} 1436}
1402 1437
1403/** 1438/**
1404 * scsih_slave_configure - device configure routine. 1439 * _scsih_slave_configure - device configure routine.
1405 * @sdev: scsi device struct 1440 * @sdev: scsi device struct
1406 * 1441 *
1407 * Returns 0 if ok. Any other return is assumed to be an error and 1442 * Returns 0 if ok. Any other return is assumed to be an error and
1408 * the device is ignored. 1443 * the device is ignored.
1409 */ 1444 */
1410static int 1445static int
1411scsih_slave_configure(struct scsi_device *sdev) 1446_scsih_slave_configure(struct scsi_device *sdev)
1412{ 1447{
1413 struct Scsi_Host *shost = sdev->host; 1448 struct Scsi_Host *shost = sdev->host;
1414 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1449 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1489,7 +1524,7 @@ scsih_slave_configure(struct scsi_device *sdev)
1489 r_level, raid_device->handle, 1524 r_level, raid_device->handle,
1490 (unsigned long long)raid_device->wwid, 1525 (unsigned long long)raid_device->wwid,
1491 raid_device->num_pds, ds); 1526 raid_device->num_pds, ds);
1492 scsih_change_queue_depth(sdev, qdepth); 1527 _scsih_change_queue_depth(sdev, qdepth);
1493 return 0; 1528 return 0;
1494 } 1529 }
1495 1530
@@ -1532,10 +1567,10 @@ scsih_slave_configure(struct scsi_device *sdev)
1532 sas_device->slot); 1567 sas_device->slot);
1533 1568
1534 if (!ssp_target) 1569 if (!ssp_target)
1535 scsih_display_sata_capabilities(ioc, sas_device, sdev); 1570 _scsih_display_sata_capabilities(ioc, sas_device, sdev);
1536 } 1571 }
1537 1572
1538 scsih_change_queue_depth(sdev, qdepth); 1573 _scsih_change_queue_depth(sdev, qdepth);
1539 1574
1540 if (ssp_target) 1575 if (ssp_target)
1541 sas_read_port_mode_page(sdev); 1576 sas_read_port_mode_page(sdev);
@@ -1543,7 +1578,7 @@ scsih_slave_configure(struct scsi_device *sdev)
1543} 1578}
1544 1579
1545/** 1580/**
1546 * scsih_bios_param - fetch head, sector, cylinder info for a disk 1581 * _scsih_bios_param - fetch head, sector, cylinder info for a disk
1547 * @sdev: scsi device struct 1582 * @sdev: scsi device struct
1548 * @bdev: pointer to block device context 1583 * @bdev: pointer to block device context
1549 * @capacity: device size (in 512 byte sectors) 1584 * @capacity: device size (in 512 byte sectors)
@@ -1555,7 +1590,7 @@ scsih_slave_configure(struct scsi_device *sdev)
1555 * Return nothing. 1590 * Return nothing.
1556 */ 1591 */
1557static int 1592static int
1558scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, 1593_scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
1559 sector_t capacity, int params[]) 1594 sector_t capacity, int params[])
1560{ 1595{
1561 int heads; 1596 int heads;
@@ -1636,7 +1671,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code)
1636} 1671}
1637 1672
1638/** 1673/**
1639 * scsih_tm_done - tm completion routine 1674 * _scsih_tm_done - tm completion routine
1640 * @ioc: per adapter object 1675 * @ioc: per adapter object
1641 * @smid: system request message index 1676 * @smid: system request message index
1642 * @VF_ID: virtual function id 1677 * @VF_ID: virtual function id
@@ -1648,7 +1683,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code)
1648 * Return nothing. 1683 * Return nothing.
1649 */ 1684 */
1650static void 1685static void
1651scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) 1686_scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
1652{ 1687{
1653 MPI2DefaultReply_t *mpi_reply; 1688 MPI2DefaultReply_t *mpi_reply;
1654 1689
@@ -1823,13 +1858,13 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
1823} 1858}
1824 1859
1825/** 1860/**
1826 * scsih_abort - eh threads main abort routine 1861 * _scsih_abort - eh threads main abort routine
1827 * @sdev: scsi device struct 1862 * @sdev: scsi device struct
1828 * 1863 *
1829 * Returns SUCCESS if command aborted else FAILED 1864 * Returns SUCCESS if command aborted else FAILED
1830 */ 1865 */
1831static int 1866static int
1832scsih_abort(struct scsi_cmnd *scmd) 1867_scsih_abort(struct scsi_cmnd *scmd)
1833{ 1868{
1834 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 1869 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
1835 struct MPT2SAS_DEVICE *sas_device_priv_data; 1870 struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -1889,15 +1924,86 @@ scsih_abort(struct scsi_cmnd *scmd)
1889 return r; 1924 return r;
1890} 1925}
1891 1926
1927/**
1928 * _scsih_dev_reset - eh threads main device reset routine
 1929 * @scmd: pointer to scsi command object
1930 *
 1931 * Returns SUCCESS if the device was reset, else FAILED
1932 */
1933static int
1934_scsih_dev_reset(struct scsi_cmnd *scmd)
1935{
1936 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
1937 struct MPT2SAS_DEVICE *sas_device_priv_data;
1938 struct _sas_device *sas_device;
1939 unsigned long flags;
1940 u16 handle;
1941 int r;
1942
1943 printk(MPT2SAS_INFO_FMT "attempting device reset! scmd(%p)\n",
1944 ioc->name, scmd);
1945 scsi_print_command(scmd);
1946
1947 sas_device_priv_data = scmd->device->hostdata;
1948 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
1949 printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n",
1950 ioc->name, scmd);
1951 scmd->result = DID_NO_CONNECT << 16;
1952 scmd->scsi_done(scmd);
1953 r = SUCCESS;
1954 goto out;
1955 }
1956
1957 /* for hidden raid components obtain the volume_handle */
1958 handle = 0;
1959 if (sas_device_priv_data->sas_target->flags &
1960 MPT_TARGET_FLAGS_RAID_COMPONENT) {
1961 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1962 sas_device = _scsih_sas_device_find_by_handle(ioc,
1963 sas_device_priv_data->sas_target->handle);
1964 if (sas_device)
1965 handle = sas_device->volume_handle;
1966 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1967 } else
1968 handle = sas_device_priv_data->sas_target->handle;
1969
1970 if (!handle) {
1971 scmd->result = DID_RESET << 16;
1972 r = FAILED;
1973 goto out;
1974 }
1975
1976 mutex_lock(&ioc->tm_cmds.mutex);
1977 mpt2sas_scsih_issue_tm(ioc, handle, 0,
1978 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, scmd->device->lun,
1979 30);
1980
1981 /*
 1982 * sanity check: see whether all commands to this device have
 1983 * been completed
1984 */
1985 if (_scsih_scsi_lookup_find_by_lun(ioc, scmd->device->id,
1986 scmd->device->lun, scmd->device->channel))
1987 r = FAILED;
1988 else
1989 r = SUCCESS;
1990 ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
1991 mutex_unlock(&ioc->tm_cmds.mutex);
1992
1993 out:
1994 printk(MPT2SAS_INFO_FMT "device reset: %s scmd(%p)\n",
1995 ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
1996 return r;
1997}
1892 1998
1893/** 1999/**
1894 * scsih_dev_reset - eh threads main device reset routine 2000 * _scsih_target_reset - eh threads main target reset routine
1895 * @sdev: scsi device struct 2001 * @sdev: scsi device struct
1896 * 2002 *
1897 * Returns SUCCESS if command aborted else FAILED 2003 * Returns SUCCESS if command aborted else FAILED
1898 */ 2004 */
1899static int 2005static int
1900scsih_dev_reset(struct scsi_cmnd *scmd) 2006_scsih_target_reset(struct scsi_cmnd *scmd)
1901{ 2007{
1902 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2008 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
1903 struct MPT2SAS_DEVICE *sas_device_priv_data; 2009 struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -1912,7 +2018,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
1912 2018
1913 sas_device_priv_data = scmd->device->hostdata; 2019 sas_device_priv_data = scmd->device->hostdata;
1914 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 2020 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
1915 printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n", 2021 printk(MPT2SAS_INFO_FMT "target been deleted! scmd(%p)\n",
1916 ioc->name, scmd); 2022 ioc->name, scmd);
1917 scmd->result = DID_NO_CONNECT << 16; 2023 scmd->result = DID_NO_CONNECT << 16;
1918 scmd->scsi_done(scmd); 2024 scmd->scsi_done(scmd);
@@ -1962,13 +2068,13 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
1962} 2068}
1963 2069
1964/** 2070/**
1965 * scsih_host_reset - eh threads main host reset routine 2071 * _scsih_host_reset - eh threads main host reset routine
1966 * @sdev: scsi device struct 2072 * @sdev: scsi device struct
1967 * 2073 *
1968 * Returns SUCCESS if command aborted else FAILED 2074 * Returns SUCCESS if command aborted else FAILED
1969 */ 2075 */
1970static int 2076static int
1971scsih_host_reset(struct scsi_cmnd *scmd) 2077_scsih_host_reset(struct scsi_cmnd *scmd)
1972{ 2078{
1973 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2079 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
1974 int r, retval; 2080 int r, retval;
@@ -2390,7 +2496,107 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
2390} 2496}
2391 2497
2392/** 2498/**
2393 * scsih_qcmd - main scsi request entry point 2499 * _scsih_setup_eedp - setup MPI request for EEDP transfer
2500 * @scmd: pointer to scsi command object
 2501 * @mpi_request: pointer to the SCSI_IO request message frame
2502 *
 2503 * Supports DIF protection types 1 and 3.
2504 *
2505 * Returns nothing
2506 */
2507static void
2508_scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
2509{
2510 u16 eedp_flags;
2511 unsigned char prot_op = scsi_get_prot_op(scmd);
2512 unsigned char prot_type = scsi_get_prot_type(scmd);
2513
2514 if (prot_type == SCSI_PROT_DIF_TYPE0 ||
2515 prot_type == SCSI_PROT_DIF_TYPE2 ||
2516 prot_op == SCSI_PROT_NORMAL)
2517 return;
2518
2519 if (prot_op == SCSI_PROT_READ_STRIP)
2520 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
2521 else if (prot_op == SCSI_PROT_WRITE_INSERT)
2522 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
2523 else
2524 return;
2525
2526 mpi_request->EEDPBlockSize = scmd->device->sector_size;
2527
2528 switch (prot_type) {
2529 case SCSI_PROT_DIF_TYPE1:
2530
2531 /*
2532 * enable ref/guard checking
2533 * auto increment ref tag
2534 */
2535 mpi_request->EEDPFlags = eedp_flags |
2536 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2537 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2538 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2539 mpi_request->CDB.EEDP32.PrimaryReferenceTag =
2540 cpu_to_be32(scsi_get_lba(scmd));
2541
2542 break;
2543
2544 case SCSI_PROT_DIF_TYPE3:
2545
2546 /*
2547 * enable guard checking
2548 */
2549 mpi_request->EEDPFlags = eedp_flags |
2550 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2551
2552 break;
2553 }
2554}
2555
2556/**
2557 * _scsih_eedp_error_handling - return sense code for EEDP errors
2558 * @scmd: pointer to scsi command object
2559 * @ioc_status: ioc status
2560 *
2561 * Returns nothing
2562 */
2563static void
2564_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
2565{
2566 u8 ascq;
2567 u8 sk;
2568 u8 host_byte;
2569
2570 switch (ioc_status) {
2571 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2572 ascq = 0x01;
2573 break;
2574 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2575 ascq = 0x02;
2576 break;
2577 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2578 ascq = 0x03;
2579 break;
2580 default:
2581 ascq = 0x00;
2582 break;
2583 }
2584
2585 if (scmd->sc_data_direction == DMA_TO_DEVICE) {
2586 sk = ILLEGAL_REQUEST;
2587 host_byte = DID_ABORT;
2588 } else {
2589 sk = ABORTED_COMMAND;
2590 host_byte = DID_OK;
2591 }
2592
2593 scsi_build_sense_buffer(0, scmd->sense_buffer, sk, 0x10, ascq);
2594 scmd->result = DRIVER_SENSE << 24 | (host_byte << 16) |
2595 SAM_STAT_CHECK_CONDITION;
2596}
2597
2598/**
2599 * _scsih_qcmd - main scsi request entry point
2394 * @scmd: pointer to scsi command object 2600 * @scmd: pointer to scsi command object
2395 * @done: function pointer to be invoked on completion 2601 * @done: function pointer to be invoked on completion
2396 * 2602 *
@@ -2401,7 +2607,7 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
2401 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full 2607 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
2402 */ 2608 */
2403static int 2609static int
2404scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) 2610_scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
2405{ 2611{
2406 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2612 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2407 struct MPT2SAS_DEVICE *sas_device_priv_data; 2613 struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -2470,6 +2676,7 @@ scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
2470 } 2676 }
2471 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); 2677 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
2472 memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t)); 2678 memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
2679 _scsih_setup_eedp(scmd, mpi_request);
2473 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 2680 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2474 if (sas_device_priv_data->sas_target->flags & 2681 if (sas_device_priv_data->sas_target->flags &
2475 MPT_TARGET_FLAGS_RAID_COMPONENT) 2682 MPT_TARGET_FLAGS_RAID_COMPONENT)
@@ -2604,6 +2811,15 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
2604 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: 2811 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2605 desc_ioc_state = "scsi ext terminated"; 2812 desc_ioc_state = "scsi ext terminated";
2606 break; 2813 break;
2814 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2815 desc_ioc_state = "eedp guard error";
2816 break;
2817 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2818 desc_ioc_state = "eedp ref tag error";
2819 break;
2820 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2821 desc_ioc_state = "eedp app tag error";
2822 break;
2607 default: 2823 default:
2608 desc_ioc_state = "unknown"; 2824 desc_ioc_state = "unknown";
2609 break; 2825 break;
@@ -2783,7 +2999,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2783} 2999}
2784 3000
2785/** 3001/**
2786 * scsih_io_done - scsi request callback 3002 * _scsih_io_done - scsi request callback
2787 * @ioc: per adapter object 3003 * @ioc: per adapter object
2788 * @smid: system request message index 3004 * @smid: system request message index
2789 * @VF_ID: virtual function id 3005 * @VF_ID: virtual function id
@@ -2794,7 +3010,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2794 * Return nothing. 3010 * Return nothing.
2795 */ 3011 */
2796static void 3012static void
2797scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) 3013_scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
2798{ 3014{
2799 Mpi2SCSIIORequest_t *mpi_request; 3015 Mpi2SCSIIORequest_t *mpi_request;
2800 Mpi2SCSIIOReply_t *mpi_reply; 3016 Mpi2SCSIIOReply_t *mpi_reply;
@@ -2939,6 +3155,11 @@ scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
2939 scmd->result = DID_RESET << 16; 3155 scmd->result = DID_RESET << 16;
2940 break; 3156 break;
2941 3157
3158 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
3159 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
3160 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
3161 _scsih_eedp_error_handling(scmd, ioc_status);
3162 break;
2942 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: 3163 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2943 case MPI2_IOCSTATUS_INVALID_FUNCTION: 3164 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2944 case MPI2_IOCSTATUS_INVALID_SGL: 3165 case MPI2_IOCSTATUS_INVALID_SGL:
@@ -5130,18 +5351,19 @@ static struct scsi_host_template scsih_driver_template = {
5130 .module = THIS_MODULE, 5351 .module = THIS_MODULE,
5131 .name = "Fusion MPT SAS Host", 5352 .name = "Fusion MPT SAS Host",
5132 .proc_name = MPT2SAS_DRIVER_NAME, 5353 .proc_name = MPT2SAS_DRIVER_NAME,
5133 .queuecommand = scsih_qcmd, 5354 .queuecommand = _scsih_qcmd,
5134 .target_alloc = scsih_target_alloc, 5355 .target_alloc = _scsih_target_alloc,
5135 .slave_alloc = scsih_slave_alloc, 5356 .slave_alloc = _scsih_slave_alloc,
5136 .slave_configure = scsih_slave_configure, 5357 .slave_configure = _scsih_slave_configure,
5137 .target_destroy = scsih_target_destroy, 5358 .target_destroy = _scsih_target_destroy,
5138 .slave_destroy = scsih_slave_destroy, 5359 .slave_destroy = _scsih_slave_destroy,
5139 .change_queue_depth = scsih_change_queue_depth, 5360 .change_queue_depth = _scsih_change_queue_depth,
5140 .change_queue_type = scsih_change_queue_type, 5361 .change_queue_type = _scsih_change_queue_type,
5141 .eh_abort_handler = scsih_abort, 5362 .eh_abort_handler = _scsih_abort,
5142 .eh_device_reset_handler = scsih_dev_reset, 5363 .eh_device_reset_handler = _scsih_dev_reset,
5143 .eh_host_reset_handler = scsih_host_reset, 5364 .eh_target_reset_handler = _scsih_target_reset,
5144 .bios_param = scsih_bios_param, 5365 .eh_host_reset_handler = _scsih_host_reset,
5366 .bios_param = _scsih_bios_param,
5145 .can_queue = 1, 5367 .can_queue = 1,
5146 .this_id = -1, 5368 .this_id = -1,
5147 .sg_tablesize = MPT2SAS_SG_DEPTH, 5369 .sg_tablesize = MPT2SAS_SG_DEPTH,
@@ -5228,13 +5450,13 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
5228} 5450}
5229 5451
5230/** 5452/**
5231 * scsih_remove - detach and remove add host 5453 * _scsih_remove - detach and remove add host
5232 * @pdev: PCI device struct 5454 * @pdev: PCI device struct
5233 * 5455 *
5234 * Return nothing. 5456 * Return nothing.
5235 */ 5457 */
5236static void __devexit 5458static void __devexit
5237scsih_remove(struct pci_dev *pdev) 5459_scsih_remove(struct pci_dev *pdev)
5238{ 5460{
5239 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5461 struct Scsi_Host *shost = pci_get_drvdata(pdev);
5240 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 5462 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5442,14 +5664,14 @@ _scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc)
5442} 5664}
5443 5665
5444/** 5666/**
5445 * scsih_probe - attach and add scsi host 5667 * _scsih_probe - attach and add scsi host
5446 * @pdev: PCI device struct 5668 * @pdev: PCI device struct
5447 * @id: pci device id 5669 * @id: pci device id
5448 * 5670 *
5449 * Returns 0 success, anything else error. 5671 * Returns 0 success, anything else error.
5450 */ 5672 */
5451static int 5673static int
5452scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) 5674_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5453{ 5675{
5454 struct MPT2SAS_ADAPTER *ioc; 5676 struct MPT2SAS_ADAPTER *ioc;
5455 struct Scsi_Host *shost; 5677 struct Scsi_Host *shost;
@@ -5503,6 +5725,9 @@ scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5503 goto out_add_shost_fail; 5725 goto out_add_shost_fail;
5504 } 5726 }
5505 5727
5728 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
5729 | SHOST_DIF_TYPE3_PROTECTION);
5730
5506 /* event thread */ 5731 /* event thread */
5507 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), 5732 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
5508 "fw_event%d", ioc->id); 5733 "fw_event%d", ioc->id);
@@ -5536,14 +5761,14 @@ scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5536 5761
5537#ifdef CONFIG_PM 5762#ifdef CONFIG_PM
5538/** 5763/**
5539 * scsih_suspend - power management suspend main entry point 5764 * _scsih_suspend - power management suspend main entry point
5540 * @pdev: PCI device struct 5765 * @pdev: PCI device struct
5541 * @state: PM state change to (usually PCI_D3) 5766 * @state: PM state change to (usually PCI_D3)
5542 * 5767 *
5543 * Returns 0 success, anything else error. 5768 * Returns 0 success, anything else error.
5544 */ 5769 */
5545static int 5770static int
5546scsih_suspend(struct pci_dev *pdev, pm_message_t state) 5771_scsih_suspend(struct pci_dev *pdev, pm_message_t state)
5547{ 5772{
5548 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5773 struct Scsi_Host *shost = pci_get_drvdata(pdev);
5549 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 5774 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5564,13 +5789,13 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
5564} 5789}
5565 5790
5566/** 5791/**
5567 * scsih_resume - power management resume main entry point 5792 * _scsih_resume - power management resume main entry point
5568 * @pdev: PCI device struct 5793 * @pdev: PCI device struct
5569 * 5794 *
5570 * Returns 0 success, anything else error. 5795 * Returns 0 success, anything else error.
5571 */ 5796 */
5572static int 5797static int
5573scsih_resume(struct pci_dev *pdev) 5798_scsih_resume(struct pci_dev *pdev)
5574{ 5799{
5575 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5800 struct Scsi_Host *shost = pci_get_drvdata(pdev);
5576 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 5801 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5599,22 +5824,22 @@ scsih_resume(struct pci_dev *pdev)
5599static struct pci_driver scsih_driver = { 5824static struct pci_driver scsih_driver = {
5600 .name = MPT2SAS_DRIVER_NAME, 5825 .name = MPT2SAS_DRIVER_NAME,
5601 .id_table = scsih_pci_table, 5826 .id_table = scsih_pci_table,
5602 .probe = scsih_probe, 5827 .probe = _scsih_probe,
5603 .remove = __devexit_p(scsih_remove), 5828 .remove = __devexit_p(_scsih_remove),
5604#ifdef CONFIG_PM 5829#ifdef CONFIG_PM
5605 .suspend = scsih_suspend, 5830 .suspend = _scsih_suspend,
5606 .resume = scsih_resume, 5831 .resume = _scsih_resume,
5607#endif 5832#endif
5608}; 5833};
5609 5834
5610 5835
5611/** 5836/**
5612 * scsih_init - main entry point for this driver. 5837 * _scsih_init - main entry point for this driver.
5613 * 5838 *
5614 * Returns 0 success, anything else error. 5839 * Returns 0 success, anything else error.
5615 */ 5840 */
5616static int __init 5841static int __init
5617scsih_init(void) 5842_scsih_init(void)
5618{ 5843{
5619 int error; 5844 int error;
5620 5845
@@ -5630,10 +5855,10 @@ scsih_init(void)
5630 mpt2sas_base_initialize_callback_handler(); 5855 mpt2sas_base_initialize_callback_handler();
5631 5856
5632 /* queuecommand callback handler */ 5857 /* queuecommand callback handler */
5633 scsi_io_cb_idx = mpt2sas_base_register_callback_handler(scsih_io_done); 5858 scsi_io_cb_idx = mpt2sas_base_register_callback_handler(_scsih_io_done);
5634 5859
5635 /* task management callback handler */ 5860 /* task management callback handler */
5636 tm_cb_idx = mpt2sas_base_register_callback_handler(scsih_tm_done); 5861 tm_cb_idx = mpt2sas_base_register_callback_handler(_scsih_tm_done);
5637 5862
5638 /* base internal commands callback handler */ 5863 /* base internal commands callback handler */
5639 base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done); 5864 base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done);
@@ -5659,12 +5884,12 @@ scsih_init(void)
5659} 5884}
5660 5885
5661/** 5886/**
5662 * scsih_exit - exit point for this driver (when it is a module). 5887 * _scsih_exit - exit point for this driver (when it is a module).
5663 * 5888 *
5664 * Returns 0 success, anything else error. 5889 * Returns 0 success, anything else error.
5665 */ 5890 */
5666static void __exit 5891static void __exit
5667scsih_exit(void) 5892_scsih_exit(void)
5668{ 5893{
5669 printk(KERN_INFO "mpt2sas version %s unloading\n", 5894 printk(KERN_INFO "mpt2sas version %s unloading\n",
5670 MPT2SAS_DRIVER_VERSION); 5895 MPT2SAS_DRIVER_VERSION);
@@ -5682,5 +5907,5 @@ scsih_exit(void)
5682 mpt2sas_ctl_exit(); 5907 mpt2sas_ctl_exit();
5683} 5908}
5684 5909
5685module_init(scsih_init); 5910module_init(_scsih_init);
5686module_exit(scsih_exit); 5911module_exit(_scsih_exit);
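
Note on the mpt2sas_scsih.c change: besides renaming the driver-private entry points to the _scsih_ prefix and splitting error handling into separate device (logical unit) and target reset callbacks, the patch adds end-to-end data protection (EEDP/DIF) support. _scsih_setup_eedp() programs the EEDP flags for DIF types 1 and 3, scsi_host_set_prot() advertises those types to the midlayer, and _scsih_eedp_error_handling() converts the new EEDP IOC status codes into ILLEGAL REQUEST or ABORTED COMMAND sense data with ASC 0x10. The small decoder below is hypothetical and not part of the patch, but the ASCQ meanings are the standard SPC protection-information check failures that the mapping above relies on:

    /* Sketch only: human-readable meaning of the ASCQ values that
     * _scsih_eedp_error_handling() pairs with ASC 0x10. */
    static const char *eedp_ascq_desc(u8 ascq)
    {
            switch (ascq) {
            case 0x01: return "logical block guard check failed";
            case 0x02: return "logical block application tag check failed";
            case 0x03: return "logical block reference tag check failed";
            default:   return "protection information check failed";
            }
    }
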
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index e03dc0b1e1a0..686695b155c7 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -264,7 +264,7 @@ struct rep_manu_reply{
264}; 264};
265 265
266/** 266/**
267 * transport_expander_report_manufacture - obtain SMP report_manufacture 267 * _transport_expander_report_manufacture - obtain SMP report_manufacture
268 * @ioc: per adapter object 268 * @ioc: per adapter object
269 * @sas_address: expander sas address 269 * @sas_address: expander sas address
270 * @edev: the sas_expander_device object 270 * @edev: the sas_expander_device object
@@ -274,7 +274,7 @@ struct rep_manu_reply{
274 * Returns 0 for success, non-zero for failure. 274 * Returns 0 for success, non-zero for failure.
275 */ 275 */
276static int 276static int
277transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, 277_transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
278 u64 sas_address, struct sas_expander_device *edev) 278 u64 sas_address, struct sas_expander_device *edev)
279{ 279{
280 Mpi2SmpPassthroughRequest_t *mpi_request; 280 Mpi2SmpPassthroughRequest_t *mpi_request;
@@ -578,7 +578,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
578 MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER || 578 MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER ||
579 mpt2sas_port->remote_identify.device_type == 579 mpt2sas_port->remote_identify.device_type ==
580 MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER) 580 MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER)
581 transport_expander_report_manufacture(ioc, 581 _transport_expander_report_manufacture(ioc,
582 mpt2sas_port->remote_identify.sas_address, 582 mpt2sas_port->remote_identify.sas_address,
583 rphy_to_expander_device(rphy)); 583 rphy_to_expander_device(rphy));
584 584
@@ -852,7 +852,7 @@ rphy_to_ioc(struct sas_rphy *rphy)
852} 852}
853 853
854/** 854/**
855 * transport_get_linkerrors - 855 * _transport_get_linkerrors -
856 * @phy: The sas phy object 856 * @phy: The sas phy object
857 * 857 *
858 * Only support sas_host direct attached phys. 858 * Only support sas_host direct attached phys.
@@ -860,7 +860,7 @@ rphy_to_ioc(struct sas_rphy *rphy)
860 * 860 *
861 */ 861 */
862static int 862static int
863transport_get_linkerrors(struct sas_phy *phy) 863_transport_get_linkerrors(struct sas_phy *phy)
864{ 864{
865 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); 865 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
866 struct _sas_phy *mpt2sas_phy; 866 struct _sas_phy *mpt2sas_phy;
@@ -903,14 +903,14 @@ transport_get_linkerrors(struct sas_phy *phy)
903} 903}
904 904
905/** 905/**
906 * transport_get_enclosure_identifier - 906 * _transport_get_enclosure_identifier -
907 * @phy: The sas phy object 907 * @phy: The sas phy object
908 * 908 *
909 * Obtain the enclosure logical id for an expander. 909 * Obtain the enclosure logical id for an expander.
910 * Returns 0 for success, non-zero for failure. 910 * Returns 0 for success, non-zero for failure.
911 */ 911 */
912static int 912static int
913transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) 913_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
914{ 914{
915 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy); 915 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
916 struct _sas_node *sas_expander; 916 struct _sas_node *sas_expander;
@@ -929,13 +929,13 @@ transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
929} 929}
930 930
931/** 931/**
932 * transport_get_bay_identifier - 932 * _transport_get_bay_identifier -
933 * @phy: The sas phy object 933 * @phy: The sas phy object
934 * 934 *
935 * Returns the slot id for a device that resides inside an enclosure. 935 * Returns the slot id for a device that resides inside an enclosure.
936 */ 936 */
937static int 937static int
938transport_get_bay_identifier(struct sas_rphy *rphy) 938_transport_get_bay_identifier(struct sas_rphy *rphy)
939{ 939{
940 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy); 940 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
941 struct _sas_device *sas_device; 941 struct _sas_device *sas_device;
@@ -953,7 +953,7 @@ transport_get_bay_identifier(struct sas_rphy *rphy)
953} 953}
954 954
955/** 955/**
956 * transport_phy_reset - 956 * _transport_phy_reset -
957 * @phy: The sas phy object 957 * @phy: The sas phy object
958 * @hard_reset: 958 * @hard_reset:
959 * 959 *
@@ -961,7 +961,7 @@ transport_get_bay_identifier(struct sas_rphy *rphy)
961 * Returns 0 for success, non-zero for failure. 961 * Returns 0 for success, non-zero for failure.
962 */ 962 */
963static int 963static int
964transport_phy_reset(struct sas_phy *phy, int hard_reset) 964_transport_phy_reset(struct sas_phy *phy, int hard_reset)
965{ 965{
966 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); 966 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
967 struct _sas_phy *mpt2sas_phy; 967 struct _sas_phy *mpt2sas_phy;
@@ -1002,7 +1002,7 @@ transport_phy_reset(struct sas_phy *phy, int hard_reset)
1002} 1002}
1003 1003
1004/** 1004/**
1005 * transport_smp_handler - transport portal for smp passthru 1005 * _transport_smp_handler - transport portal for smp passthru
1006 * @shost: shost object 1006 * @shost: shost object
1007 * @rphy: sas transport rphy object 1007 * @rphy: sas transport rphy object
1008 * @req: 1008 * @req:
@@ -1012,7 +1012,7 @@ transport_phy_reset(struct sas_phy *phy, int hard_reset)
1012 * smp_rep_general /sys/class/bsg/expander-5:0 1012 * smp_rep_general /sys/class/bsg/expander-5:0
1013 */ 1013 */
1014static int 1014static int
1015transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, 1015_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1016 struct request *req) 1016 struct request *req)
1017{ 1017{
1018 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1018 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1041,7 +1041,7 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1041 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1041 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1042 printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, " 1042 printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, "
1043 "rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt, 1043 "rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt,
1044 req->data_len, rsp->bio->bi_vcnt, rsp->data_len); 1044 blk_rq_bytes(req), rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
1045 return -EINVAL; 1045 return -EINVAL;
1046 } 1046 }
1047 1047
@@ -1104,7 +1104,7 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1104 *((u64 *)&mpi_request->SASAddress) = (rphy) ? 1104 *((u64 *)&mpi_request->SASAddress) = (rphy) ?
1105 cpu_to_le64(rphy->identify.sas_address) : 1105 cpu_to_le64(rphy->identify.sas_address) :
1106 cpu_to_le64(ioc->sas_hba.sas_address); 1106 cpu_to_le64(ioc->sas_hba.sas_address);
1107 mpi_request->RequestDataLength = cpu_to_le16(req->data_len - 4); 1107 mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
1108 psge = &mpi_request->SGL; 1108 psge = &mpi_request->SGL;
1109 1109
1110 /* WRITE sgel first */ 1110 /* WRITE sgel first */
@@ -1112,13 +1112,13 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1112 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); 1112 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1113 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 1113 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1114 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), 1114 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
1115 req->data_len, PCI_DMA_BIDIRECTIONAL); 1115 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
1116 if (!dma_addr_out) { 1116 if (!dma_addr_out) {
1117 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid)); 1117 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
1118 goto unmap; 1118 goto unmap;
1119 } 1119 }
1120 1120
1121 ioc->base_add_sg_single(psge, sgl_flags | (req->data_len - 4), 1121 ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(req) - 4),
1122 dma_addr_out); 1122 dma_addr_out);
1123 1123
1124 /* incr sgel */ 1124 /* incr sgel */
@@ -1129,14 +1129,14 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1129 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | 1129 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1130 MPI2_SGE_FLAGS_END_OF_LIST); 1130 MPI2_SGE_FLAGS_END_OF_LIST);
1131 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 1131 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1132 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), 1132 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
1133 rsp->data_len, PCI_DMA_BIDIRECTIONAL); 1133 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
1134 if (!dma_addr_in) { 1134 if (!dma_addr_in) {
1135 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid)); 1135 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
1136 goto unmap; 1136 goto unmap;
1137 } 1137 }
1138 1138
1139 ioc->base_add_sg_single(psge, sgl_flags | (rsp->data_len + 4), 1139 ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(rsp) + 4),
1140 dma_addr_in); 1140 dma_addr_in);
1141 1141
1142 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - " 1142 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - "
@@ -1170,9 +1170,8 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1170 1170
1171 memcpy(req->sense, mpi_reply, sizeof(*mpi_reply)); 1171 memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
1172 req->sense_len = sizeof(*mpi_reply); 1172 req->sense_len = sizeof(*mpi_reply);
1173 req->data_len = 0; 1173 req->resid_len = 0;
1174 rsp->data_len -= mpi_reply->ResponseDataLength; 1174 rsp->resid_len -= mpi_reply->ResponseDataLength;
1175
1176 } else { 1175 } else {
1177 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT 1176 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT
1178 "%s - no reply\n", ioc->name, __func__)); 1177 "%s - no reply\n", ioc->name, __func__));
@@ -1188,10 +1187,10 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1188 1187
1189 unmap: 1188 unmap:
1190 if (dma_addr_out) 1189 if (dma_addr_out)
1191 pci_unmap_single(ioc->pdev, dma_addr_out, req->data_len, 1190 pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req),
1192 PCI_DMA_BIDIRECTIONAL); 1191 PCI_DMA_BIDIRECTIONAL);
1193 if (dma_addr_in) 1192 if (dma_addr_in)
1194 pci_unmap_single(ioc->pdev, dma_addr_in, rsp->data_len, 1193 pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
1195 PCI_DMA_BIDIRECTIONAL); 1194 PCI_DMA_BIDIRECTIONAL);
1196 1195
1197 out: 1196 out:
@@ -1201,11 +1200,11 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1201} 1200}
1202 1201
1203struct sas_function_template mpt2sas_transport_functions = { 1202struct sas_function_template mpt2sas_transport_functions = {
1204 .get_linkerrors = transport_get_linkerrors, 1203 .get_linkerrors = _transport_get_linkerrors,
1205 .get_enclosure_identifier = transport_get_enclosure_identifier, 1204 .get_enclosure_identifier = _transport_get_enclosure_identifier,
1206 .get_bay_identifier = transport_get_bay_identifier, 1205 .get_bay_identifier = _transport_get_bay_identifier,
1207 .phy_reset = transport_phy_reset, 1206 .phy_reset = _transport_phy_reset,
1208 .smp_handler = transport_smp_handler, 1207 .smp_handler = _transport_smp_handler,
1209}; 1208};
1210 1209
1211struct scsi_transport_template *mpt2sas_transport_template; 1210struct scsi_transport_template *mpt2sas_transport_template;
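
Note on the mpt2sas_transport.c change: aside from the _transport_ prefix rename, the SMP passthrough handler is converted to the block-layer accessors, sizing its DMA mappings and SGEs with blk_rq_bytes() instead of touching req->data_len directly, and reporting leftover bytes through resid_len. A minimal sketch of that idiom, assuming req and rsp are the struct request pair handed to the handler; the function name is illustrative only:

    #include <linux/blkdev.h>

    /* Sketch only: read the request size via the accessor and clear the
     * residuals on successful completion, as the hunks above now do. */
    static void smp_note_completion(struct request *req, struct request *rsp,
                                    u16 response_data_length)
    {
            /* total payload bytes, previously read from req->data_len */
            unsigned int req_len = blk_rq_bytes(req);

            pr_debug("smp request carried %u bytes\n", req_len);
            req->resid_len = 0;                     /* request fully consumed */
            rsp->resid_len -= response_data_length; /* response bytes actually used */
    }
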
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c
deleted file mode 100644
index e4acebd10d1b..000000000000
--- a/drivers/scsi/mvsas.c
+++ /dev/null
@@ -1,3222 +0,0 @@
1/*
2 mvsas.c - Marvell 88SE6440 SAS/SATA support
3
4 Copyright 2007 Red Hat, Inc.
5 Copyright 2008 Marvell. <kewei@marvell.com>
6
7 This program is free software; you can redistribute it and/or
8 modify it under the terms of the GNU General Public License as
9 published by the Free Software Foundation; either version 2,
10 or (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty
14 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 See the GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public
18 License along with this program; see the file COPYING. If not,
19 write to the Free Software Foundation, 675 Mass Ave, Cambridge,
20 MA 02139, USA.
21
22 ---------------------------------------------------------------
23
24 Random notes:
25 * hardware supports controlling the endian-ness of data
26 structures. this permits elimination of all the le32_to_cpu()
27 and cpu_to_le32() conversions.
28
29 */
30
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/pci.h>
34#include <linux/interrupt.h>
35#include <linux/spinlock.h>
36#include <linux/delay.h>
37#include <linux/dma-mapping.h>
38#include <linux/ctype.h>
39#include <scsi/libsas.h>
40#include <scsi/scsi_tcq.h>
41#include <scsi/sas_ata.h>
42#include <asm/io.h>
43
44#define DRV_NAME "mvsas"
45#define DRV_VERSION "0.5.2"
46#define _MV_DUMP 0
47#define MVS_DISABLE_NVRAM
48#define MVS_DISABLE_MSI
49
50#define mr32(reg) readl(regs + MVS_##reg)
51#define mw32(reg,val) writel((val), regs + MVS_##reg)
52#define mw32_f(reg,val) do { \
53 writel((val), regs + MVS_##reg); \
54 readl(regs + MVS_##reg); \
55 } while (0)
56
57#define MVS_ID_NOT_MAPPED 0x7f
58#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
59
60/* offset for D2H FIS in the Received FIS List Structure */
61#define SATA_RECEIVED_D2H_FIS(reg_set) \
62 ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40)
63#define SATA_RECEIVED_PIO_FIS(reg_set) \
64 ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20)
65#define UNASSOC_D2H_FIS(id) \
66 ((void *) mvi->rx_fis + 0x100 * id)
67
68#define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \
69 for ((__mc) = (__lseq_mask), (__lseq) = 0; \
70 (__mc) != 0 && __rest; \
71 (++__lseq), (__mc) >>= 1)
72
73/* driver compile-time configuration */
74enum driver_configuration {
75 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
76 MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
77 /* software requires power-of-2
78 ring size */
79
80 MVS_SLOTS = 512, /* command slots */
81 MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */
82 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
83 MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
84 MVS_OAF_SZ = 64, /* Open address frame buffer size */
85
86 MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */
87
88 MVS_QUEUE_SIZE = 30, /* Support Queue depth */
89 MVS_CAN_QUEUE = MVS_SLOTS - 1, /* SCSI Queue depth */
90};
91
92/* unchangeable hardware details */
93enum hardware_details {
94 MVS_MAX_PHYS = 8, /* max. possible phys */
95 MVS_MAX_PORTS = 8, /* max. possible ports */
96 MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100),
97};
98
99/* peripheral registers (BAR2) */
100enum peripheral_registers {
101 SPI_CTL = 0x10, /* EEPROM control */
102 SPI_CMD = 0x14, /* EEPROM command */
103 SPI_DATA = 0x18, /* EEPROM data */
104};
105
106enum peripheral_register_bits {
107 TWSI_RDY = (1U << 7), /* EEPROM interface ready */
108 TWSI_RD = (1U << 4), /* EEPROM read access */
109
110 SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */
111};
112
113/* enhanced mode registers (BAR4) */
114enum hw_registers {
115 MVS_GBL_CTL = 0x04, /* global control */
116 MVS_GBL_INT_STAT = 0x08, /* global irq status */
117 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
118 MVS_GBL_PORT_TYPE = 0xa0, /* port type */
119
120 MVS_CTL = 0x100, /* SAS/SATA port configuration */
121 MVS_PCS = 0x104, /* SAS/SATA port control/status */
122 MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
123 MVS_CMD_LIST_HI = 0x10C,
124 MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
125 MVS_RX_FIS_HI = 0x114,
126
127 MVS_TX_CFG = 0x120, /* TX configuration */
128 MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
129 MVS_TX_HI = 0x128,
130
131 MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
132 MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
133 MVS_RX_CFG = 0x134, /* RX configuration */
134 MVS_RX_LO = 0x138, /* RX (completion) ring addr */
135 MVS_RX_HI = 0x13C,
136 MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
137
138 MVS_INT_COAL = 0x148, /* Int coalescing config */
139 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
140 MVS_INT_STAT = 0x150, /* Central int status */
141 MVS_INT_MASK = 0x154, /* Central int enable */
142 MVS_INT_STAT_SRS = 0x158, /* SATA register set status */
143 MVS_INT_MASK_SRS = 0x15C,
144
145 /* ports 1-3 follow after this */
146 MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */
147 MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */
148 MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */
149 MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */
150
151 /* ports 1-3 follow after this */
152 MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */
153 MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */
154
155 MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */
156 MVS_CMD_DATA = 0x1BC, /* Command register port (data) */
157
158 /* ports 1-3 follow after this */
159 MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */
160 MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */
161 MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */
162 MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */
163
164 /* ports 1-3 follow after this */
165 MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */
166 MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */
167 MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */
168 MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */
169};
170
171enum hw_register_bits {
172 /* MVS_GBL_CTL */
173 INT_EN = (1U << 1), /* Global int enable */
174 HBA_RST = (1U << 0), /* HBA reset */
175
176 /* MVS_GBL_INT_STAT */
177 INT_XOR = (1U << 4), /* XOR engine event */
178 INT_SAS_SATA = (1U << 0), /* SAS/SATA event */
179
180 /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */
181 SATA_TARGET = (1U << 16), /* port0 SATA target enable */
182 MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */
183 MODE_AUTO_DET_PORT6 = (1U << 14),
184 MODE_AUTO_DET_PORT5 = (1U << 13),
185 MODE_AUTO_DET_PORT4 = (1U << 12),
186 MODE_AUTO_DET_PORT3 = (1U << 11),
187 MODE_AUTO_DET_PORT2 = (1U << 10),
188 MODE_AUTO_DET_PORT1 = (1U << 9),
189 MODE_AUTO_DET_PORT0 = (1U << 8),
190 MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
191 MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
192 MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
193 MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
194 MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */
195 MODE_SAS_PORT6_MASK = (1U << 6),
196 MODE_SAS_PORT5_MASK = (1U << 5),
197 MODE_SAS_PORT4_MASK = (1U << 4),
198 MODE_SAS_PORT3_MASK = (1U << 3),
199 MODE_SAS_PORT2_MASK = (1U << 2),
200 MODE_SAS_PORT1_MASK = (1U << 1),
201 MODE_SAS_PORT0_MASK = (1U << 0),
202 MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
203 MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
204 MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
205 MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
206
207 /* SAS_MODE value may be
208 * dictated (in hw) by values
209 * of SATA_TARGET & AUTO_DET
210 */
211
212 /* MVS_TX_CFG */
213 TX_EN = (1U << 16), /* Enable TX */
214 TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */
215
216 /* MVS_RX_CFG */
217 RX_EN = (1U << 16), /* Enable RX */
218 RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */
219
220 /* MVS_INT_COAL */
221 COAL_EN = (1U << 16), /* Enable int coalescing */
222
223 /* MVS_INT_STAT, MVS_INT_MASK */
224 CINT_I2C = (1U << 31), /* I2C event */
225 CINT_SW0 = (1U << 30), /* software event 0 */
226 CINT_SW1 = (1U << 29), /* software event 1 */
227 CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */
228 CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
229 CINT_MEM = (1U << 26), /* int mem parity err */
230 CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
231 CINT_SRS = (1U << 3), /* SRS event */
232 CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
233 CINT_DONE = (1U << 0), /* cmd completion */
234
235 /* shl for ports 1-3 */
236 CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */
237 CINT_PORT = (1U << 8), /* port0 event */
238 CINT_PORT_MASK_OFFSET = 8,
239 CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET),
240
241 /* TX (delivery) ring bits */
242 TXQ_CMD_SHIFT = 29,
243 TXQ_CMD_SSP = 1, /* SSP protocol */
244 TXQ_CMD_SMP = 2, /* SMP protocol */
245 TXQ_CMD_STP = 3, /* STP/SATA protocol */
246 TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */
247 TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
248 TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
249 TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */
250 TXQ_SRS_SHIFT = 20, /* SATA register set */
251 TXQ_SRS_MASK = 0x7f,
252 TXQ_PHY_SHIFT = 12, /* PHY bitmap */
253 TXQ_PHY_MASK = 0xff,
254 TXQ_SLOT_MASK = 0xfff, /* slot number */
255
256 /* RX (completion) ring bits */
257 RXQ_GOOD = (1U << 23), /* Response good */
258 RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */
259 RXQ_CMD_RX = (1U << 20), /* target cmd received */
260 RXQ_ATTN = (1U << 19), /* attention */
261 RXQ_RSP = (1U << 18), /* response frame xfer'd */
262 RXQ_ERR = (1U << 17), /* err info rec xfer'd */
263 RXQ_DONE = (1U << 16), /* cmd complete */
264 RXQ_SLOT_MASK = 0xfff, /* slot number */
265
266 /* mvs_cmd_hdr bits */
267 MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */
268 MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */
269
270 /* SSP initiator only */
271 MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */
272
273 /* SSP initiator or target */
274 MCH_SSP_FR_TASK = 0x1, /* TASK frame */
275
276 /* SSP target only */
277 MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */
278 MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */
279 MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */
280 MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */
281
282 MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */
283 MCH_FBURST = (1U << 11), /* first burst (SSP) */
284 MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */
285 MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */
286 MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */
287 MCH_RESET = (1U << 7), /* Reset (STP/SATA) */
288 MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */
289 MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */
290 MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */
291 MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/
292
293 CCTL_RST = (1U << 5), /* port logic reset */
294
295 /* 0(LSB first), 1(MSB first) */
296 CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */
297 CCTL_ENDIAN_RSP = (1U << 2), /* response frame */
298 CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */
299 CCTL_ENDIAN_CMD = (1U << 0), /* command table */
300
301 /* MVS_Px_SER_CTLSTAT (per-phy control) */
302 PHY_SSP_RST = (1U << 3), /* reset SSP link layer */
303 PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */
304 PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */
305 PHY_RST = (1U << 0), /* phy reset */
306 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
307 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
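	/* mvs_phy_control(PHY_FUNC_SET_LINK_RATE) programs the minimum
	 * rate into bits 11:8 and the maximum rate into bits 15:12.
	 */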
308 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
309 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
310 (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
311 PHY_READY_MASK = (1U << 20),
312
313 /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
314 PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */
315 PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
316 PHYEV_AN = (1U << 18), /* SATA async notification */
317 PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */
318 PHYEV_SIG_FIS = (1U << 16), /* signature FIS */
319 PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */
320 PHYEV_IU_BIG = (1U << 11), /* IU too long err */
321 PHYEV_IU_SMALL = (1U << 10), /* IU too short err */
322 PHYEV_UNK_TAG = (1U << 9), /* unknown tag */
323 PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */
324 PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */
325 PHYEV_PORT_SEL = (1U << 6), /* port selector present */
326 PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */
327 PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */
328 PHYEV_ID_FAIL = (1U << 3), /* identify failed */
329 PHYEV_ID_DONE = (1U << 2), /* identify done */
330 PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */
331 PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */
332
333 /* MVS_PCS */
334 PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */
335 PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */
336 PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */
337 PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */
338 PCS_RSP_RX_EN = (1U << 7), /* raw response rx */
339 PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */
340 PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */
341 PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */
342 PCS_CMD_RST = (1U << 1), /* reset cmd issue */
343 PCS_CMD_EN = (1U << 0), /* enable cmd issue */
344
345 /* Port n Attached Device Info */
346 PORT_DEV_SSP_TRGT = (1U << 19),
347 PORT_DEV_SMP_TRGT = (1U << 18),
348 PORT_DEV_STP_TRGT = (1U << 17),
349 PORT_DEV_SSP_INIT = (1U << 11),
350 PORT_DEV_SMP_INIT = (1U << 10),
351 PORT_DEV_STP_INIT = (1U << 9),
352 PORT_PHY_ID_MASK = (0xFFU << 24),
353 PORT_DEV_TRGT_MASK = (0x7U << 17),
354 PORT_DEV_INIT_MASK = (0x7U << 9),
355 PORT_DEV_TYPE_MASK = (0x7U << 0),
356
357 /* Port n PHY Status */
358 PHY_RDY = (1U << 2),
359 PHY_DW_SYNC = (1U << 1),
360 PHY_OOB_DTCTD = (1U << 0),
361
362 /* VSR */
363 /* PHYMODE 6 (CDB) */
364 PHY_MODE6_LATECLK = (1U << 29), /* Lock Clock */
365 PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop Speed */
366 PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/
367 PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */
368 PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */
369 PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */
370 PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select (final) */
371 PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */
372 PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */
373 PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */
374 PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */
375 PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */
376 PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */
377 PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */
378};
379
380enum mvs_info_flags {
381 MVF_MSI = (1U << 0), /* MSI is enabled */
382 MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
383};
384
385enum sas_cmd_port_registers {
386 CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */
387 CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */
388 CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */
389 CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */
390 CMD_OOB_SPACE = 0x110, /* OOB space control register */
391 CMD_OOB_BURST = 0x114, /* OOB burst control register */
392 CMD_PHY_TIMER = 0x118, /* PHY timer control register */
393 CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */
394 CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */
395 CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */
396 CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */
397 CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */
398 CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */
399 CMD_ID_TEST = 0x134, /* ID test register */
400 CMD_PL_TIMER = 0x138, /* PL timer register */
401 CMD_WD_TIMER = 0x13c, /* WD timer register */
402 CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */
403 CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */
404 CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */
405 CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */
406 CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */
407 CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */
408 CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
409 CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */
410 CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */
411	CMD_XOR_MEM_BIST_STAT	= 0x164, /* XOR Memory BIST Status */
412 CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */
413 CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */
414 CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */
415 CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
416 CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
417 CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */
418 CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */
419 CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */
420 CMD_RESET_COUNT = 0x188, /* Reset Count */
421 CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */
422 CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */
423 CMD_PHY_CTL = 0x194, /* PHY Control and Status */
424 CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */
425 CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */
426 CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */
427 CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */
428 CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */
429 CMD_HOST_CTL = 0x1AC, /* Host Control Status */
430 CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */
431 CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */
432 CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */
433 CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */
434 CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */
435 CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */
436};
437
438/* SAS/SATA configuration port registers, aka phy registers */
439enum sas_sata_config_port_regs {
440 PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */
441 PHYR_ADDR_LO = 0x04, /* my SAS address (low) */
442 PHYR_ADDR_HI = 0x08, /* my SAS address (high) */
443 PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */
444 PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */
445 PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */
446 PHYR_SATA_CTL = 0x18, /* SATA control */
447 PHYR_PHY_STAT = 0x1C, /* PHY status */
448 PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */
449 PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */
450 PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */
451 PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */
452 PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */
453 PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */
454 PHYR_WIDE_PORT = 0x38, /* wide port participating */
455 PHYR_CURRENT0 = 0x80, /* current connection info 0 */
456 PHYR_CURRENT1 = 0x84, /* current connection info 1 */
457 PHYR_CURRENT2 = 0x88, /* current connection info 2 */
458};
459
460/* SAS/SATA Vendor Specific Port Registers */
461enum sas_sata_vsp_regs {
462 VSR_PHY_STAT = 0x00, /* Phy Status */
463 VSR_PHY_MODE1 = 0x01, /* phy tx */
464 VSR_PHY_MODE2 = 0x02, /* tx scc */
465 VSR_PHY_MODE3 = 0x03, /* pll */
466 VSR_PHY_MODE4 = 0x04, /* VCO */
467 VSR_PHY_MODE5 = 0x05, /* Rx */
468 VSR_PHY_MODE6 = 0x06, /* CDR */
469 VSR_PHY_MODE7 = 0x07, /* Impedance */
470 VSR_PHY_MODE8 = 0x08, /* Voltage */
471 VSR_PHY_MODE9 = 0x09, /* Test */
472 VSR_PHY_MODE10 = 0x0A, /* Power */
473 VSR_PHY_MODE11 = 0x0B, /* Phy Mode */
474	VSR_PHY_VS0		= 0x0C, /* Vendor Specific 0 */
475	VSR_PHY_VS1		= 0x0D, /* Vendor Specific 1 */
476};
477
478enum pci_cfg_registers {
479 PCR_PHY_CTL = 0x40,
480 PCR_PHY_CTL2 = 0x90,
481 PCR_DEV_CTRL = 0xE8,
482};
483
484enum pci_cfg_register_bits {
485 PCTL_PWR_ON = (0xFU << 24),
486 PCTL_OFF = (0xFU << 12),
487 PRD_REQ_SIZE = (0x4000),
488 PRD_REQ_MASK = (0x00007000),
489};
490
491enum nvram_layout_offsets {
492 NVR_SIG = 0x00, /* 0xAA, 0x55 */
493 NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */
494};
495
496enum chip_flavors {
497 chip_6320,
498 chip_6440,
499 chip_6480,
500};
501
502enum port_type {
503 PORT_TYPE_SAS = (1L << 1),
504 PORT_TYPE_SATA = (1L << 0),
505};
506
507/* Command Table Format */
508enum ct_format {
509 /* SSP */
510 SSP_F_H = 0x00,
511 SSP_F_IU = 0x18,
512 SSP_F_MAX = 0x4D,
513 /* STP */
514 STP_CMD_FIS = 0x00,
515 STP_ATAPI_CMD = 0x40,
516 STP_F_MAX = 0x10,
517 /* SMP */
518 SMP_F_T = 0x00,
519 SMP_F_DEP = 0x01,
520 SMP_F_MAX = 0x101,
521};
522
523enum status_buffer {
524 SB_EIR_OFF = 0x00, /* Error Information Record */
525 SB_RFB_OFF = 0x08, /* Response Frame Buffer */
526	SB_RFB_MAX		= 0x400, /* RFB size */
527};
528
529enum error_info_rec {
530 CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */
531 CMD_PI_ERR = (1U << 30), /* Protection info error. see flags2 */
532 RSP_OVER = (1U << 29), /* rsp buffer overflow */
533 RETRY_LIM = (1U << 28), /* FIS/frame retry limit exceeded */
534 UNK_FIS = (1U << 27), /* unknown FIS */
535 DMA_TERM = (1U << 26), /* DMA terminate primitive rx'd */
536 SYNC_ERR = (1U << 25), /* SYNC rx'd during frame xmit */
537 TFILE_ERR = (1U << 24), /* SATA taskfile Error bit set */
538 R_ERR = (1U << 23), /* SATA returned R_ERR prim */
539 RD_OFS = (1U << 20), /* Read DATA frame invalid offset */
540 XFER_RDY_OFS = (1U << 19), /* XFER_RDY offset error */
541 UNEXP_XFER_RDY = (1U << 18), /* unexpected XFER_RDY error */
542 DATA_OVER_UNDER = (1U << 16), /* data overflow/underflow */
543 INTERLOCK = (1U << 15), /* interlock error */
544 NAK = (1U << 14), /* NAK rx'd */
545 ACK_NAK_TO = (1U << 13), /* ACK/NAK timeout */
546 CXN_CLOSED = (1U << 12), /* cxn closed w/out ack/nak */
547 OPEN_TO = (1U << 11), /* I_T nexus lost, open cxn timeout */
548 PATH_BLOCKED = (1U << 10), /* I_T nexus lost, pathway blocked */
549 NO_DEST = (1U << 9), /* I_T nexus lost, no destination */
550 STP_RES_BSY = (1U << 8), /* STP resources busy */
551 BREAK = (1U << 7), /* break received */
552 BAD_DEST = (1U << 6), /* bad destination */
553 BAD_PROTO = (1U << 5), /* protocol not supported */
554 BAD_RATE = (1U << 4), /* cxn rate not supported */
555 WRONG_DEST = (1U << 3), /* wrong destination error */
556 CREDIT_TO = (1U << 2), /* credit timeout */
557 WDOG_TO = (1U << 1), /* watchdog timeout */
558 BUF_PAR = (1U << 0), /* buffer parity error */
559};
560
561enum error_info_rec_2 {
562 SLOT_BSY_ERR = (1U << 31), /* Slot Busy Error */
563 GRD_CHK_ERR = (1U << 14), /* Guard Check Error */
564 APP_CHK_ERR = (1U << 13), /* Application Check error */
565 REF_CHK_ERR = (1U << 12), /* Reference Check Error */
566 USR_BLK_NM = (1U << 0), /* User Block Number */
567};
568
569struct mvs_chip_info {
570 u32 n_phy;
571 u32 srs_sz;
572 u32 slot_width;
573};
574
575struct mvs_err_info {
576 __le32 flags;
577 __le32 flags2;
578};
579
580struct mvs_prd {
581 __le64 addr; /* 64-bit buffer address */
582 __le32 reserved;
583 __le32 len; /* 16-bit length */
584};
585
586struct mvs_cmd_hdr {
587 __le32 flags; /* PRD tbl len; SAS, SATA ctl */
588 __le32 lens; /* cmd, max resp frame len */
589 __le32 tags; /* targ port xfer tag; tag */
590 __le32 data_len; /* data xfer len */
591 __le64 cmd_tbl; /* command table address */
592 __le64 open_frame; /* open addr frame address */
593 __le64 status_buf; /* status buffer address */
594 __le64 prd_tbl; /* PRD tbl address */
595 __le32 reserved[4];
596};
597
598struct mvs_port {
599 struct asd_sas_port sas_port;
600 u8 port_attached;
601 u8 taskfileset;
602 u8 wide_port_phymap;
603 struct list_head list;
604};
605
606struct mvs_phy {
607 struct mvs_port *port;
608 struct asd_sas_phy sas_phy;
609 struct sas_identify identify;
610 struct scsi_device *sdev;
611 u64 dev_sas_addr;
612 u64 att_dev_sas_addr;
613 u32 att_dev_info;
614 u32 dev_info;
615 u32 phy_type;
616 u32 phy_status;
617 u32 irq_status;
618 u32 frame_rcvd_size;
619 u8 frame_rcvd[32];
620 u8 phy_attached;
621 enum sas_linkrate minimum_linkrate;
622 enum sas_linkrate maximum_linkrate;
623};
624
625struct mvs_slot_info {
626 struct list_head list;
627 struct sas_task *task;
628 u32 n_elem;
629 u32 tx;
630
631 /* DMA buffer for storing cmd tbl, open addr frame, status buffer,
632 * and PRD table
633 */
634 void *buf;
635 dma_addr_t buf_dma;
636#if _MV_DUMP
637 u32 cmd_size;
638#endif
639
640 void *response;
641 struct mvs_port *port;
642};
643
644struct mvs_info {
645 unsigned long flags;
646
647 spinlock_t lock; /* host-wide lock */
648 struct pci_dev *pdev; /* our device */
649 void __iomem *regs; /* enhanced mode registers */
650 void __iomem *peri_regs; /* peripheral registers */
651
652 u8 sas_addr[SAS_ADDR_SIZE];
653 struct sas_ha_struct sas; /* SCSI/SAS glue */
654 struct Scsi_Host *shost;
655
656 __le32 *tx; /* TX (delivery) DMA ring */
657 dma_addr_t tx_dma;
658 u32 tx_prod; /* cached next-producer idx */
659
660 __le32 *rx; /* RX (completion) DMA ring */
661 dma_addr_t rx_dma;
662 u32 rx_cons; /* RX consumer idx */
663
664 __le32 *rx_fis; /* RX'd FIS area */
665 dma_addr_t rx_fis_dma;
666
667 struct mvs_cmd_hdr *slot; /* DMA command header slots */
668 dma_addr_t slot_dma;
669
670 const struct mvs_chip_info *chip;
671
672 u8 tags[MVS_SLOTS];
673 struct mvs_slot_info slot_info[MVS_SLOTS];
674 /* further per-slot information */
675 struct mvs_phy phy[MVS_MAX_PHYS];
676 struct mvs_port port[MVS_MAX_PHYS];
677#ifdef MVS_USE_TASKLET
678 struct tasklet_struct tasklet;
679#endif
680};
681
682static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
683 void *funcdata);
684static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port);
685static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val);
686static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port);
687static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val);
688static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val);
689static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port);
690
691static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i);
692static void mvs_detect_porttype(struct mvs_info *mvi, int i);
693static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
694static void mvs_release_task(struct mvs_info *mvi, int phy_no);
695
696static int mvs_scan_finished(struct Scsi_Host *, unsigned long);
697static void mvs_scan_start(struct Scsi_Host *);
698static int mvs_slave_configure(struct scsi_device *sdev);
699
700static struct scsi_transport_template *mvs_stt;
701
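/* indexed by enum chip_flavors; initializers follow struct mvs_chip_info
 * field order: n_phy, srs_sz, slot_width
 */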
702static const struct mvs_chip_info mvs_chips[] = {
703 [chip_6320] = { 2, 16, 9 },
704 [chip_6440] = { 4, 16, 9 },
705 [chip_6480] = { 8, 32, 10 },
706};
707
708static struct scsi_host_template mvs_sht = {
709 .module = THIS_MODULE,
710 .name = DRV_NAME,
711 .queuecommand = sas_queuecommand,
712 .target_alloc = sas_target_alloc,
713 .slave_configure = mvs_slave_configure,
714 .slave_destroy = sas_slave_destroy,
715 .scan_finished = mvs_scan_finished,
716 .scan_start = mvs_scan_start,
717 .change_queue_depth = sas_change_queue_depth,
718 .change_queue_type = sas_change_queue_type,
719 .bios_param = sas_bios_param,
720 .can_queue = 1,
721 .cmd_per_lun = 1,
722 .this_id = -1,
723 .sg_tablesize = SG_ALL,
724 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
725 .use_clustering = ENABLE_CLUSTERING,
726 .eh_device_reset_handler = sas_eh_device_reset_handler,
727 .eh_bus_reset_handler = sas_eh_bus_reset_handler,
728 .slave_alloc = sas_slave_alloc,
729 .target_destroy = sas_target_destroy,
730 .ioctl = sas_ioctl,
731};
732
733static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
734{
735 u32 i;
736 u32 run;
737 u32 offset;
738
739 offset = 0;
740 while (size) {
741 printk("%08X : ", baseaddr + offset);
742 if (size >= 16)
743 run = 16;
744 else
745 run = size;
746 size -= run;
747 for (i = 0; i < 16; i++) {
748 if (i < run)
749 printk("%02X ", (u32)data[i]);
750 else
751 printk(" ");
752 }
753 printk(": ");
754 for (i = 0; i < run; i++)
755 printk("%c", isalnum(data[i]) ? data[i] : '.');
756 printk("\n");
757 data = &data[16];
758 offset += run;
759 }
760 printk("\n");
761}
762
763#if _MV_DUMP
764static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
765 enum sas_protocol proto)
766{
767 u32 offset;
768 struct pci_dev *pdev = mvi->pdev;
769 struct mvs_slot_info *slot = &mvi->slot_info[tag];
770
771 offset = slot->cmd_size + MVS_OAF_SZ +
772 sizeof(struct mvs_prd) * slot->n_elem;
773 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n",
774 tag);
775 mvs_hexdump(32, (u8 *) slot->response,
776 (u32) slot->buf_dma + offset);
777}
778#endif
779
780static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
781 enum sas_protocol proto)
782{
783#if _MV_DUMP
784 u32 sz, w_ptr;
785 u64 addr;
786 void __iomem *regs = mvi->regs;
787 struct pci_dev *pdev = mvi->pdev;
788 struct mvs_slot_info *slot = &mvi->slot_info[tag];
789
790 /*Delivery Queue */
791 sz = mr32(TX_CFG) & TX_RING_SZ_MASK;
792 w_ptr = slot->tx;
793	addr = ((u64)mr32(TX_HI) << 16 << 16) | mr32(TX_LO);
794 dev_printk(KERN_DEBUG, &pdev->dev,
795 "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
796 dev_printk(KERN_DEBUG, &pdev->dev,
797 "Delivery Queue Base Address=0x%llX (PA)"
798 "(tx_dma=0x%llX), Entry=%04d\n",
799 addr, mvi->tx_dma, w_ptr);
800 mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
801 (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
802 /*Command List */
803 addr = mvi->slot_dma;
804 dev_printk(KERN_DEBUG, &pdev->dev,
805 "Command List Base Address=0x%llX (PA)"
806 "(slot_dma=0x%llX), Header=%03d\n",
807 addr, slot->buf_dma, tag);
808 dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag);
809 /*mvs_cmd_hdr */
810 mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
811 (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
812 /*1.command table area */
813 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n");
814 mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
815 /*2.open address frame area */
816 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n");
817 mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
818 (u32) slot->buf_dma + slot->cmd_size);
819 /*3.status buffer */
820 mvs_hba_sb_dump(mvi, tag, proto);
821 /*4.PRD table */
822 dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n");
823 mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem,
824 (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
825 (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
826#endif
827}
828
829static void mvs_hba_cq_dump(struct mvs_info *mvi)
830{
831#if (_MV_DUMP > 2)
832 u64 addr;
833 void __iomem *regs = mvi->regs;
834 struct pci_dev *pdev = mvi->pdev;
835 u32 entry = mvi->rx_cons + 1;
836 u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
837
838 /*Completion Queue */
839	addr = ((u64)mr32(RX_HI) << 16 << 16) | mr32(RX_LO);
840 dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n",
841 mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
842 dev_printk(KERN_DEBUG, &pdev->dev,
843 "Completion List Base Address=0x%llX (PA), "
844 "CQ_Entry=%04d, CQ_WP=0x%08X\n",
845 addr, entry - 1, mvi->rx[0]);
846 mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
847 mvi->rx_dma + sizeof(u32) * entry);
848#endif
849}
850
851static void mvs_hba_interrupt_enable(struct mvs_info *mvi)
852{
853 void __iomem *regs = mvi->regs;
854 u32 tmp;
855
856 tmp = mr32(GBL_CTL);
857
858 mw32(GBL_CTL, tmp | INT_EN);
859}
860
861static void mvs_hba_interrupt_disable(struct mvs_info *mvi)
862{
863 void __iomem *regs = mvi->regs;
864 u32 tmp;
865
866 tmp = mr32(GBL_CTL);
867
868 mw32(GBL_CTL, tmp & ~INT_EN);
869}
870
871static int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
872
873/* move to PCI layer or libata core? */
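/* enable 64-bit DMA addressing when the platform supports it,
 * otherwise fall back to a 32-bit mask
 */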
874static int pci_go_64(struct pci_dev *pdev)
875{
876 int rc;
877
878 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
879 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
880 if (rc) {
881 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
882 if (rc) {
883 dev_printk(KERN_ERR, &pdev->dev,
884 "64-bit DMA enable failed\n");
885 return rc;
886 }
887 }
888 } else {
889 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
890 if (rc) {
891 dev_printk(KERN_ERR, &pdev->dev,
892 "32-bit DMA enable failed\n");
893 return rc;
894 }
895 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
896 if (rc) {
897 dev_printk(KERN_ERR, &pdev->dev,
898 "32-bit consistent DMA enable failed\n");
899 return rc;
900 }
901 }
902
903 return rc;
904}
905
906static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
907{
908 if (task->lldd_task) {
909 struct mvs_slot_info *slot;
910 slot = (struct mvs_slot_info *) task->lldd_task;
911 *tag = slot - mvi->slot_info;
912 return 1;
913 }
914 return 0;
915}
916
917static void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
918{
919 void *bitmap = (void *) &mvi->tags;
920 clear_bit(tag, bitmap);
921}
922
923static void mvs_tag_free(struct mvs_info *mvi, u32 tag)
924{
925 mvs_tag_clear(mvi, tag);
926}
927
928static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
929{
930 void *bitmap = (void *) &mvi->tags;
931 set_bit(tag, bitmap);
932}
933
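/* allocate a free command-slot tag from the bitmap; fails with
 * -SAS_QUEUE_FULL once all MVS_SLOTS tags are in use
 */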
934static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
935{
936 unsigned int index, tag;
937 void *bitmap = (void *) &mvi->tags;
938
939 index = find_first_zero_bit(bitmap, MVS_SLOTS);
940 tag = index;
941 if (tag >= MVS_SLOTS)
942 return -SAS_QUEUE_FULL;
943 mvs_tag_set(mvi, tag);
944 *tag_out = tag;
945 return 0;
946}
947
948static void mvs_tag_init(struct mvs_info *mvi)
949{
950 int i;
951 for (i = 0; i < MVS_SLOTS; ++i)
952 mvs_tag_clear(mvi, i);
953}
954
955#ifndef MVS_DISABLE_NVRAM
956static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data)
957{
958 int timeout = 1000;
959
960 if (addr & ~SPI_ADDR_MASK)
961 return -EINVAL;
962
963 writel(addr, regs + SPI_CMD);
964 writel(TWSI_RD, regs + SPI_CTL);
965
966 while (timeout-- > 0) {
967 if (readl(regs + SPI_CTL) & TWSI_RDY) {
968 *data = readl(regs + SPI_DATA);
969 return 0;
970 }
971
972 udelay(10);
973 }
974
975 return -EBUSY;
976}
977
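/* read buflen bytes from the EEPROM starting at addr, using aligned
 * 32-bit word reads and copying any unaligned head/tail bytes by hand
 */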
978static int mvs_eep_read_buf(void __iomem *regs, u32 addr,
979 void *buf, u32 buflen)
980{
981 u32 addr_end, tmp_addr, i, j;
982 u32 tmp = 0;
983 int rc;
984 u8 *tmp8, *buf8 = buf;
985
986 addr_end = addr + buflen;
987 tmp_addr = ALIGN(addr, 4);
988 if (addr > 0xff)
989 return -EINVAL;
990
991 j = addr & 0x3;
992 if (j) {
993 rc = mvs_eep_read(regs, tmp_addr, &tmp);
994 if (rc)
995 return rc;
996
997 tmp8 = (u8 *)&tmp;
998 for (i = j; i < 4; i++)
999 *buf8++ = tmp8[i];
1000
1001 tmp_addr += 4;
1002 }
1003
1004 for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) {
1005 rc = mvs_eep_read(regs, tmp_addr, &tmp);
1006 if (rc)
1007 return rc;
1008
1009 memcpy(buf8, &tmp, 4);
1010 buf8 += 4;
1011 }
1012
1013 if (tmp_addr < addr_end) {
1014 rc = mvs_eep_read(regs, tmp_addr, &tmp);
1015 if (rc)
1016 return rc;
1017
1018 tmp8 = (u8 *)&tmp;
1019 j = addr_end - tmp_addr;
1020 for (i = 0; i < j; i++)
1021 *buf8++ = tmp8[i];
1022
1023 tmp_addr += 4;
1024 }
1025
1026 return 0;
1027}
1028#endif
1029
1030static int mvs_nvram_read(struct mvs_info *mvi, u32 addr,
1031 void *buf, u32 buflen)
1032{
1033#ifndef MVS_DISABLE_NVRAM
1034 void __iomem *regs = mvi->regs;
1035 int rc, i;
1036 u32 sum;
1037 u8 hdr[2], *tmp;
1038 const char *msg;
1039
1040 rc = mvs_eep_read_buf(regs, addr, &hdr, 2);
1041 if (rc) {
1042 msg = "nvram hdr read failed";
1043 goto err_out;
1044 }
1045 rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen);
1046 if (rc) {
1047 msg = "nvram read failed";
1048 goto err_out;
1049 }
1050
1051 if (hdr[0] != 0x5A) {
1052 /* entry id */
1053 msg = "invalid nvram entry id";
1054 rc = -ENOENT;
1055 goto err_out;
1056 }
1057
1058 tmp = buf;
1059 sum = ((u32)hdr[0]) + ((u32)hdr[1]);
1060 for (i = 0; i < buflen; i++)
1061 sum += ((u32)tmp[i]);
1062
1063 if (sum) {
1064 msg = "nvram checksum failure";
1065 rc = -EILSEQ;
1066 goto err_out;
1067 }
1068
1069 return 0;
1070
1071err_out:
1072	dev_printk(KERN_ERR, &mvi->pdev->dev, "%s\n", msg);
1073 return rc;
1074#else
1075 /* FIXME , For SAS target mode */
1076 memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8);
1077 return 0;
1078#endif
1079}
1080
1081static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
1082{
1083 struct mvs_phy *phy = &mvi->phy[i];
1084 struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
1085
1086 if (!phy->phy_attached)
1087 return;
1088
1089 if (sas_phy->phy) {
1090 struct sas_phy *sphy = sas_phy->phy;
1091
1092 sphy->negotiated_linkrate = sas_phy->linkrate;
1093 sphy->minimum_linkrate = phy->minimum_linkrate;
1094 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
1095 sphy->maximum_linkrate = phy->maximum_linkrate;
1096 sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
1097 }
1098
1099 if (phy->phy_type & PORT_TYPE_SAS) {
1100 struct sas_identify_frame *id;
1101
1102 id = (struct sas_identify_frame *)phy->frame_rcvd;
1103 id->dev_type = phy->identify.device_type;
1104 id->initiator_bits = SAS_PROTOCOL_ALL;
1105 id->target_bits = phy->identify.target_port_protocols;
1106 } else if (phy->phy_type & PORT_TYPE_SATA) {
1107 /* TODO */
1108 }
1109 mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size;
1110 mvi->sas.notify_port_event(mvi->sas.sas_phy[i],
1111 PORTE_BYTES_DMAED);
1112}
1113
1114static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
1115{
1116 /* give the phy enabling interrupt event time to come in (1s
1117 * is empirically about all it takes) */
1118 if (time < HZ)
1119 return 0;
1120 /* Wait for discovery to finish */
1121 scsi_flush_work(shost);
1122 return 1;
1123}
1124
1125static void mvs_scan_start(struct Scsi_Host *shost)
1126{
1127 int i;
1128 struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha;
1129
1130 for (i = 0; i < mvi->chip->n_phy; ++i) {
1131 mvs_bytes_dmaed(mvi, i);
1132 }
1133}
1134
1135static int mvs_slave_configure(struct scsi_device *sdev)
1136{
1137 struct domain_device *dev = sdev_to_domain_dev(sdev);
1138 int ret = sas_slave_configure(sdev);
1139
1140 if (ret)
1141 return ret;
1142
1143 if (dev_is_sata(dev)) {
1144 /* struct ata_port *ap = dev->sata_dev.ap; */
1145 /* struct ata_device *adev = ap->link.device; */
1146
1147 /* clamp at no NCQ for the time being */
1148 /* adev->flags |= ATA_DFLAG_NCQ_OFF; */
1149 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
1150 }
1151 return 0;
1152}
1153
1154static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
1155{
1156 struct pci_dev *pdev = mvi->pdev;
1157 struct sas_ha_struct *sas_ha = &mvi->sas;
1158 struct mvs_phy *phy = &mvi->phy[phy_no];
1159 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1160
1161 phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no);
1162 /*
1163	 * events holds the port-wide event bits; now check the per-port
1164	 * interrupt status to see what actually happened on this phy.
1165 */
1166 dev_printk(KERN_DEBUG, &pdev->dev,
1167 "Port %d Event = %X\n",
1168 phy_no, phy->irq_status);
1169
1170 if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
1171 mvs_release_task(mvi, phy_no);
1172 if (!mvs_is_phy_ready(mvi, phy_no)) {
1173 sas_phy_disconnected(sas_phy);
1174 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
1175 dev_printk(KERN_INFO, &pdev->dev,
1176 "Port %d Unplug Notice\n", phy_no);
1177
1178 } else
1179 mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL);
1180 }
1181 if (!(phy->irq_status & PHYEV_DEC_ERR)) {
1182 if (phy->irq_status & PHYEV_COMWAKE) {
1183 u32 tmp = mvs_read_port_irq_mask(mvi, phy_no);
1184 mvs_write_port_irq_mask(mvi, phy_no,
1185 tmp | PHYEV_SIG_FIS);
1186 }
1187 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
1188 phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
1189 if (phy->phy_status) {
1190 mvs_detect_porttype(mvi, phy_no);
1191
1192 if (phy->phy_type & PORT_TYPE_SATA) {
1193 u32 tmp = mvs_read_port_irq_mask(mvi,
1194 phy_no);
1195 tmp &= ~PHYEV_SIG_FIS;
1196 mvs_write_port_irq_mask(mvi,
1197 phy_no, tmp);
1198 }
1199
1200 mvs_update_phyinfo(mvi, phy_no, 0);
1201 sas_ha->notify_phy_event(sas_phy,
1202 PHYE_OOB_DONE);
1203 mvs_bytes_dmaed(mvi, phy_no);
1204 } else {
1205 dev_printk(KERN_DEBUG, &pdev->dev,
1206 "plugin interrupt but phy is gone\n");
1207 mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET,
1208 NULL);
1209 }
1210 } else if (phy->irq_status & PHYEV_BROAD_CH) {
1211 mvs_release_task(mvi, phy_no);
1212 sas_ha->notify_port_event(sas_phy,
1213 PORTE_BROADCAST_RCVD);
1214 }
1215 }
1216 mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status);
1217}
1218
1219static void mvs_int_sata(struct mvs_info *mvi)
1220{
1221 u32 tmp;
1222 void __iomem *regs = mvi->regs;
1223 tmp = mr32(INT_STAT_SRS);
1224 mw32(INT_STAT_SRS, tmp & 0xFFFF);
1225}
1226
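/* post a SLOT_RESET entry on the delivery ring for the given slot;
 * for STP/SATA the entry also carries the port's SATA register set
 */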
1227static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task,
1228 u32 slot_idx)
1229{
1230 void __iomem *regs = mvi->regs;
1231 struct domain_device *dev = task->dev;
1232 struct asd_sas_port *sas_port = dev->port;
1233 struct mvs_port *port = mvi->slot_info[slot_idx].port;
1234 u32 reg_set, phy_mask;
1235
1236 if (!sas_protocol_ata(task->task_proto)) {
1237 reg_set = 0;
1238 phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
1239 sas_port->phy_mask;
1240 } else {
1241 reg_set = port->taskfileset;
1242 phy_mask = sas_port->phy_mask;
1243 }
1244 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx |
1245 (TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) |
1246 (phy_mask << TXQ_PHY_SHIFT) |
1247 (reg_set << TXQ_SRS_SHIFT));
1248
1249 mw32(TX_PROD_IDX, mvi->tx_prod);
1250 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
1251}
1252
1253static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1254 u32 slot_idx, int err)
1255{
1256 struct mvs_port *port = mvi->slot_info[slot_idx].port;
1257 struct task_status_struct *tstat = &task->task_status;
1258 struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
1259 int stat = SAM_GOOD;
1260
1261 resp->frame_len = sizeof(struct dev_to_host_fis);
1262 memcpy(&resp->ending_fis[0],
1263 SATA_RECEIVED_D2H_FIS(port->taskfileset),
1264 sizeof(struct dev_to_host_fis));
1265 tstat->buf_valid_size = sizeof(*resp);
1266 if (unlikely(err))
1267 stat = SAS_PROTO_RESPONSE;
1268 return stat;
1269}
1270
1271static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
1272{
1273 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1274 mvs_tag_clear(mvi, slot_idx);
1275}
1276
1277static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
1278 struct mvs_slot_info *slot, u32 slot_idx)
1279{
1280 if (!sas_protocol_ata(task->task_proto))
1281 if (slot->n_elem)
1282 pci_unmap_sg(mvi->pdev, task->scatter,
1283 slot->n_elem, task->data_dir);
1284
1285 switch (task->task_proto) {
1286 case SAS_PROTOCOL_SMP:
1287 pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1,
1288 PCI_DMA_FROMDEVICE);
1289 pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1,
1290 PCI_DMA_TODEVICE);
1291 break;
1292
1293 case SAS_PROTOCOL_SATA:
1294 case SAS_PROTOCOL_STP:
1295 case SAS_PROTOCOL_SSP:
1296 default:
1297 /* do nothing */
1298 break;
1299 }
1300 list_del(&slot->list);
1301 task->lldd_task = NULL;
1302 slot->task = NULL;
1303 slot->port = NULL;
1304}
1305
1306static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1307 u32 slot_idx)
1308{
1309 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1310 u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
1311 u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4));
1312 int stat = SAM_CHECK_COND;
1313
1314 if (err_dw1 & SLOT_BSY_ERR) {
1315 stat = SAS_QUEUE_FULL;
1316 mvs_slot_reset(mvi, task, slot_idx);
1317 }
1318 switch (task->task_proto) {
1319 case SAS_PROTOCOL_SSP:
1320 break;
1321 case SAS_PROTOCOL_SMP:
1322 break;
1323 case SAS_PROTOCOL_SATA:
1324 case SAS_PROTOCOL_STP:
1325 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1326 if (err_dw0 & TFILE_ERR)
1327 stat = mvs_sata_done(mvi, task, slot_idx, 1);
1328 break;
1329 default:
1330 break;
1331 }
1332
1333 mvs_hexdump(16, (u8 *) slot->response, 0);
1334 return stat;
1335}
1336
1337static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1338{
1339 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1340 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1341 struct sas_task *task = slot->task;
1342 struct task_status_struct *tstat;
1343 struct mvs_port *port;
1344 bool aborted;
1345 void *to;
1346
1347 if (unlikely(!task || !task->lldd_task))
1348 return -1;
1349
1350 mvs_hba_cq_dump(mvi);
1351
1352 spin_lock(&task->task_state_lock);
1353 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
1354 if (!aborted) {
1355 task->task_state_flags &=
1356 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
1357 task->task_state_flags |= SAS_TASK_STATE_DONE;
1358 }
1359 spin_unlock(&task->task_state_lock);
1360
1361 if (aborted) {
1362 mvs_slot_task_free(mvi, task, slot, slot_idx);
1363 mvs_slot_free(mvi, rx_desc);
1364 return -1;
1365 }
1366
1367 port = slot->port;
1368 tstat = &task->task_status;
1369 memset(tstat, 0, sizeof(*tstat));
1370 tstat->resp = SAS_TASK_COMPLETE;
1371
1372 if (unlikely(!port->port_attached || flags)) {
1373 mvs_slot_err(mvi, task, slot_idx);
1374 if (!sas_protocol_ata(task->task_proto))
1375 tstat->stat = SAS_PHY_DOWN;
1376 goto out;
1377 }
1378
1379 /* error info record present */
1380 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
1381 tstat->stat = mvs_slot_err(mvi, task, slot_idx);
1382 goto out;
1383 }
1384
1385 switch (task->task_proto) {
1386 case SAS_PROTOCOL_SSP:
1387 /* hw says status == 0, datapres == 0 */
1388 if (rx_desc & RXQ_GOOD) {
1389 tstat->stat = SAM_GOOD;
1390 tstat->resp = SAS_TASK_COMPLETE;
1391 }
1392 /* response frame present */
1393 else if (rx_desc & RXQ_RSP) {
1394 struct ssp_response_iu *iu =
1395 slot->response + sizeof(struct mvs_err_info);
1396 sas_ssp_task_response(&mvi->pdev->dev, task, iu);
1397 }
1398
1399 /* should never happen? */
1400 else
1401 tstat->stat = SAM_CHECK_COND;
1402 break;
1403
1404 case SAS_PROTOCOL_SMP: {
1405 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
1406 tstat->stat = SAM_GOOD;
1407 to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
1408 memcpy(to + sg_resp->offset,
1409 slot->response + sizeof(struct mvs_err_info),
1410 sg_dma_len(sg_resp));
1411 kunmap_atomic(to, KM_IRQ0);
1412 break;
1413 }
1414
1415 case SAS_PROTOCOL_SATA:
1416 case SAS_PROTOCOL_STP:
1417 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
1418 tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
1419 break;
1420 }
1421
1422 default:
1423 tstat->stat = SAM_CHECK_COND;
1424 break;
1425 }
1426
1427out:
1428 mvs_slot_task_free(mvi, task, slot, slot_idx);
1429 if (unlikely(tstat->stat != SAS_QUEUE_FULL))
1430 mvs_slot_free(mvi, rx_desc);
1431
1432 spin_unlock(&mvi->lock);
1433 task->task_done(task);
1434 spin_lock(&mvi->lock);
1435 return tstat->stat;
1436}
1437
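/* force-complete every slot still queued on the port behind this phy
 * (used when the phy drops or a broadcast(change) arrives)
 */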
1438static void mvs_release_task(struct mvs_info *mvi, int phy_no)
1439{
1440 struct list_head *pos, *n;
1441 struct mvs_slot_info *slot;
1442 struct mvs_phy *phy = &mvi->phy[phy_no];
1443 struct mvs_port *port = phy->port;
1444 u32 rx_desc;
1445
1446 if (!port)
1447 return;
1448
1449 list_for_each_safe(pos, n, &port->list) {
1450 slot = container_of(pos, struct mvs_slot_info, list);
1451 rx_desc = (u32) (slot - mvi->slot_info);
1452 mvs_slot_complete(mvi, rx_desc, 1);
1453 }
1454}
1455
1456static void mvs_int_full(struct mvs_info *mvi)
1457{
1458 void __iomem *regs = mvi->regs;
1459 u32 tmp, stat;
1460 int i;
1461
1462 stat = mr32(INT_STAT);
1463
1464 mvs_int_rx(mvi, false);
1465
1466 for (i = 0; i < MVS_MAX_PORTS; i++) {
1467 tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
1468 if (tmp)
1469 mvs_int_port(mvi, i, tmp);
1470 }
1471
1472 if (stat & CINT_SRS)
1473 mvs_int_sata(mvi);
1474
1475 mw32(INT_STAT, stat);
1476}
1477
1478static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
1479{
1480 void __iomem *regs = mvi->regs;
1481 u32 rx_prod_idx, rx_desc;
1482 bool attn = false;
1483 struct pci_dev *pdev = mvi->pdev;
1484
1485 /* the first dword in the RX ring is special: it contains
1486 * a mirror of the hardware's RX producer index, so that
1487 * we don't have to stall the CPU reading that register.
1488 * The actual RX ring is offset by one dword, due to this.
1489 */
1490 rx_prod_idx = mvi->rx_cons;
1491 mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
1492 if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */
1493 return 0;
1494
1495	/* The completion entry may come late; re-read the consumer index
1496	 * from the register and try again.  Note: if coalescing is enabled,
1497	 * the register has to be read every time.
1498	 */
1499 if (mvi->rx_cons == rx_prod_idx)
1500 mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;
1501
1502 if (mvi->rx_cons == rx_prod_idx)
1503 return 0;
1504
1505 while (mvi->rx_cons != rx_prod_idx) {
1506
1507 /* increment our internal RX consumer pointer */
1508 rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
1509
1510 rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
1511
1512 if (likely(rx_desc & RXQ_DONE))
1513 mvs_slot_complete(mvi, rx_desc, 0);
1514 if (rx_desc & RXQ_ATTN) {
1515 attn = true;
1516 dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
1517 rx_desc);
1518 } else if (rx_desc & RXQ_ERR) {
1519 if (!(rx_desc & RXQ_DONE))
1520 mvs_slot_complete(mvi, rx_desc, 0);
1521 dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
1522 rx_desc);
1523 } else if (rx_desc & RXQ_SLOT_RESET) {
1524 dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n",
1525 rx_desc);
1526 mvs_slot_free(mvi, rx_desc);
1527 }
1528 }
1529
1530 if (attn && self_clear)
1531 mvs_int_full(mvi);
1532
1533 return 0;
1534}
1535
1536#ifdef MVS_USE_TASKLET
1537static void mvs_tasklet(unsigned long data)
1538{
1539 struct mvs_info *mvi = (struct mvs_info *) data;
1540 unsigned long flags;
1541
1542 spin_lock_irqsave(&mvi->lock, flags);
1543
1544#ifdef MVS_DISABLE_MSI
1545 mvs_int_full(mvi);
1546#else
1547 mvs_int_rx(mvi, true);
1548#endif
1549 spin_unlock_irqrestore(&mvi->lock, flags);
1550}
1551#endif
1552
1553static irqreturn_t mvs_interrupt(int irq, void *opaque)
1554{
1555 struct mvs_info *mvi = opaque;
1556 void __iomem *regs = mvi->regs;
1557 u32 stat;
1558
1559 stat = mr32(GBL_INT_STAT);
1560
1561 if (stat == 0 || stat == 0xffffffff)
1562 return IRQ_NONE;
1563
1564 /* clear CMD_CMPLT ASAP */
1565 mw32_f(INT_STAT, CINT_DONE);
1566
1567#ifndef MVS_USE_TASKLET
1568 spin_lock(&mvi->lock);
1569
1570 mvs_int_full(mvi);
1571
1572 spin_unlock(&mvi->lock);
1573#else
1574 tasklet_schedule(&mvi->tasklet);
1575#endif
1576 return IRQ_HANDLED;
1577}
1578
1579#ifndef MVS_DISABLE_MSI
1580static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
1581{
1582 struct mvs_info *mvi = opaque;
1583
1584#ifndef MVS_USE_TASKLET
1585 spin_lock(&mvi->lock);
1586
1587 mvs_int_rx(mvi, true);
1588
1589 spin_unlock(&mvi->lock);
1590#else
1591 tasklet_schedule(&mvi->tasklet);
1592#endif
1593 return IRQ_HANDLED;
1594}
1595#endif
1596
1597struct mvs_task_exec_info {
1598 struct sas_task *task;
1599 struct mvs_cmd_hdr *hdr;
1600 struct mvs_port *port;
1601 u32 tag;
1602 int n_elem;
1603};
1604
1605static int mvs_task_prep_smp(struct mvs_info *mvi,
1606 struct mvs_task_exec_info *tei)
1607{
1608 int elem, rc, i;
1609 struct sas_task *task = tei->task;
1610 struct mvs_cmd_hdr *hdr = tei->hdr;
1611 struct scatterlist *sg_req, *sg_resp;
1612 u32 req_len, resp_len, tag = tei->tag;
1613 void *buf_tmp;
1614 u8 *buf_oaf;
1615 dma_addr_t buf_tmp_dma;
1616 struct mvs_prd *buf_prd;
1617 struct scatterlist *sg;
1618 struct mvs_slot_info *slot = &mvi->slot_info[tag];
1619 struct asd_sas_port *sas_port = task->dev->port;
1620 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
1621#if _MV_DUMP
1622 u8 *buf_cmd;
1623 void *from;
1624#endif
1625 /*
1626 * DMA-map SMP request, response buffers
1627 */
1628 sg_req = &task->smp_task.smp_req;
1629 elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE);
1630 if (!elem)
1631 return -ENOMEM;
1632 req_len = sg_dma_len(sg_req);
1633
1634 sg_resp = &task->smp_task.smp_resp;
1635 elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE);
1636 if (!elem) {
1637 rc = -ENOMEM;
1638 goto err_out;
1639 }
1640 resp_len = sg_dma_len(sg_resp);
1641
1642 /* must be in dwords */
1643 if ((req_len & 0x3) || (resp_len & 0x3)) {
1644 rc = -EINVAL;
1645 goto err_out_2;
1646 }
1647
1648 /*
1649 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
1650 */
1651
1652 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
1653 buf_tmp = slot->buf;
1654 buf_tmp_dma = slot->buf_dma;
1655
1656#if _MV_DUMP
1657 buf_cmd = buf_tmp;
1658 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1659 buf_tmp += req_len;
1660 buf_tmp_dma += req_len;
1661 slot->cmd_size = req_len;
1662#else
1663 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
1664#endif
1665
1666 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
1667 buf_oaf = buf_tmp;
1668 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1669
1670 buf_tmp += MVS_OAF_SZ;
1671 buf_tmp_dma += MVS_OAF_SZ;
1672
1673 /* region 3: PRD table ********************************************* */
1674 buf_prd = buf_tmp;
1675 if (tei->n_elem)
1676 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1677 else
1678 hdr->prd_tbl = 0;
1679
1680 i = sizeof(struct mvs_prd) * tei->n_elem;
1681 buf_tmp += i;
1682 buf_tmp_dma += i;
1683
1684	/* region 4: status buffer (the larger the PRD table, the smaller this buffer) */
1685 slot->response = buf_tmp;
1686 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1687
1688 /*
1689 * Fill in TX ring and command slot header
1690 */
1691 slot->tx = mvi->tx_prod;
1692 mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
1693 TXQ_MODE_I | tag |
1694 (sas_port->phy_mask << TXQ_PHY_SHIFT));
1695
1696 hdr->flags |= flags;
1697 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
1698 hdr->tags = cpu_to_le32(tag);
1699 hdr->data_len = 0;
1700
1701 /* generate open address frame hdr (first 12 bytes) */
1702 buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */
1703 buf_oaf[1] = task->dev->linkrate & 0xf;
1704 *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */
1705 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
1706
1707 /* fill in PRD (scatter/gather) table, if any */
1708 for_each_sg(task->scatter, sg, tei->n_elem, i) {
1709 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
1710 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
1711 buf_prd++;
1712 }
1713
1714#if _MV_DUMP
1715 /* copy cmd table */
1716 from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
1717 memcpy(buf_cmd, from + sg_req->offset, req_len);
1718 kunmap_atomic(from, KM_IRQ0);
1719#endif
1720 return 0;
1721
1722err_out_2:
1723 pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1,
1724 PCI_DMA_FROMDEVICE);
1725err_out:
1726 pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1,
1727 PCI_DMA_TODEVICE);
1728 return rc;
1729}
1730
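/* release the port's SATA register set: clear its enable bit in PCS
 * (sets 0-15) or CTL (sets 16 and up) and ack any pending SRS interrupt
 */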
1731static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port)
1732{
1733 void __iomem *regs = mvi->regs;
1734 u32 tmp, offs;
1735 u8 *tfs = &port->taskfileset;
1736
1737 if (*tfs == MVS_ID_NOT_MAPPED)
1738 return;
1739
1740 offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
1741 if (*tfs < 16) {
1742 tmp = mr32(PCS);
1743 mw32(PCS, tmp & ~offs);
1744 } else {
1745 tmp = mr32(CTL);
1746 mw32(CTL, tmp & ~offs);
1747 }
1748
1749 tmp = mr32(INT_STAT_SRS) & (1U << *tfs);
1750 if (tmp)
1751 mw32(INT_STAT_SRS, tmp);
1752
1753 *tfs = MVS_ID_NOT_MAPPED;
1754}
1755
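/* claim a free SATA register set for the port by scanning the PCS/CTL
 * enable bits; returns MVS_ID_NOT_MAPPED when none are free
 */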
1756static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port)
1757{
1758 int i;
1759 u32 tmp, offs;
1760 void __iomem *regs = mvi->regs;
1761
1762 if (port->taskfileset != MVS_ID_NOT_MAPPED)
1763 return 0;
1764
1765 tmp = mr32(PCS);
1766
1767 for (i = 0; i < mvi->chip->srs_sz; i++) {
1768 if (i == 16)
1769 tmp = mr32(CTL);
1770 offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
1771 if (!(tmp & offs)) {
1772 port->taskfileset = i;
1773
1774 if (i < 16)
1775 mw32(PCS, tmp | offs);
1776 else
1777 mw32(CTL, tmp | offs);
1778 tmp = mr32(INT_STAT_SRS) & (1U << i);
1779 if (tmp)
1780 mw32(INT_STAT_SRS, tmp);
1781 return 0;
1782 }
1783 }
1784 return MVS_ID_NOT_MAPPED;
1785}
1786
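/* for NCQ (FPDMA) reads and writes, return the libata queued-command
 * tag so it can be folded into the command FIS sector count field
 */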
1787static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
1788{
1789 struct ata_queued_cmd *qc = task->uldd_task;
1790
1791 if (qc) {
1792 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
1793 qc->tf.command == ATA_CMD_FPDMA_READ) {
1794 *tag = qc->tag;
1795 return 1;
1796 }
1797 }
1798
1799 return 0;
1800}
1801
1802static int mvs_task_prep_ata(struct mvs_info *mvi,
1803 struct mvs_task_exec_info *tei)
1804{
1805 struct sas_task *task = tei->task;
1806 struct domain_device *dev = task->dev;
1807 struct mvs_cmd_hdr *hdr = tei->hdr;
1808 struct asd_sas_port *sas_port = dev->port;
1809 struct mvs_slot_info *slot;
1810 struct scatterlist *sg;
1811 struct mvs_prd *buf_prd;
1812 struct mvs_port *port = tei->port;
1813 u32 tag = tei->tag;
1814 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
1815 void *buf_tmp;
1816 u8 *buf_cmd, *buf_oaf;
1817 dma_addr_t buf_tmp_dma;
1818 u32 i, req_len, resp_len;
1819 const u32 max_resp_len = SB_RFB_MAX;
1820
1821 if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED)
1822 return -EBUSY;
1823
1824 slot = &mvi->slot_info[tag];
1825 slot->tx = mvi->tx_prod;
1826 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
1827 (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
1828 (sas_port->phy_mask << TXQ_PHY_SHIFT) |
1829 (port->taskfileset << TXQ_SRS_SHIFT));
1830
1831 if (task->ata_task.use_ncq)
1832 flags |= MCH_FPDMA;
1833 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
1834 if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
1835 flags |= MCH_ATAPI;
1836 }
1837
1838 /* FIXME: fill in port multiplier number */
1839
1840 hdr->flags = cpu_to_le32(flags);
1841
1842	/* FIXME: the low-order 5 bits carry the TAG when NCQ is enabled */
1843 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags))
1844 task->ata_task.fis.sector_count |= hdr->tags << 3;
1845 else
1846 hdr->tags = cpu_to_le32(tag);
1847 hdr->data_len = cpu_to_le32(task->total_xfer_len);
1848
1849 /*
1850 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
1851 */
1852
1853 /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
1854 buf_cmd = buf_tmp = slot->buf;
1855 buf_tmp_dma = slot->buf_dma;
1856
1857 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1858
1859 buf_tmp += MVS_ATA_CMD_SZ;
1860 buf_tmp_dma += MVS_ATA_CMD_SZ;
1861#if _MV_DUMP
1862 slot->cmd_size = MVS_ATA_CMD_SZ;
1863#endif
1864
1865 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
1866 /* used for STP. unused for SATA? */
1867 buf_oaf = buf_tmp;
1868 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1869
1870 buf_tmp += MVS_OAF_SZ;
1871 buf_tmp_dma += MVS_OAF_SZ;
1872
1873 /* region 3: PRD table ********************************************* */
1874 buf_prd = buf_tmp;
1875 if (tei->n_elem)
1876 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1877 else
1878 hdr->prd_tbl = 0;
1879
1880 i = sizeof(struct mvs_prd) * tei->n_elem;
1881 buf_tmp += i;
1882 buf_tmp_dma += i;
1883
1884	/* region 4: status buffer (the larger the PRD table, the smaller this buffer) */
1885 /* FIXME: probably unused, for SATA. kept here just in case
1886 * we get a STP/SATA error information record
1887 */
1888 slot->response = buf_tmp;
1889 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1890
1891 req_len = sizeof(struct host_to_dev_fis);
1892 resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
1893 sizeof(struct mvs_err_info) - i;
1894
1895 /* request, response lengths */
1896 resp_len = min(resp_len, max_resp_len);
1897 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
1898
1899 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
1900 /* fill in command FIS and ATAPI CDB */
1901 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
1902 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
1903 memcpy(buf_cmd + STP_ATAPI_CMD,
1904 task->ata_task.atapi_packet, 16);
1905
1906 /* generate open address frame hdr (first 12 bytes) */
1907 buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */
1908 buf_oaf[1] = task->dev->linkrate & 0xf;
1909 *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
1910 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
1911
1912 /* fill in PRD (scatter/gather) table, if any */
1913 for_each_sg(task->scatter, sg, tei->n_elem, i) {
1914 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
1915 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
1916 buf_prd++;
1917 }
1918
1919 return 0;
1920}
1921
1922static int mvs_task_prep_ssp(struct mvs_info *mvi,
1923 struct mvs_task_exec_info *tei)
1924{
1925 struct sas_task *task = tei->task;
1926 struct mvs_cmd_hdr *hdr = tei->hdr;
1927 struct mvs_port *port = tei->port;
1928 struct mvs_slot_info *slot;
1929 struct scatterlist *sg;
1930 struct mvs_prd *buf_prd;
1931 struct ssp_frame_hdr *ssp_hdr;
1932 void *buf_tmp;
1933 u8 *buf_cmd, *buf_oaf, fburst = 0;
1934 dma_addr_t buf_tmp_dma;
1935 u32 flags;
1936 u32 resp_len, req_len, i, tag = tei->tag;
1937 const u32 max_resp_len = SB_RFB_MAX;
1938 u8 phy_mask;
1939
1940 slot = &mvi->slot_info[tag];
1941
1942 phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
1943 task->dev->port->phy_mask;
1944 slot->tx = mvi->tx_prod;
1945 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
1946 (TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
1947 (phy_mask << TXQ_PHY_SHIFT));
1948
1949 flags = MCH_RETRY;
1950 if (task->ssp_task.enable_first_burst) {
1951 flags |= MCH_FBURST;
1952 fburst = (1 << 7);
1953 }
1954 hdr->flags = cpu_to_le32(flags |
1955 (tei->n_elem << MCH_PRD_LEN_SHIFT) |
1956 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));
1957
1958 hdr->tags = cpu_to_le32(tag);
1959 hdr->data_len = cpu_to_le32(task->total_xfer_len);
1960
1961 /*
1962 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
1963 */
1964
1965 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
1966 buf_cmd = buf_tmp = slot->buf;
1967 buf_tmp_dma = slot->buf_dma;
1968
1969 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1970
1971 buf_tmp += MVS_SSP_CMD_SZ;
1972 buf_tmp_dma += MVS_SSP_CMD_SZ;
1973#if _MV_DUMP
1974 slot->cmd_size = MVS_SSP_CMD_SZ;
1975#endif
1976
1977 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
1978 buf_oaf = buf_tmp;
1979 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1980
1981 buf_tmp += MVS_OAF_SZ;
1982 buf_tmp_dma += MVS_OAF_SZ;
1983
1984 /* region 3: PRD table ********************************************* */
1985 buf_prd = buf_tmp;
1986 if (tei->n_elem)
1987 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1988 else
1989 hdr->prd_tbl = 0;
1990
1991 i = sizeof(struct mvs_prd) * tei->n_elem;
1992 buf_tmp += i;
1993 buf_tmp_dma += i;
1994
1995	/* region 4: status buffer (the larger the PRD table, the smaller this buffer) */
1996 slot->response = buf_tmp;
1997 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1998
1999 resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
2000 sizeof(struct mvs_err_info) - i;
2001 resp_len = min(resp_len, max_resp_len);
2002
2003 req_len = sizeof(struct ssp_frame_hdr) + 28;
2004
2005 /* request, response lengths */
2006 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
2007
2008 /* generate open address frame hdr (first 12 bytes) */
2009 buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1; /* initiator, SSP, ftype 1h */
2010 buf_oaf[1] = task->dev->linkrate & 0xf;
2011 *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
2012 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
2013
2014 /* fill in SSP frame header (Command Table.SSP frame header) */
2015 ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
2016 ssp_hdr->frame_type = SSP_COMMAND;
2017 memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr,
2018 HASHED_SAS_ADDR_SIZE);
2019 memcpy(ssp_hdr->hashed_src_addr,
2020 task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
2021 ssp_hdr->tag = cpu_to_be16(tag);
2022
2023 /* fill in command frame IU */
2024 buf_cmd += sizeof(*ssp_hdr);
2025 memcpy(buf_cmd, &task->ssp_task.LUN, 8);
2026 buf_cmd[9] = fburst | task->ssp_task.task_attr |
2027 (task->ssp_task.task_prio << 3);
2028 memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
2029
2030 /* fill in PRD (scatter/gather) table, if any */
2031 for_each_sg(task->scatter, sg, tei->n_elem, i) {
2032 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
2033 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
2034 buf_prd++;
2035 }
2036
2037 return 0;
2038}
2039
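/* queue num linked sas_tasks: map the scatterlist, allocate a slot tag,
 * build the command header and delivery-queue entry for each task, then
 * write TX_PROD_IDX once at the end to kick the hardware
 */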
2040static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
2041{
2042 struct domain_device *dev = task->dev;
2043 struct mvs_info *mvi = dev->port->ha->lldd_ha;
2044 struct pci_dev *pdev = mvi->pdev;
2045 void __iomem *regs = mvi->regs;
2046 struct mvs_task_exec_info tei;
2047 struct sas_task *t = task;
2048 struct mvs_slot_info *slot;
2049 u32 tag = 0xdeadbeef, rc, n_elem = 0;
2050 unsigned long flags;
2051 u32 n = num, pass = 0;
2052
2053 spin_lock_irqsave(&mvi->lock, flags);
2054 do {
2055 dev = t->dev;
2056 tei.port = &mvi->port[dev->port->id];
2057
2058 if (!tei.port->port_attached) {
2059 if (sas_protocol_ata(t->task_proto)) {
2060 rc = SAS_PHY_DOWN;
2061 goto out_done;
2062 } else {
2063 struct task_status_struct *ts = &t->task_status;
2064 ts->resp = SAS_TASK_UNDELIVERED;
2065 ts->stat = SAS_PHY_DOWN;
2066 t->task_done(t);
2067 if (n > 1)
2068 t = list_entry(t->list.next,
2069 struct sas_task, list);
2070 continue;
2071 }
2072 }
2073
2074 if (!sas_protocol_ata(t->task_proto)) {
2075 if (t->num_scatter) {
2076 n_elem = pci_map_sg(mvi->pdev, t->scatter,
2077 t->num_scatter,
2078 t->data_dir);
2079 if (!n_elem) {
2080 rc = -ENOMEM;
2081 goto err_out;
2082 }
2083 }
2084 } else {
2085 n_elem = t->num_scatter;
2086 }
2087
2088 rc = mvs_tag_alloc(mvi, &tag);
2089 if (rc)
2090 goto err_out;
2091
2092 slot = &mvi->slot_info[tag];
2093 t->lldd_task = NULL;
2094 slot->n_elem = n_elem;
2095 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
2096 tei.task = t;
2097 tei.hdr = &mvi->slot[tag];
2098 tei.tag = tag;
2099 tei.n_elem = n_elem;
2100
2101 switch (t->task_proto) {
2102 case SAS_PROTOCOL_SMP:
2103 rc = mvs_task_prep_smp(mvi, &tei);
2104 break;
2105 case SAS_PROTOCOL_SSP:
2106 rc = mvs_task_prep_ssp(mvi, &tei);
2107 break;
2108 case SAS_PROTOCOL_SATA:
2109 case SAS_PROTOCOL_STP:
2110 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
2111 rc = mvs_task_prep_ata(mvi, &tei);
2112 break;
2113 default:
2114 dev_printk(KERN_ERR, &pdev->dev,
2115 "unknown sas_task proto: 0x%x\n",
2116 t->task_proto);
2117 rc = -EINVAL;
2118 break;
2119 }
2120
2121 if (rc)
2122 goto err_out_tag;
2123
2124 slot->task = t;
2125 slot->port = tei.port;
2126 t->lldd_task = (void *) slot;
2127 list_add_tail(&slot->list, &slot->port->list);
2128 /* TODO: select normal or high priority */
2129
2130 spin_lock(&t->task_state_lock);
2131 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
2132 spin_unlock(&t->task_state_lock);
2133
2134 mvs_hba_memory_dump(mvi, tag, t->task_proto);
2135
2136 ++pass;
2137 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
2138 if (n > 1)
2139 t = list_entry(t->list.next, struct sas_task, list);
2140 } while (--n);
2141
2142 rc = 0;
2143 goto out_done;
2144
2145err_out_tag:
2146 mvs_tag_free(mvi, tag);
2147err_out:
2148 dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc);
2149 if (!sas_protocol_ata(t->task_proto))
2150 if (n_elem)
2151 pci_unmap_sg(mvi->pdev, t->scatter, n_elem,
2152 t->data_dir);
2153out_done:
2154 if (pass)
2155 mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
2156 spin_unlock_irqrestore(&mvi->lock, flags);
2157 return rc;
2158}
2159
2160static int mvs_task_abort(struct sas_task *task)
2161{
2162 int rc;
2163 unsigned long flags;
2164 struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
2165 struct pci_dev *pdev = mvi->pdev;
2166 int tag;
2167
2168 spin_lock_irqsave(&task->task_state_lock, flags);
2169 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
2170 rc = TMF_RESP_FUNC_COMPLETE;
2171 spin_unlock_irqrestore(&task->task_state_lock, flags);
2172 goto out_done;
2173 }
2174 spin_unlock_irqrestore(&task->task_state_lock, flags);
2175
2176 switch (task->task_proto) {
2177 case SAS_PROTOCOL_SMP:
2178 dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort!\n");
2179 break;
2180 case SAS_PROTOCOL_SSP:
2181 dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort!\n");
2182 break;
2183 case SAS_PROTOCOL_SATA:
2184 case SAS_PROTOCOL_STP:
2185 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{
2186 dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort!\n");
2187#if _MV_DUMP
2188 dev_printk(KERN_DEBUG, &pdev->dev, "Dump D2H FIS:\n");
2189 mvs_hexdump(sizeof(struct host_to_dev_fis),
2190 (void *)&task->ata_task.fis, 0);
2191 dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd:\n");
2192 mvs_hexdump(16, task->ata_task.atapi_packet, 0);
2193#endif
2194 spin_lock_irqsave(&task->task_state_lock, flags);
2195 if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
2196 /* TODO */
2197 ;
2198 }
2199 spin_unlock_irqrestore(&task->task_state_lock, flags);
2200 break;
2201 }
2202 default:
2203 break;
2204 }
2205
2206 if (mvs_find_tag(mvi, task, &tag)) {
2207 spin_lock_irqsave(&mvi->lock, flags);
2208 mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag);
2209 spin_unlock_irqrestore(&mvi->lock, flags);
2210 }
2211 if (!mvs_task_exec(task, 1, GFP_ATOMIC))
2212 rc = TMF_RESP_FUNC_COMPLETE;
2213 else
2214 rc = TMF_RESP_FUNC_FAILED;
2215out_done:
2216 return rc;
2217}
2218
2219static void mvs_free(struct mvs_info *mvi)
2220{
2221 int i;
2222
2223 if (!mvi)
2224 return;
2225
2226 for (i = 0; i < MVS_SLOTS; i++) {
2227 struct mvs_slot_info *slot = &mvi->slot_info[i];
2228
2229 if (slot->buf)
2230 dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ,
2231 slot->buf, slot->buf_dma);
2232 }
2233
2234 if (mvi->tx)
2235 dma_free_coherent(&mvi->pdev->dev,
2236 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
2237 mvi->tx, mvi->tx_dma);
2238 if (mvi->rx_fis)
2239 dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ,
2240 mvi->rx_fis, mvi->rx_fis_dma);
2241 if (mvi->rx)
2242 dma_free_coherent(&mvi->pdev->dev,
2243 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
2244 mvi->rx, mvi->rx_dma);
2245 if (mvi->slot)
2246 dma_free_coherent(&mvi->pdev->dev,
2247 sizeof(*mvi->slot) * MVS_SLOTS,
2248 mvi->slot, mvi->slot_dma);
2249#ifdef MVS_ENABLE_PERI
2250 if (mvi->peri_regs)
2251 iounmap(mvi->peri_regs);
2252#endif
2253 if (mvi->regs)
2254 iounmap(mvi->regs);
2255 if (mvi->shost)
2256 scsi_host_put(mvi->shost);
2257 kfree(mvi->sas.sas_port);
2258 kfree(mvi->sas.sas_phy);
2259 kfree(mvi);
2260}
2261
2262/* FIXME: locking? */
2263static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
2264 void *funcdata)
2265{
2266 struct mvs_info *mvi = sas_phy->ha->lldd_ha;
2267 int rc = 0, phy_id = sas_phy->id;
2268 u32 tmp;
2269
2270 tmp = mvs_read_phy_ctl(mvi, phy_id);
2271
2272 switch (func) {
2273 case PHY_FUNC_SET_LINK_RATE:{
2274 struct sas_phy_linkrates *rates = funcdata;
2275 u32 lrmin = 0, lrmax = 0;
2276
2277 lrmin = (rates->minimum_linkrate << 8);
2278 lrmax = (rates->maximum_linkrate << 12);
2279
2280 if (lrmin) {
2281 tmp &= ~(0xf << 8);
2282 tmp |= lrmin;
2283 }
2284 if (lrmax) {
2285 tmp &= ~(0xf << 12);
2286 tmp |= lrmax;
2287 }
2288 mvs_write_phy_ctl(mvi, phy_id, tmp);
2289 break;
2290 }
2291
2292 case PHY_FUNC_HARD_RESET:
2293 if (tmp & PHY_RST_HARD)
2294 break;
2295 mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD);
2296 break;
2297
2298 case PHY_FUNC_LINK_RESET:
2299 mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST);
2300 break;
2301
2302 case PHY_FUNC_DISABLE:
2303 case PHY_FUNC_RELEASE_SPINUP_HOLD:
2304 default:
2305 rc = -EOPNOTSUPP;
2306 }
2307
2308 return rc;
2309}
2310
2311static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
2312{
2313 struct mvs_phy *phy = &mvi->phy[phy_id];
2314 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2315
2316 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
2317 sas_phy->class = SAS;
2318 sas_phy->iproto = SAS_PROTOCOL_ALL;
2319 sas_phy->tproto = 0;
2320 sas_phy->type = PHY_TYPE_PHYSICAL;
2321 sas_phy->role = PHY_ROLE_INITIATOR;
2322 sas_phy->oob_mode = OOB_NOT_CONNECTED;
2323 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
2324
2325 sas_phy->id = phy_id;
2326 sas_phy->sas_addr = &mvi->sas_addr[0];
2327 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
2328 sas_phy->ha = &mvi->sas;
2329 sas_phy->lldd_phy = phy;
2330}
2331
2332static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
2333 const struct pci_device_id *ent)
2334{
2335 struct mvs_info *mvi;
2336 unsigned long res_start, res_len, res_flag;
2337 struct asd_sas_phy **arr_phy;
2338 struct asd_sas_port **arr_port;
2339 const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data];
2340 int i;
2341
2342 /*
2343 * alloc and init our per-HBA mvs_info struct
2344 */
2345
2346 mvi = kzalloc(sizeof(*mvi), GFP_KERNEL);
2347 if (!mvi)
2348 return NULL;
2349
2350 spin_lock_init(&mvi->lock);
2351#ifdef MVS_USE_TASKLET
2352 tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi);
2353#endif
2354 mvi->pdev = pdev;
2355 mvi->chip = chip;
2356
2357 if (pdev->device == 0x6440 && pdev->revision == 0)
2358 mvi->flags |= MVF_PHY_PWR_FIX;
2359
2360 /*
2361 * alloc and init SCSI, SAS glue
2362 */
2363
2364 mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
2365 if (!mvi->shost)
2366 goto err_out;
2367
2368 arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
2369 arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
2370 if (!arr_phy || !arr_port)
2371 goto err_out;
2372
2373 for (i = 0; i < MVS_MAX_PHYS; i++) {
2374 mvs_phy_init(mvi, i);
2375 arr_phy[i] = &mvi->phy[i].sas_phy;
2376 arr_port[i] = &mvi->port[i].sas_port;
2377 mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED;
2378 mvi->port[i].wide_port_phymap = 0;
2379 mvi->port[i].port_attached = 0;
2380 INIT_LIST_HEAD(&mvi->port[i].list);
2381 }
2382
2383 SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas;
2384 mvi->shost->transportt = mvs_stt;
2385 mvi->shost->max_id = 21;
2386 mvi->shost->max_lun = ~0;
2387 mvi->shost->max_channel = 0;
2388 mvi->shost->max_cmd_len = 16;
2389
2390 mvi->sas.sas_ha_name = DRV_NAME;
2391 mvi->sas.dev = &pdev->dev;
2392 mvi->sas.lldd_module = THIS_MODULE;
2393 mvi->sas.sas_addr = &mvi->sas_addr[0];
2394 mvi->sas.sas_phy = arr_phy;
2395 mvi->sas.sas_port = arr_port;
2396 mvi->sas.num_phys = chip->n_phy;
2397 mvi->sas.lldd_max_execute_num = 1;
2398 mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE;
2399 mvi->shost->can_queue = MVS_CAN_QUEUE;
2400 mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys;
2401 mvi->sas.lldd_ha = mvi;
2402 mvi->sas.core.shost = mvi->shost;
2403
2404 mvs_tag_init(mvi);
2405
2406 /*
2407 * ioremap main and peripheral registers
2408 */
2409
2410#ifdef MVS_ENABLE_PERI
2411 res_start = pci_resource_start(pdev, 2);
2412 res_len = pci_resource_len(pdev, 2);
2413 if (!res_start || !res_len)
2414 goto err_out;
2415
2416 mvi->peri_regs = ioremap_nocache(res_start, res_len);
2417 if (!mvi->peri_regs)
2418 goto err_out;
2419#endif
2420
2421 res_start = pci_resource_start(pdev, 4);
2422 res_len = pci_resource_len(pdev, 4);
2423 if (!res_start || !res_len)
2424 goto err_out;
2425
2426 res_flag = pci_resource_flags(pdev, 4);
2427 if (res_flag & IORESOURCE_CACHEABLE)
2428 mvi->regs = ioremap(res_start, res_len);
2429 else
2430 mvi->regs = ioremap_nocache(res_start, res_len);
2431
2432 if (!mvi->regs)
2433 goto err_out;
2434
2435 /*
2436 * alloc and init our DMA areas
2437 */
2438
2439 mvi->tx = dma_alloc_coherent(&pdev->dev,
2440 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
2441 &mvi->tx_dma, GFP_KERNEL);
2442 if (!mvi->tx)
2443 goto err_out;
2444 memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
2445
2446 mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ,
2447 &mvi->rx_fis_dma, GFP_KERNEL);
2448 if (!mvi->rx_fis)
2449 goto err_out;
2450 memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
2451
2452 mvi->rx = dma_alloc_coherent(&pdev->dev,
2453 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
2454 &mvi->rx_dma, GFP_KERNEL);
2455 if (!mvi->rx)
2456 goto err_out;
2457 memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
2458
2459 mvi->rx[0] = cpu_to_le32(0xfff);
2460 mvi->rx_cons = 0xfff;
2461
2462 mvi->slot = dma_alloc_coherent(&pdev->dev,
2463 sizeof(*mvi->slot) * MVS_SLOTS,
2464 &mvi->slot_dma, GFP_KERNEL);
2465 if (!mvi->slot)
2466 goto err_out;
2467 memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS);
2468
2469 for (i = 0; i < MVS_SLOTS; i++) {
2470 struct mvs_slot_info *slot = &mvi->slot_info[i];
2471
2472 slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ,
2473 &slot->buf_dma, GFP_KERNEL);
2474 if (!slot->buf)
2475 goto err_out;
2476 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
2477 }
2478
2479 /* finally, read NVRAM to get our SAS address */
2480 if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8))
2481 goto err_out;
2482 return mvi;
2483
2484err_out:
2485 mvs_free(mvi);
2486 return NULL;
2487}
2488
2489static u32 mvs_cr32(void __iomem *regs, u32 addr)
2490{
2491 mw32(CMD_ADDR, addr);
2492 return mr32(CMD_DATA);
2493}
2494
2495static void mvs_cw32(void __iomem *regs, u32 addr, u32 val)
2496{
2497 mw32(CMD_ADDR, addr);
2498 mw32(CMD_DATA, val);
2499}
2500
2501static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
2502{
2503 void __iomem *regs = mvi->regs;
2504 return (port < 4) ? mr32(P0_SER_CTLSTAT + port * 4) :
2505 mr32(P4_SER_CTLSTAT + (port - 4) * 4);
2506}
2507
2508static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
2509{
2510 void __iomem *regs = mvi->regs;
2511 if (port < 4)
2512 mw32(P0_SER_CTLSTAT + port * 4, val);
2513 else
2514 mw32(P4_SER_CTLSTAT + (port - 4) * 4, val);
2515}
2516
2517static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port)
2518{
2519 void __iomem *regs = mvi->regs + off;
2520 void __iomem *regs2 = mvi->regs + off2;
2521 return (port < 4) ? readl(regs + port * 8) :
2522 readl(regs2 + (port - 4) * 8);
2523}
2524
2525static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
2526 u32 port, u32 val)
2527{
2528 void __iomem *regs = mvi->regs + off;
2529 void __iomem *regs2 = mvi->regs + off2;
2530 if (port < 4)
2531 writel(val, regs + port * 8);
2532 else
2533 writel(val, regs2 + (port - 4) * 8);
2534}
2535
2536static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
2537{
2538 return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port);
2539}
2540
2541static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val)
2542{
2543 mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val);
2544}
2545
2546static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr)
2547{
2548 mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr);
2549}
2550
2551static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
2552{
2553 return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port);
2554}
2555
2556static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val)
2557{
2558 mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val);
2559}
2560
2561static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr)
2562{
2563 mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr);
2564}
2565
2566static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
2567{
2568 return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port);
2569}
2570
2571static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val)
2572{
2573 mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val);
2574}
2575
2576static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
2577{
2578 return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port);
2579}
2580
2581static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val)
2582{
2583 mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val);
2584}
2585
2586static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
2587{
2588 void __iomem *regs = mvi->regs;
2589 u32 tmp;
2590
2591 /* workaround for SATA R-ERR, to ignore phy glitch */
2592 tmp = mvs_cr32(regs, CMD_PHY_TIMER);
2593 tmp &= ~(1 << 9);
2594 tmp |= (1 << 10);
2595 mvs_cw32(regs, CMD_PHY_TIMER, tmp);
2596
2597 /* enable retry 127 times */
2598 mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f);
2599
2600 /* extend open frame timeout to max */
2601 tmp = mvs_cr32(regs, CMD_SAS_CTL0);
2602 tmp &= ~0xffff;
2603 tmp |= 0x3fff;
2604 mvs_cw32(regs, CMD_SAS_CTL0, tmp);
2605
2606 /* workaround for WDTIMEOUT, set to 550 ms */
2607 mvs_cw32(regs, CMD_WD_TIMER, 0x86470);
2608
2609 /* do not halt on a different port op during wideport link change */
2610 mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d);
2611
2612 /* workaround for Seagate disk not-found OOB sequence: receive
2613 * COMINIT before sending out COMWAKE */
2614 tmp = mvs_cr32(regs, CMD_PHY_MODE_21);
2615 tmp &= 0x0000ffff;
2616 tmp |= 0x00fa0000;
2617 mvs_cw32(regs, CMD_PHY_MODE_21, tmp);
2618
2619 tmp = mvs_cr32(regs, CMD_PHY_TIMER);
2620 tmp &= 0x1fffffff;
2621 tmp |= (2U << 29); /* 8 ms retry */
2622 mvs_cw32(regs, CMD_PHY_TIMER, tmp);
2623
2624 /* TEST - for phy decoding error, adjust voltage levels */
2625 mw32(P0_VSR_ADDR + 0, 0x8);
2626 mw32(P0_VSR_DATA + 0, 0x2F0);
2627
2628 mw32(P0_VSR_ADDR + 8, 0x8);
2629 mw32(P0_VSR_DATA + 8, 0x2F0);
2630
2631 mw32(P0_VSR_ADDR + 16, 0x8);
2632 mw32(P0_VSR_DATA + 16, 0x2F0);
2633
2634 mw32(P0_VSR_ADDR + 24, 0x8);
2635 mw32(P0_VSR_DATA + 24, 0x2F0);
2636
2637}
2638
2639static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId)
2640{
2641 void __iomem *regs = mvi->regs;
2642 u32 tmp;
2643
2644 tmp = mr32(PCS);
2645 if (mvi->chip->n_phy <= 4)
2646 tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT);
2647 else
2648 tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2);
2649 mw32(PCS, tmp);
2650}
2651
2652static void mvs_detect_porttype(struct mvs_info *mvi, int i)
2653{
2654 void __iomem *regs = mvi->regs;
2655 u32 reg;
2656 struct mvs_phy *phy = &mvi->phy[i];
2657
2658 /* TODO check & save device type */
2659 reg = mr32(GBL_PORT_TYPE);
2660
2661 if (reg & MODE_SAS_SATA & (1 << i))
2662 phy->phy_type |= PORT_TYPE_SAS;
2663 else
2664 phy->phy_type |= PORT_TYPE_SATA;
2665}
2666
2667static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
2668{
2669 u32 *s = (u32 *) buf;
2670
2671 if (!s)
2672 return NULL;
2673
2674 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
2675 s[3] = mvs_read_port_cfg_data(mvi, i);
2676
2677 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
2678 s[2] = mvs_read_port_cfg_data(mvi, i);
2679
2680 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
2681 s[1] = mvs_read_port_cfg_data(mvi, i);
2682
2683 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
2684 s[0] = mvs_read_port_cfg_data(mvi, i);
2685
2686 return (void *)s;
2687}
2688
2689static u32 mvs_is_sig_fis_received(u32 irq_status)
2690{
2691 return irq_status & PHYEV_SIG_FIS;
2692}
2693
2694static void mvs_update_wideport(struct mvs_info *mvi, int i)
2695{
2696 struct mvs_phy *phy = &mvi->phy[i];
2697 struct mvs_port *port = phy->port;
2698 int j, no;
2699
2700 for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy)
2701 if (no & 1) {
2702 mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
2703 mvs_write_port_cfg_data(mvi, no,
2704 port->wide_port_phymap);
2705 } else {
2706 mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
2707 mvs_write_port_cfg_data(mvi, no, 0);
2708 }
2709}
2710
2711static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
2712{
2713 u32 tmp;
2714 struct mvs_phy *phy = &mvi->phy[i];
2715 struct mvs_port *port = phy->port;
2716
2717 tmp = mvs_read_phy_ctl(mvi, i);
2718
2719 if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
2720 if (!port)
2721 phy->phy_attached = 1;
2722 return tmp;
2723 }
2724
2725 if (port) {
2726 if (phy->phy_type & PORT_TYPE_SAS) {
2727 port->wide_port_phymap &= ~(1U << i);
2728 if (!port->wide_port_phymap)
2729 port->port_attached = 0;
2730 mvs_update_wideport(mvi, i);
2731 } else if (phy->phy_type & PORT_TYPE_SATA)
2732 port->port_attached = 0;
2733 mvs_free_reg_set(mvi, phy->port);
2734 phy->port = NULL;
2735 phy->phy_attached = 0;
2736 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
2737 }
2738 return 0;
2739}
2740
2741static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
2742 int get_st)
2743{
2744 struct mvs_phy *phy = &mvi->phy[i];
2745 struct pci_dev *pdev = mvi->pdev;
2746 u32 tmp;
2747 u64 tmp64;
2748
2749 mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
2750 phy->dev_info = mvs_read_port_cfg_data(mvi, i);
2751
2752 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
2753 phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;
2754
2755 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
2756 phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
2757
2758 if (get_st) {
2759 phy->irq_status = mvs_read_port_irq_stat(mvi, i);
2760 phy->phy_status = mvs_is_phy_ready(mvi, i);
2761 }
2762
2763 if (phy->phy_status) {
2764 u32 phy_st;
2765 struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
2766
2767 mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
2768 phy_st = mvs_read_port_cfg_data(mvi, i);
2769
2770 sas_phy->linkrate =
2771 (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
2772 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
2773 phy->minimum_linkrate =
2774 (phy->phy_status &
2775 PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
2776 phy->maximum_linkrate =
2777 (phy->phy_status &
2778 PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
2779
2780 if (phy->phy_type & PORT_TYPE_SAS) {
2781 /* Update attached_sas_addr */
2782 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
2783 phy->att_dev_sas_addr =
2784 (u64) mvs_read_port_cfg_data(mvi, i) << 32;
2785 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
2786 phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
2787 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
2788 phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
2789 phy->identify.device_type =
2790 phy->att_dev_info & PORT_DEV_TYPE_MASK;
2791
2792 if (phy->identify.device_type == SAS_END_DEV)
2793 phy->identify.target_port_protocols =
2794 SAS_PROTOCOL_SSP;
2795 else if (phy->identify.device_type != NO_DEVICE)
2796 phy->identify.target_port_protocols =
2797 SAS_PROTOCOL_SMP;
2798 if (phy_st & PHY_OOB_DTCTD)
2799 sas_phy->oob_mode = SAS_OOB_MODE;
2800 phy->frame_rcvd_size =
2801 sizeof(struct sas_identify_frame);
2802 } else if (phy->phy_type & PORT_TYPE_SATA) {
2803 phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
2804 if (mvs_is_sig_fis_received(phy->irq_status)) {
2805 phy->att_dev_sas_addr = i; /* temp */
2806 if (phy_st & PHY_OOB_DTCTD)
2807 sas_phy->oob_mode = SATA_OOB_MODE;
2808 phy->frame_rcvd_size =
2809 sizeof(struct dev_to_host_fis);
2810 mvs_get_d2h_reg(mvi, i,
2811 (void *)sas_phy->frame_rcvd);
2812 } else {
2813 dev_printk(KERN_DEBUG, &pdev->dev,
2814 "No sig fis\n");
2815 phy->phy_type &= ~(PORT_TYPE_SATA);
2816 goto out_done;
2817 }
2818 }
2819 tmp64 = cpu_to_be64(phy->att_dev_sas_addr);
2820 memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE);
2821
2822 dev_printk(KERN_DEBUG, &pdev->dev,
2823 "phy[%d] Get Attached Address 0x%llX ,"
2824 " SAS Address 0x%llX\n",
2825 i,
2826 (unsigned long long)phy->att_dev_sas_addr,
2827 (unsigned long long)phy->dev_sas_addr);
2828 dev_printk(KERN_DEBUG, &pdev->dev,
2829 "Rate = %x , type = %d\n",
2830 sas_phy->linkrate, phy->phy_type);
2831
2832 /* workaround for HW phy decoding error on 1.5g disk drive */
2833 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
2834 tmp = mvs_read_port_vsr_data(mvi, i);
2835 if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
2836 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
2837 SAS_LINK_RATE_1_5_GBPS)
2838 tmp &= ~PHY_MODE6_LATECLK;
2839 else
2840 tmp |= PHY_MODE6_LATECLK;
2841 mvs_write_port_vsr_data(mvi, i, tmp);
2842
2843 }
2844out_done:
2845 if (get_st)
2846 mvs_write_port_irq_stat(mvi, i, phy->irq_status);
2847}
2848
2849static void mvs_port_formed(struct asd_sas_phy *sas_phy)
2850{
2851 struct sas_ha_struct *sas_ha = sas_phy->ha;
2852 struct mvs_info *mvi = sas_ha->lldd_ha;
2853 struct asd_sas_port *sas_port = sas_phy->port;
2854 struct mvs_phy *phy = sas_phy->lldd_phy;
2855 struct mvs_port *port = &mvi->port[sas_port->id];
2856 unsigned long flags;
2857
2858 spin_lock_irqsave(&mvi->lock, flags);
2859 port->port_attached = 1;
2860 phy->port = port;
2861 port->taskfileset = MVS_ID_NOT_MAPPED;
2862 if (phy->phy_type & PORT_TYPE_SAS) {
2863 port->wide_port_phymap = sas_port->phy_mask;
2864 mvs_update_wideport(mvi, sas_phy->id);
2865 }
2866 spin_unlock_irqrestore(&mvi->lock, flags);
2867}
2868
2869static int mvs_I_T_nexus_reset(struct domain_device *dev)
2870{
2871 return TMF_RESP_FUNC_FAILED;
2872}
2873
2874static int __devinit mvs_hw_init(struct mvs_info *mvi)
2875{
2876 void __iomem *regs = mvi->regs;
2877 int i;
2878 u32 tmp, cctl;
2879
2880 /* make sure interrupts are masked immediately (paranoia) */
2881 mw32(GBL_CTL, 0);
2882 tmp = mr32(GBL_CTL);
2883
2884 /* Reset Controller */
2885 if (!(tmp & HBA_RST)) {
2886 if (mvi->flags & MVF_PHY_PWR_FIX) {
2887 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
2888 tmp &= ~PCTL_PWR_ON;
2889 tmp |= PCTL_OFF;
2890 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
2891
2892 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
2893 tmp &= ~PCTL_PWR_ON;
2894 tmp |= PCTL_OFF;
2895 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
2896 }
2897
2898 /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
2899 mw32_f(GBL_CTL, HBA_RST);
2900 }
2901
2902 /* wait for reset to finish; timeout is just a guess */
2903 i = 1000;
2904 while (i-- > 0) {
2905 msleep(10);
2906
2907 if (!(mr32(GBL_CTL) & HBA_RST))
2908 break;
2909 }
2910 if (mr32(GBL_CTL) & HBA_RST) {
2911 dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n");
2912 return -EBUSY;
2913 }
2914
2915 /* Init Chip */
2916 /* make sure RST is set; HBA_RST /should/ have done that for us */
2917 cctl = mr32(CTL);
2918 if (cctl & CCTL_RST)
2919 cctl &= ~CCTL_RST;
2920 else
2921 mw32_f(CTL, cctl | CCTL_RST);
2922
2923 /* write to device control _AND_ device status register? - A.C. */
2924 pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
2925 tmp &= ~PRD_REQ_MASK;
2926 tmp |= PRD_REQ_SIZE;
2927 pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
2928
2929 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
2930 tmp |= PCTL_PWR_ON;
2931 tmp &= ~PCTL_OFF;
2932 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
2933
2934 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
2935 tmp |= PCTL_PWR_ON;
2936 tmp &= ~PCTL_OFF;
2937 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
2938
2939 mw32_f(CTL, cctl);
2940
2941 /* reset control */
2942 mw32(PCS, 0); /*MVS_PCS */
2943
2944 mvs_phy_hacks(mvi);
2945
2946 mw32(CMD_LIST_LO, mvi->slot_dma);
2947 mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
2948
2949 mw32(RX_FIS_LO, mvi->rx_fis_dma);
2950 mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
2951
2952 mw32(TX_CFG, MVS_CHIP_SLOT_SZ);
2953 mw32(TX_LO, mvi->tx_dma);
2954 mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);
2955
2956 mw32(RX_CFG, MVS_RX_RING_SZ);
2957 mw32(RX_LO, mvi->rx_dma);
2958 mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);
2959
2960 /* enable auto port detection */
2961 mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
2962 msleep(1100);
2963 /* init and reset phys */
2964 for (i = 0; i < mvi->chip->n_phy; i++) {
2965 u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
2966 u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]);
2967
2968 mvs_detect_porttype(mvi, i);
2969
2970 /* set phy local SAS address */
2971 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
2972 mvs_write_port_cfg_data(mvi, i, lo);
2973 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
2974 mvs_write_port_cfg_data(mvi, i, hi);
2975
2976 /* reset phy */
2977 tmp = mvs_read_phy_ctl(mvi, i);
2978 tmp |= PHY_RST;
2979 mvs_write_phy_ctl(mvi, i, tmp);
2980 }
2981
2982 msleep(100);
2983
2984 for (i = 0; i < mvi->chip->n_phy; i++) {
2985 /* clear phy int status */
2986 tmp = mvs_read_port_irq_stat(mvi, i);
2987 tmp &= ~PHYEV_SIG_FIS;
2988 mvs_write_port_irq_stat(mvi, i, tmp);
2989
2990 /* set phy int mask */
2991 tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
2992 PHYEV_ID_DONE | PHYEV_DEC_ERR;
2993 mvs_write_port_irq_mask(mvi, i, tmp);
2994
2995 msleep(100);
2996 mvs_update_phyinfo(mvi, i, 1);
2997 mvs_enable_xmt(mvi, i);
2998 }
2999
3000 /* FIXME: update wide port bitmaps */
3001
3002 /* little endian for open address and command table, etc. */
3003 /* A.C.
3004 * it seems that (from the spec) turning on big-endian won't
3005 * do us any good on big-endian machines; needs further confirmation
3006 */
3007 cctl = mr32(CTL);
3008 cctl |= CCTL_ENDIAN_CMD;
3009 cctl |= CCTL_ENDIAN_DATA;
3010 cctl &= ~CCTL_ENDIAN_OPEN;
3011 cctl |= CCTL_ENDIAN_RSP;
3012 mw32_f(CTL, cctl);
3013
3014 /* reset CMD queue */
3015 tmp = mr32(PCS);
3016 tmp |= PCS_CMD_RST;
3017 mw32(PCS, tmp);
3018 /* interrupt coalescing may cause a missed HW interrupt in some cases,
3019 * and the max count is 0x1ff, while our max slot is 0x200,
3020 * which would make the count 0.
3021 */
3022 tmp = 0;
3023 mw32(INT_COAL, tmp);
3024
3025 tmp = 0x100;
3026 mw32(INT_COAL_TMOUT, tmp);
3027
3028 /* ladies and gentlemen, start your engines */
3029 mw32(TX_CFG, 0);
3030 mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
3031 mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
3032 /* enable CMD/CMPL_Q/RESP mode */
3033 mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);
3034
3035 /* enable completion queue interrupt */
3036 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS);
3037 mw32(INT_MASK, tmp);
3038
3039 /* Enable SRS interrupt */
3040 mw32(INT_MASK_SRS, 0xFF);
3041 return 0;
3042}
3043
3044static void __devinit mvs_print_info(struct mvs_info *mvi)
3045{
3046 struct pci_dev *pdev = mvi->pdev;
3047 static int printed_version;
3048
3049 if (!printed_version++)
3050 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3051
3052 dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
3053 mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
3054}
3055
3056static int __devinit mvs_pci_init(struct pci_dev *pdev,
3057 const struct pci_device_id *ent)
3058{
3059 int rc;
3060 struct mvs_info *mvi;
3061 irq_handler_t irq_handler = mvs_interrupt;
3062
3063 rc = pci_enable_device(pdev);
3064 if (rc)
3065 return rc;
3066
3067 pci_set_master(pdev);
3068
3069 rc = pci_request_regions(pdev, DRV_NAME);
3070 if (rc)
3071 goto err_out_disable;
3072
3073 rc = pci_go_64(pdev);
3074 if (rc)
3075 goto err_out_regions;
3076
3077 mvi = mvs_alloc(pdev, ent);
3078 if (!mvi) {
3079 rc = -ENOMEM;
3080 goto err_out_regions;
3081 }
3082
3083 rc = mvs_hw_init(mvi);
3084 if (rc)
3085 goto err_out_mvi;
3086
3087#ifndef MVS_DISABLE_MSI
3088 if (!pci_enable_msi(pdev)) {
3089 u32 tmp;
3090 void __iomem *regs = mvi->regs;
3091 mvi->flags |= MVF_MSI;
3092 irq_handler = mvs_msi_interrupt;
3093 tmp = mr32(PCS);
3094 mw32(PCS, tmp | PCS_SELF_CLEAR);
3095 }
3096#endif
3097
3098 rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi);
3099 if (rc)
3100 goto err_out_msi;
3101
3102 rc = scsi_add_host(mvi->shost, &pdev->dev);
3103 if (rc)
3104 goto err_out_irq;
3105
3106 rc = sas_register_ha(&mvi->sas);
3107 if (rc)
3108 goto err_out_shost;
3109
3110 pci_set_drvdata(pdev, mvi);
3111
3112 mvs_print_info(mvi);
3113
3114 mvs_hba_interrupt_enable(mvi);
3115
3116 scsi_scan_host(mvi->shost);
3117
3118 return 0;
3119
3120err_out_shost:
3121 scsi_remove_host(mvi->shost);
3122err_out_irq:
3123 free_irq(pdev->irq, mvi);
3124err_out_msi:
3125 if (mvi->flags & MVF_MSI)
3126 pci_disable_msi(pdev);
3127err_out_mvi:
3128 mvs_free(mvi);
3129err_out_regions:
3130 pci_release_regions(pdev);
3131err_out_disable:
3132 pci_disable_device(pdev);
3133 return rc;
3134}
3135
3136static void __devexit mvs_pci_remove(struct pci_dev *pdev)
3137{
3138 struct mvs_info *mvi = pci_get_drvdata(pdev);
3139
3140 pci_set_drvdata(pdev, NULL);
3141
3142 if (mvi) {
3143 sas_unregister_ha(&mvi->sas);
3144 mvs_hba_interrupt_disable(mvi);
3145 sas_remove_host(mvi->shost);
3146 scsi_remove_host(mvi->shost);
3147
3148 free_irq(pdev->irq, mvi);
3149 if (mvi->flags & MVF_MSI)
3150 pci_disable_msi(pdev);
3151 mvs_free(mvi);
3152 pci_release_regions(pdev);
3153 }
3154 pci_disable_device(pdev);
3155}
3156
3157static struct sas_domain_function_template mvs_transport_ops = {
3158 .lldd_execute_task = mvs_task_exec,
3159 .lldd_control_phy = mvs_phy_control,
3160 .lldd_abort_task = mvs_task_abort,
3161 .lldd_port_formed = mvs_port_formed,
3162 .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset,
3163};
3164
3165static struct pci_device_id __devinitdata mvs_pci_table[] = {
3166 { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
3167 { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
3168 {
3169 .vendor = PCI_VENDOR_ID_MARVELL,
3170 .device = 0x6440,
3171 .subvendor = PCI_ANY_ID,
3172 .subdevice = 0x6480,
3173 .class = 0,
3174 .class_mask = 0,
3175 .driver_data = chip_6480,
3176 },
3177 { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
3178 { PCI_VDEVICE(MARVELL, 0x6480), chip_6480 },
3179
3180 { } /* terminate list */
3181};
3182
3183static struct pci_driver mvs_pci_driver = {
3184 .name = DRV_NAME,
3185 .id_table = mvs_pci_table,
3186 .probe = mvs_pci_init,
3187 .remove = __devexit_p(mvs_pci_remove),
3188};
3189
3190static int __init mvs_init(void)
3191{
3192 int rc;
3193
3194 mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
3195 if (!mvs_stt)
3196 return -ENOMEM;
3197
3198 rc = pci_register_driver(&mvs_pci_driver);
3199 if (rc)
3200 goto err_out;
3201
3202 return 0;
3203
3204err_out:
3205 sas_release_transport(mvs_stt);
3206 return rc;
3207}
3208
3209static void __exit mvs_exit(void)
3210{
3211 pci_unregister_driver(&mvs_pci_driver);
3212 sas_release_transport(mvs_stt);
3213}
3214
3215module_init(mvs_init);
3216module_exit(mvs_exit);
3217
3218MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
3219MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
3220MODULE_VERSION(DRV_VERSION);
3221MODULE_LICENSE("GPL");
3222MODULE_DEVICE_TABLE(pci, mvs_pci_table);
diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig
new file mode 100644
index 000000000000..6de7af27e507
--- /dev/null
+++ b/drivers/scsi/mvsas/Kconfig
@@ -0,0 +1,42 @@
1#
2# Kernel configuration file for 88SE64XX/88SE94XX SAS/SATA driver.
3#
4# Copyright 2007 Red Hat, Inc.
5# Copyright 2008 Marvell. <kewei@marvell.com>
6#
7# This file is licensed under GPLv2.
8#
9# This file is part of the 88SE64XX/88SE94XX driver.
10#
11# The 88SE64XX/88SE94XX driver is free software; you can redistribute
12# it and/or modify it under the terms of the GNU General Public License
13# as published by the Free Software Foundation; version 2 of the
14# License.
15#
16# The 88SE64XX/88SE94XX driver is distributed in the hope that it will be
17# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19# General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License
22# along with 88SE64XX/88SE94XX Driver; if not, write to the Free Software
23# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24#
25#
26
27config SCSI_MVSAS
28 tristate "Marvell 88SE64XX/88SE94XX SAS/SATA support"
29 depends on PCI
30 select SCSI_SAS_LIBSAS
31 select FW_LOADER
32 help
33 This driver supports Marvell's SAS/SATA 3Gb/s PCI-E 88SE64XX and 6Gb/s
34 PCI-E 88SE94XX chip-based host adapters.
35
36config SCSI_MVSAS_DEBUG
37 bool "Compile in debug mode"
38 default y
39 depends on SCSI_MVSAS
40 help
41 Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode,
42 the driver prints some messages to the console.
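As a minimal usage sketch (not part of the patch): a kernel .config built with these options would be expected to carry a fragment like the one below. The CONFIG_SCSI_MVSAS and CONFIG_SCSI_MVSAS_DEBUG symbols come straight from the Kconfig entries above; the SCSI_SAS_LIBSAS line is assumed to follow from the select.

	# Marvell 88SE64XX/88SE94XX SAS/SATA support, built as a module
	CONFIG_SCSI_SAS_LIBSAS=m
	CONFIG_SCSI_MVSAS=m
	# extra console messages from the driver
	CONFIG_SCSI_MVSAS_DEBUG=y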
diff --git a/drivers/scsi/mvsas/Makefile b/drivers/scsi/mvsas/Makefile
new file mode 100644
index 000000000000..52ac4264677d
--- /dev/null
+++ b/drivers/scsi/mvsas/Makefile
@@ -0,0 +1,32 @@
1#
2# Makefile for Marvell 88SE64xx/88SE94xx SAS/SATA driver.
3#
4# Copyright 2007 Red Hat, Inc.
5# Copyright 2008 Marvell. <kewei@marvell.com>
6#
7# This file is licensed under GPLv2.
8#
9# This program is free software; you can redistribute it and/or
10# modify it under the terms of the GNU General Public License as
11# published by the Free Software Foundation; version 2 of the
12# License.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17# General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program; if not, write to the Free Software
21# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22# USA
23
24ifeq ($(CONFIG_SCSI_MVSAS_DEBUG),y)
25 EXTRA_CFLAGS += -DMV_DEBUG
26endif
27
28obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
29mvsas-y += mv_init.o \
30 mv_sas.o \
31 mv_64xx.o \
32 mv_94xx.o
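For illustration only, assuming a configured kernel tree with the options above enabled: the kbuild M= idiom is one way to rebuild just this directory and produce the composite mvsas.ko named by the obj- and mvsas-y rules.

	# from the top of a configured kernel source tree
	make M=drivers/scsi/mvsas modules
	# result: drivers/scsi/mvsas/mvsas.ko (mv_init.o, mv_sas.o, mv_64xx.o, mv_94xx.o)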
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
new file mode 100644
index 000000000000..10a5077b6aed
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -0,0 +1,793 @@
1/*
2 * Marvell 88SE64xx hardware specific
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#include "mv_sas.h"
26#include "mv_64xx.h"
27#include "mv_chips.h"
28
29static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
30{
31 void __iomem *regs = mvi->regs;
32 u32 reg;
33 struct mvs_phy *phy = &mvi->phy[i];
34
35 /* TODO check & save device type */
36 reg = mr32(MVS_GBL_PORT_TYPE);
37 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
38 if (reg & MODE_SAS_SATA & (1 << i))
39 phy->phy_type |= PORT_TYPE_SAS;
40 else
41 phy->phy_type |= PORT_TYPE_SATA;
42}
43
44static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
45{
46 void __iomem *regs = mvi->regs;
47 u32 tmp;
48
49 tmp = mr32(MVS_PCS);
50 if (mvi->chip->n_phy <= 4)
51 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
52 else
53 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
54 mw32(MVS_PCS, tmp);
55}
56
57static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi)
58{
59 void __iomem *regs = mvi->regs;
60
61 mvs_phy_hacks(mvi);
62
63 if (!(mvi->flags & MVF_FLAG_SOC)) {
64 /* TEST - for phy decoding error, adjust voltage levels */
65 mw32(MVS_P0_VSR_ADDR + 0, 0x8);
66 mw32(MVS_P0_VSR_DATA + 0, 0x2F0);
67
68 mw32(MVS_P0_VSR_ADDR + 8, 0x8);
69 mw32(MVS_P0_VSR_DATA + 8, 0x2F0);
70
71 mw32(MVS_P0_VSR_ADDR + 16, 0x8);
72 mw32(MVS_P0_VSR_DATA + 16, 0x2F0);
73
74 mw32(MVS_P0_VSR_ADDR + 24, 0x8);
75 mw32(MVS_P0_VSR_DATA + 24, 0x2F0);
76 } else {
77 int i;
78 /* disable auto port detection */
79 mw32(MVS_GBL_PORT_TYPE, 0);
80 for (i = 0; i < mvi->chip->n_phy; i++) {
81 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE7);
82 mvs_write_port_vsr_data(mvi, i, 0x90000000);
83 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE9);
84 mvs_write_port_vsr_data(mvi, i, 0x50f2);
85 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE11);
86 mvs_write_port_vsr_data(mvi, i, 0x0e);
87 }
88 }
89}
90
91static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
92{
93 void __iomem *regs = mvi->regs;
94 u32 reg, tmp;
95
96 if (!(mvi->flags & MVF_FLAG_SOC)) {
97 if (phy_id < 4)
98 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg);
99 else
100 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg);
101
102 } else
103 reg = mr32(MVS_PHY_CTL);
104
105 tmp = reg;
106 if (phy_id < 4)
107 tmp |= (1U << phy_id) << PCTL_LINK_OFFS;
108 else
109 tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS;
110
111 if (!(mvi->flags & MVF_FLAG_SOC)) {
112 if (phy_id < 4) {
113 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
114 mdelay(10);
115 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg);
116 } else {
117 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
118 mdelay(10);
119 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, reg);
120 }
121 } else {
122 mw32(MVS_PHY_CTL, tmp);
123 mdelay(10);
124 mw32(MVS_PHY_CTL, reg);
125 }
126}
127
128static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
129{
130 u32 tmp;
131 tmp = mvs_read_port_irq_stat(mvi, phy_id);
132 tmp &= ~PHYEV_RDY_CH;
133 mvs_write_port_irq_stat(mvi, phy_id, tmp);
134 tmp = mvs_read_phy_ctl(mvi, phy_id);
135 if (hard)
136 tmp |= PHY_RST_HARD;
137 else
138 tmp |= PHY_RST;
139 mvs_write_phy_ctl(mvi, phy_id, tmp);
140 if (hard) {
141 do {
142 tmp = mvs_read_phy_ctl(mvi, phy_id);
143 } while (tmp & PHY_RST_HARD);
144 }
145}
146
147static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi)
148{
149 void __iomem *regs = mvi->regs;
150 u32 tmp;
151 int i;
152
153 /* make sure interrupts are masked immediately (paranoia) */
154 mw32(MVS_GBL_CTL, 0);
155 tmp = mr32(MVS_GBL_CTL);
156
157 /* Reset Controller */
158 if (!(tmp & HBA_RST)) {
159 if (mvi->flags & MVF_PHY_PWR_FIX) {
160 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
161 tmp &= ~PCTL_PWR_OFF;
162 tmp |= PCTL_PHY_DSBL;
163 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
164
165 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
166 tmp &= ~PCTL_PWR_OFF;
167 tmp |= PCTL_PHY_DSBL;
168 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
169 }
170 }
171
172 /* make sure interrupts are masked immediately (paranoia) */
173 mw32(MVS_GBL_CTL, 0);
174 tmp = mr32(MVS_GBL_CTL);
175
176 /* Reset Controller */
177 if (!(tmp & HBA_RST)) {
178 /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
179 mw32_f(MVS_GBL_CTL, HBA_RST);
180 }
181
182 /* wait for reset to finish; timeout is just a guess */
183 i = 1000;
184 while (i-- > 0) {
185 msleep(10);
186
187 if (!(mr32(MVS_GBL_CTL) & HBA_RST))
188 break;
189 }
190 if (mr32(MVS_GBL_CTL) & HBA_RST) {
191 dev_printk(KERN_ERR, mvi->dev, "HBA reset failed\n");
192 return -EBUSY;
193 }
194 return 0;
195}
196
197static void mvs_64xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
198{
199 void __iomem *regs = mvi->regs;
200 u32 tmp;
201 if (!(mvi->flags & MVF_FLAG_SOC)) {
202 u32 offs;
203 if (phy_id < 4)
204 offs = PCR_PHY_CTL;
205 else {
206 offs = PCR_PHY_CTL2;
207 phy_id -= 4;
208 }
209 pci_read_config_dword(mvi->pdev, offs, &tmp);
210 tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
211 pci_write_config_dword(mvi->pdev, offs, tmp);
212 } else {
213 tmp = mr32(MVS_PHY_CTL);
214 tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
215 mw32(MVS_PHY_CTL, tmp);
216 }
217}
218
219static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
220{
221 void __iomem *regs = mvi->regs;
222 u32 tmp;
223 if (!(mvi->flags & MVF_FLAG_SOC)) {
224 u32 offs;
225 if (phy_id < 4)
226 offs = PCR_PHY_CTL;
227 else {
228 offs = PCR_PHY_CTL2;
229 phy_id -= 4;
230 }
231 pci_read_config_dword(mvi->pdev, offs, &tmp);
232 tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
233 pci_write_config_dword(mvi->pdev, offs, tmp);
234 } else {
235 tmp = mr32(MVS_PHY_CTL);
236 tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
237 mw32(MVS_PHY_CTL, tmp);
238 }
239}
240
241static int __devinit mvs_64xx_init(struct mvs_info *mvi)
242{
243 void __iomem *regs = mvi->regs;
244 int i;
245 u32 tmp, cctl;
246
247 if (mvi->pdev && mvi->pdev->revision == 0)
248 mvi->flags |= MVF_PHY_PWR_FIX;
249 if (!(mvi->flags & MVF_FLAG_SOC)) {
250 mvs_show_pcie_usage(mvi);
251 tmp = mvs_64xx_chip_reset(mvi);
252 if (tmp)
253 return tmp;
254 } else {
255 tmp = mr32(MVS_PHY_CTL);
256 tmp &= ~PCTL_PWR_OFF;
257 tmp |= PCTL_PHY_DSBL;
258 mw32(MVS_PHY_CTL, tmp);
259 }
260
261 /* Init Chip */
262 /* make sure RST is set; HBA_RST /should/ have done that for us */
263 cctl = mr32(MVS_CTL) & 0xFFFF;
264 if (cctl & CCTL_RST)
265 cctl &= ~CCTL_RST;
266 else
267 mw32_f(MVS_CTL, cctl | CCTL_RST);
268
269 if (!(mvi->flags & MVF_FLAG_SOC)) {
270 /* write to device control _AND_ device status register */
271 pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
272 tmp &= ~PRD_REQ_MASK;
273 tmp |= PRD_REQ_SIZE;
274 pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
275
276 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
277 tmp &= ~PCTL_PWR_OFF;
278 tmp &= ~PCTL_PHY_DSBL;
279 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
280
281 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
282 tmp &= ~PCTL_PWR_OFF;
283 tmp &= ~PCTL_PHY_DSBL;
284 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
285 } else {
286 tmp = mr32(MVS_PHY_CTL);
287 tmp &= ~PCTL_PWR_OFF;
288 tmp |= PCTL_COM_ON;
289 tmp &= ~PCTL_PHY_DSBL;
290 tmp |= PCTL_LINK_RST;
291 mw32(MVS_PHY_CTL, tmp);
292 msleep(100);
293 tmp &= ~PCTL_LINK_RST;
294 mw32(MVS_PHY_CTL, tmp);
295 msleep(100);
296 }
297
298 /* reset control */
299 mw32(MVS_PCS, 0); /* MVS_PCS */
300 /* init phys */
301 mvs_64xx_phy_hacks(mvi);
302
303 /* enable auto port detection */
304 mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);
305
306 mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
307 mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
308
309 mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
310 mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
311
312 mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
313 mw32(MVS_TX_LO, mvi->tx_dma);
314 mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
315
316 mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
317 mw32(MVS_RX_LO, mvi->rx_dma);
318 mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
319
320 for (i = 0; i < mvi->chip->n_phy; i++) {
321 /* set phy local SAS address */
322 /* should set little endian SAS address to 64xx chip */
323 mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI,
324 cpu_to_be64(mvi->phy[i].dev_sas_addr));
325
326 mvs_64xx_enable_xmt(mvi, i);
327
328 mvs_64xx_phy_reset(mvi, i, 1);
329 msleep(500);
330 mvs_64xx_detect_porttype(mvi, i);
331 }
332 if (mvi->flags & MVF_FLAG_SOC) {
333 /* set select registers */
334 writel(0x0E008000, regs + 0x000);
335 writel(0x59000008, regs + 0x004);
336 writel(0x20, regs + 0x008);
337 writel(0x20, regs + 0x00c);
338 writel(0x20, regs + 0x010);
339 writel(0x20, regs + 0x014);
340 writel(0x20, regs + 0x018);
341 writel(0x20, regs + 0x01c);
342 }
343 for (i = 0; i < mvi->chip->n_phy; i++) {
344 /* clear phy int status */
345 tmp = mvs_read_port_irq_stat(mvi, i);
346 tmp &= ~PHYEV_SIG_FIS;
347 mvs_write_port_irq_stat(mvi, i, tmp);
348
349 /* set phy int mask */
350 tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
351 PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR |
352 PHYEV_DEC_ERR;
353 mvs_write_port_irq_mask(mvi, i, tmp);
354
355 msleep(100);
356 mvs_update_phyinfo(mvi, i, 1);
357 }
358
359 /* FIXME: update wide port bitmaps */
360
361 /* little endian for open address and command table, etc. */
362 /*
363 * it seems that (from the spec) turning on big-endian won't
364 * do us any good on big-endian machines; needs further confirmation
365 */
366 cctl = mr32(MVS_CTL);
367 cctl |= CCTL_ENDIAN_CMD;
368 cctl |= CCTL_ENDIAN_DATA;
369 cctl &= ~CCTL_ENDIAN_OPEN;
370 cctl |= CCTL_ENDIAN_RSP;
371 mw32_f(MVS_CTL, cctl);
372
373 /* reset CMD queue */
374 tmp = mr32(MVS_PCS);
375 tmp |= PCS_CMD_RST;
376 mw32(MVS_PCS, tmp);
377 /* interrupt coalescing may cause a missed HW interrupt in some cases,
378 * and the max count is 0x1ff, while our max slot is 0x200,
379 * which would make the count 0.
380 */
381 tmp = 0;
382 mw32(MVS_INT_COAL, tmp);
383
384 tmp = 0x100;
385 mw32(MVS_INT_COAL_TMOUT, tmp);
386
387 /* ladies and gentlemen, start your engines */
388 mw32(MVS_TX_CFG, 0);
389 mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
390 mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
391 /* enable CMD/CMPL_Q/RESP mode */
392 mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN |
393 PCS_CMD_EN | PCS_CMD_STOP_ERR);
394
395 /* enable completion queue interrupt */
396 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
397 CINT_DMA_PCIE);
398
399 mw32(MVS_INT_MASK, tmp);
400
401 /* Enable SRS interrupt */
402 mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
403
404 return 0;
405}
406
407static int mvs_64xx_ioremap(struct mvs_info *mvi)
408{
409 if (!mvs_ioremap(mvi, 4, 2))
410 return 0;
411 return -1;
412}
413
414static void mvs_64xx_iounmap(struct mvs_info *mvi)
415{
416 mvs_iounmap(mvi->regs);
417 mvs_iounmap(mvi->regs_ex);
418}
419
420static void mvs_64xx_interrupt_enable(struct mvs_info *mvi)
421{
422 void __iomem *regs = mvi->regs;
423 u32 tmp;
424
425 tmp = mr32(MVS_GBL_CTL);
426 mw32(MVS_GBL_CTL, tmp | INT_EN);
427}
428
429static void mvs_64xx_interrupt_disable(struct mvs_info *mvi)
430{
431 void __iomem *regs = mvi->regs;
432 u32 tmp;
433
434 tmp = mr32(MVS_GBL_CTL);
435 mw32(MVS_GBL_CTL, tmp & ~INT_EN);
436}
437
438static u32 mvs_64xx_isr_status(struct mvs_info *mvi, int irq)
439{
440 void __iomem *regs = mvi->regs;
441 u32 stat;
442
443 if (!(mvi->flags & MVF_FLAG_SOC)) {
444 stat = mr32(MVS_GBL_INT_STAT);
445
446 if (stat == 0 || stat == 0xffffffff)
447 return 0;
448 } else
449 stat = 1;
450 return stat;
451}
452
453static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat)
454{
455 void __iomem *regs = mvi->regs;
456
457 /* clear CMD_CMPLT ASAP */
458 mw32_f(MVS_INT_STAT, CINT_DONE);
459#ifndef MVS_USE_TASKLET
460 spin_lock(&mvi->lock);
461#endif
462 mvs_int_full(mvi);
463#ifndef MVS_USE_TASKLET
464 spin_unlock(&mvi->lock);
465#endif
466 return IRQ_HANDLED;
467}
468
469static void mvs_64xx_command_active(struct mvs_info *mvi, u32 slot_idx)
470{
471 u32 tmp;
472 mvs_cw32(mvi, 0x40 + (slot_idx >> 3), 1 << (slot_idx % 32));
473 mvs_cw32(mvi, 0x00 + (slot_idx >> 3), 1 << (slot_idx % 32));
474 do {
475 tmp = mvs_cr32(mvi, 0x00 + (slot_idx >> 3));
476 } while (tmp & 1 << (slot_idx % 32));
477 do {
478 tmp = mvs_cr32(mvi, 0x40 + (slot_idx >> 3));
479 } while (tmp & 1 << (slot_idx % 32));
480}
481
482static void mvs_64xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
483 u32 tfs)
484{
485 void __iomem *regs = mvi->regs;
486 u32 tmp;
487
488 if (type == PORT_TYPE_SATA) {
489 tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
490 mw32(MVS_INT_STAT_SRS_0, tmp);
491 }
492 mw32(MVS_INT_STAT, CINT_CI_STOP);
493 tmp = mr32(MVS_PCS) | 0xFF00;
494 mw32(MVS_PCS, tmp);
495}
496
497static void mvs_64xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
498{
499 void __iomem *regs = mvi->regs;
500 u32 tmp, offs;
501
502 if (*tfs == MVS_ID_NOT_MAPPED)
503 return;
504
505 offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
506 if (*tfs < 16) {
507 tmp = mr32(MVS_PCS);
508 mw32(MVS_PCS, tmp & ~offs);
509 } else {
510 tmp = mr32(MVS_CTL);
511 mw32(MVS_CTL, tmp & ~offs);
512 }
513
514 tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << *tfs);
515 if (tmp)
516 mw32(MVS_INT_STAT_SRS_0, tmp);
517
518 *tfs = MVS_ID_NOT_MAPPED;
519 return;
520}
521
522static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
523{
524 int i;
525 u32 tmp, offs;
526 void __iomem *regs = mvi->regs;
527
528 if (*tfs != MVS_ID_NOT_MAPPED)
529 return 0;
530
531 tmp = mr32(MVS_PCS);
532
533 for (i = 0; i < mvi->chip->srs_sz; i++) {
534 if (i == 16)
535 tmp = mr32(MVS_CTL);
536 offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
537 if (!(tmp & offs)) {
538 *tfs = i;
539
540 if (i < 16)
541 mw32(MVS_PCS, tmp | offs);
542 else
543 mw32(MVS_CTL, tmp | offs);
544 tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << i);
545 if (tmp)
546 mw32(MVS_INT_STAT_SRS_0, tmp);
547 return 0;
548 }
549 }
550 return MVS_ID_NOT_MAPPED;
551}
552
553void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
554{
555 int i;
556 struct scatterlist *sg;
557 struct mvs_prd *buf_prd = prd;
558 for_each_sg(scatter, sg, nr, i) {
559 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
560 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
561 buf_prd++;
562 }
563}
564
565static int mvs_64xx_oob_done(struct mvs_info *mvi, int i)
566{
567 u32 phy_st;
568 mvs_write_port_cfg_addr(mvi, i,
569 PHYR_PHY_STAT);
570 phy_st = mvs_read_port_cfg_data(mvi, i);
571 if (phy_st & PHY_OOB_DTCTD)
572 return 1;
573 return 0;
574}
575
576static void mvs_64xx_fix_phy_info(struct mvs_info *mvi, int i,
577 struct sas_identify_frame *id)
578
579{
580 struct mvs_phy *phy = &mvi->phy[i];
581 struct asd_sas_phy *sas_phy = &phy->sas_phy;
582
583 sas_phy->linkrate =
584 (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
585 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
586
587 phy->minimum_linkrate =
588 (phy->phy_status &
589 PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
590 phy->maximum_linkrate =
591 (phy->phy_status &
592 PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
593
594 mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
595 phy->dev_info = mvs_read_port_cfg_data(mvi, i);
596
597 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
598 phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
599
600 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
601 phy->att_dev_sas_addr =
602 (u64) mvs_read_port_cfg_data(mvi, i) << 32;
603 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
604 phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
605 phy->att_dev_sas_addr = SAS_ADDR(&phy->att_dev_sas_addr);
606}
607
608static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
609{
610 u32 tmp;
611 struct mvs_phy *phy = &mvi->phy[i];
612 /* workaround for HW phy decoding error on 1.5g disk drive */
613 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
614 tmp = mvs_read_port_vsr_data(mvi, i);
615 if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
616 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
617 SAS_LINK_RATE_1_5_GBPS)
618 tmp &= ~PHY_MODE6_LATECLK;
619 else
620 tmp |= PHY_MODE6_LATECLK;
621 mvs_write_port_vsr_data(mvi, i, tmp);
622}
623
624void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
625 struct sas_phy_linkrates *rates)
626{
627 u32 lrmin = 0, lrmax = 0;
628 u32 tmp;
629
630 tmp = mvs_read_phy_ctl(mvi, phy_id);
631 lrmin = (rates->minimum_linkrate << 8);
632 lrmax = (rates->maximum_linkrate << 12);
633
634 if (lrmin) {
635 tmp &= ~(0xf << 8);
636 tmp |= lrmin;
637 }
638 if (lrmax) {
639 tmp &= ~(0xf << 12);
640 tmp |= lrmax;
641 }
642 mvs_write_phy_ctl(mvi, phy_id, tmp);
643 mvs_64xx_phy_reset(mvi, phy_id, 1);
644}
645
646static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
647{
648 u32 tmp;
649 void __iomem *regs = mvi->regs;
650 tmp = mr32(MVS_PCS);
651 mw32(MVS_PCS, tmp & 0xFFFF);
652 mw32(MVS_PCS, tmp);
653 tmp = mr32(MVS_CTL);
654 mw32(MVS_CTL, tmp & 0xFFFF);
655 mw32(MVS_CTL, tmp);
656}
657
658
659u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
660{
661 void __iomem *regs = mvi->regs_ex;
662 return ior32(SPI_DATA_REG_64XX);
663}
664
665void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
666{
667 void __iomem *regs = mvi->regs_ex;
668 iow32(SPI_DATA_REG_64XX, data);
669}
670
671
672int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
673 u32 *dwCmd,
674 u8 cmd,
675 u8 read,
676 u8 length,
677 u32 addr
678 )
679{
680 u32 dwTmp;
681
682 dwTmp = ((u32)cmd << 24) | ((u32)length << 19);
683 if (read)
684 dwTmp |= 1U<<23;
685
686 if (addr != MV_MAX_U32) {
687 dwTmp |= 1U<<22;
688 dwTmp |= (addr & 0x0003FFFF);
689 }
690
691 *dwCmd = dwTmp;
692 return 0;
693}
694
695
696int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
697{
698 void __iomem *regs = mvi->regs_ex;
699 int retry;
700
701 for (retry = 0; retry < 1; retry++) {
702 iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE);
703 iow32(SPI_CMD_REG_64XX, cmd);
704 iow32(SPI_CTRL_REG_64XX,
705 SPI_CTRL_VENDOR_ENABLE | SPI_CTRL_SPISTART);
706 }
707
708 return 0;
709}
710
711int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
712{
713 void __iomem *regs = mvi->regs_ex;
714 u32 i, dwTmp;
715
716 for (i = 0; i < timeout; i++) {
717 dwTmp = ior32(SPI_CTRL_REG_64XX);
718 if (!(dwTmp & SPI_CTRL_SPISTART))
719 return 0;
720 msleep(10);
721 }
722
723 return -1;
724}
725
726#ifndef DISABLE_HOTPLUG_DMA_FIX
727void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
728{
729 int i;
730 struct mvs_prd *buf_prd = prd;
731 buf_prd += from;
732 for (i = 0; i < MAX_SG_ENTRY - from; i++) {
733 buf_prd->addr = cpu_to_le64(buf_dma);
734 buf_prd->len = cpu_to_le32(buf_len);
735 ++buf_prd;
736 }
737}
738#endif
739
740const struct mvs_dispatch mvs_64xx_dispatch = {
741 "mv64xx",
742 mvs_64xx_init,
743 NULL,
744 mvs_64xx_ioremap,
745 mvs_64xx_iounmap,
746 mvs_64xx_isr,
747 mvs_64xx_isr_status,
748 mvs_64xx_interrupt_enable,
749 mvs_64xx_interrupt_disable,
750 mvs_read_phy_ctl,
751 mvs_write_phy_ctl,
752 mvs_read_port_cfg_data,
753 mvs_write_port_cfg_data,
754 mvs_write_port_cfg_addr,
755 mvs_read_port_vsr_data,
756 mvs_write_port_vsr_data,
757 mvs_write_port_vsr_addr,
758 mvs_read_port_irq_stat,
759 mvs_write_port_irq_stat,
760 mvs_read_port_irq_mask,
761 mvs_write_port_irq_mask,
762 mvs_get_sas_addr,
763 mvs_64xx_command_active,
764 mvs_64xx_issue_stop,
765 mvs_start_delivery,
766 mvs_rx_update,
767 mvs_int_full,
768 mvs_64xx_assign_reg_set,
769 mvs_64xx_free_reg_set,
770 mvs_get_prd_size,
771 mvs_get_prd_count,
772 mvs_64xx_make_prd,
773 mvs_64xx_detect_porttype,
774 mvs_64xx_oob_done,
775 mvs_64xx_fix_phy_info,
776 mvs_64xx_phy_work_around,
777 mvs_64xx_phy_set_link_rate,
778 mvs_hw_max_link_rate,
779 mvs_64xx_phy_disable,
780 mvs_64xx_phy_enable,
781 mvs_64xx_phy_reset,
782 mvs_64xx_stp_reset,
783 mvs_64xx_clear_active_cmds,
784 mvs_64xx_spi_read_data,
785 mvs_64xx_spi_write_data,
786 mvs_64xx_spi_buildcmd,
787 mvs_64xx_spi_issuecmd,
788 mvs_64xx_spi_waitdataready,
789#ifndef DISABLE_HOTPLUG_DMA_FIX
790 mvs_64xx_fix_dma,
791#endif
792};
793
diff --git a/drivers/scsi/mvsas/mv_64xx.h b/drivers/scsi/mvsas/mv_64xx.h
new file mode 100644
index 000000000000..42e947d9795e
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_64xx.h
@@ -0,0 +1,151 @@
1/*
2 * Marvell 88SE64xx hardware specific header file
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#ifndef _MVS64XX_REG_H_
26#define _MVS64XX_REG_H_
27
28#include <linux/types.h>
29
30#define MAX_LINK_RATE SAS_LINK_RATE_3_0_GBPS
31
32/* enhanced mode registers (BAR4) */
33enum hw_registers {
34 MVS_GBL_CTL = 0x04, /* global control */
35 MVS_GBL_INT_STAT = 0x08, /* global irq status */
36 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
37
38 MVS_PHY_CTL = 0x40, /* SOC PHY Control */
39 MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */
40
41 MVS_GBL_PORT_TYPE = 0xa0, /* port type */
42
43 MVS_CTL = 0x100, /* SAS/SATA port configuration */
44 MVS_PCS = 0x104, /* SAS/SATA port control/status */
45 MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
46 MVS_CMD_LIST_HI = 0x10C,
47 MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
48 MVS_RX_FIS_HI = 0x114,
49
50 MVS_TX_CFG = 0x120, /* TX configuration */
51 MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
52 MVS_TX_HI = 0x128,
53
54 MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
55 MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
56 MVS_RX_CFG = 0x134, /* RX configuration */
57 MVS_RX_LO = 0x138, /* RX (completion) ring addr */
58 MVS_RX_HI = 0x13C,
59 MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
60
61 MVS_INT_COAL = 0x148, /* Int coalescing config */
62 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
63 MVS_INT_STAT = 0x150, /* Central int status */
64 MVS_INT_MASK = 0x154, /* Central int enable */
65 MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */
66 MVS_INT_MASK_SRS_0 = 0x15C,
67
68 /* ports 1-3 follow after this */
69 MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */
70 MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */
71 /* ports 5-7 follow after this */
72 MVS_P4_INT_STAT = 0x200, /* Port4 interrupt status */
73 MVS_P4_INT_MASK = 0x204, /* Port4 interrupt enable mask */
74
75 /* ports 1-3 follow after this */
76 MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */
77 /* ports 5-7 follow after this */
78 MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */
79
80 MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */
81 MVS_CMD_DATA = 0x1BC, /* Command register port (data) */
82
83 /* ports 1-3 follow after this */
84 MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */
85 MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */
86 /* ports 5-7 follow after this */
87 MVS_P4_CFG_ADDR = 0x230, /* Port4 config address */
88 MVS_P4_CFG_DATA = 0x234, /* Port4 config data */
89
90 /* ports 1-3 follow after this */
91 MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */
92 MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */
93 /* ports 5-7 follow after this */
94 MVS_P4_VSR_ADDR = 0x250, /* port4 VSR addr */
95 MVS_P4_VSR_DATA = 0x254, /* port4 VSR data */
96};
97
98enum pci_cfg_registers {
99 PCR_PHY_CTL = 0x40,
100 PCR_PHY_CTL2 = 0x90,
101 PCR_DEV_CTRL = 0xE8,
102 PCR_LINK_STAT = 0xF2,
103};
104
105/* SAS/SATA Vendor Specific Port Registers */
106enum sas_sata_vsp_regs {
107 VSR_PHY_STAT = 0x00, /* Phy Status */
108 VSR_PHY_MODE1 = 0x01, /* phy tx */
109 VSR_PHY_MODE2 = 0x02, /* tx scc */
110 VSR_PHY_MODE3 = 0x03, /* pll */
111 VSR_PHY_MODE4 = 0x04, /* VCO */
112 VSR_PHY_MODE5 = 0x05, /* Rx */
113 VSR_PHY_MODE6 = 0x06, /* CDR */
114 VSR_PHY_MODE7 = 0x07, /* Impedance */
115 VSR_PHY_MODE8 = 0x08, /* Voltage */
116 VSR_PHY_MODE9 = 0x09, /* Test */
117 VSR_PHY_MODE10 = 0x0A, /* Power */
118 VSR_PHY_MODE11 = 0x0B, /* Phy Mode */
119	VSR_PHY_VS0		= 0x0C, /* Vendor Specific 0 */
120	VSR_PHY_VS1		= 0x0D, /* Vendor Specific 1 */
121};
122
123enum chip_register_bits {
124 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
125 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
126 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
127 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
128 (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
129};
130
131#define MAX_SG_ENTRY 64
132
133struct mvs_prd {
134 __le64 addr; /* 64-bit buffer address */
135 __le32 reserved;
136 __le32 len; /* 16-bit length */
137};
138
139#define SPI_CTRL_REG 0xc0
140#define SPI_CTRL_VENDOR_ENABLE (1U<<29)
141#define SPI_CTRL_SPIRDY (1U<<22)
142#define SPI_CTRL_SPISTART (1U<<20)
143
144#define SPI_CMD_REG 0xc4
145#define SPI_DATA_REG 0xc8
146
147#define SPI_CTRL_REG_64XX 0x10
148#define SPI_CMD_REG_64XX 0x14
149#define SPI_DATA_REG_64XX 0x18
150
151#endif
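For comparison with mvs_94xx_make_prd() later in this patch, here is a sketch of how a 64xx PRD entry using the struct mvs_prd layout above could be filled from a scatterlist. The helper name is hypothetical; the in-tree 64xx implementation lives in mv_64xx.c earlier in this patch and may differ in detail.

	static void example_64xx_make_prd(struct scatterlist *scatter, int nr,
					  void *prd)
	{
		struct mvs_prd *buf_prd = prd;
		struct scatterlist *sg;
		int i;

		for_each_sg(scatter, sg, nr, i) {
			buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
			buf_prd->len  = cpu_to_le32(sg_dma_len(sg)); /* 16-bit length field */
			buf_prd++;
		}
	}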
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
new file mode 100644
index 000000000000..0940fae19d20
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -0,0 +1,672 @@
1/*
2 * Marvell 88SE94xx hardware specific
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#include "mv_sas.h"
26#include "mv_94xx.h"
27#include "mv_chips.h"
28
29static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
30{
31 u32 reg;
32 struct mvs_phy *phy = &mvi->phy[i];
33 u32 phy_status;
34
35 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3);
36 reg = mvs_read_port_vsr_data(mvi, i);
37 phy_status = ((reg & 0x3f0000) >> 16) & 0xff;
38 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
39 switch (phy_status) {
40 case 0x10:
41 phy->phy_type |= PORT_TYPE_SAS;
42 break;
43 case 0x1d:
44 default:
45 phy->phy_type |= PORT_TYPE_SATA;
46 break;
47 }
48}
49
50static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
51{
52 void __iomem *regs = mvi->regs;
53 u32 tmp;
54
55 tmp = mr32(MVS_PCS);
56 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
57 mw32(MVS_PCS, tmp);
58}
59
60static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
61{
62 u32 tmp;
63
64 tmp = mvs_read_port_irq_stat(mvi, phy_id);
65 tmp &= ~PHYEV_RDY_CH;
66 mvs_write_port_irq_stat(mvi, phy_id, tmp);
67 if (hard) {
68 tmp = mvs_read_phy_ctl(mvi, phy_id);
69 tmp |= PHY_RST_HARD;
70 mvs_write_phy_ctl(mvi, phy_id, tmp);
71 do {
72 tmp = mvs_read_phy_ctl(mvi, phy_id);
73 } while (tmp & PHY_RST_HARD);
74 } else {
75 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT);
76 tmp = mvs_read_port_vsr_data(mvi, phy_id);
77 tmp |= PHY_RST;
78 mvs_write_port_vsr_data(mvi, phy_id, tmp);
79 }
80}
81
82static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
83{
84 u32 tmp;
85 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
86 tmp = mvs_read_port_vsr_data(mvi, phy_id);
87 mvs_write_port_vsr_data(mvi, phy_id, tmp | 0x00800000);
88}
89
90static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
91{
92 mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4);
93 mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
94 mvs_write_port_vsr_addr(mvi, phy_id, 0x104);
95 mvs_write_port_vsr_data(mvi, phy_id, 0x00018080);
96 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
97 mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff);
98}
99
100static int __devinit mvs_94xx_init(struct mvs_info *mvi)
101{
102 void __iomem *regs = mvi->regs;
103 int i;
104 u32 tmp, cctl;
105
106 mvs_show_pcie_usage(mvi);
107 if (mvi->flags & MVF_FLAG_SOC) {
108 tmp = mr32(MVS_PHY_CTL);
109 tmp &= ~PCTL_PWR_OFF;
110 tmp |= PCTL_PHY_DSBL;
111 mw32(MVS_PHY_CTL, tmp);
112 }
113
114 /* Init Chip */
115 /* make sure RST is set; HBA_RST /should/ have done that for us */
116 cctl = mr32(MVS_CTL) & 0xFFFF;
117 if (cctl & CCTL_RST)
118 cctl &= ~CCTL_RST;
119 else
120 mw32_f(MVS_CTL, cctl | CCTL_RST);
121
122 if (mvi->flags & MVF_FLAG_SOC) {
123 tmp = mr32(MVS_PHY_CTL);
124 tmp &= ~PCTL_PWR_OFF;
125 tmp |= PCTL_COM_ON;
126 tmp &= ~PCTL_PHY_DSBL;
127 tmp |= PCTL_LINK_RST;
128 mw32(MVS_PHY_CTL, tmp);
129 msleep(100);
130 tmp &= ~PCTL_LINK_RST;
131 mw32(MVS_PHY_CTL, tmp);
132 msleep(100);
133 }
134
135 /* reset control */
136 mw32(MVS_PCS, 0); /* MVS_PCS */
137 mw32(MVS_STP_REG_SET_0, 0);
138 mw32(MVS_STP_REG_SET_1, 0);
139
140 /* init phys */
141 mvs_phy_hacks(mvi);
142
143 /* disable Multiplexing, enable phy implemented */
144 mw32(MVS_PORTS_IMP, 0xFF);
145
146
147 mw32(MVS_PA_VSR_ADDR, 0x00000104);
148 mw32(MVS_PA_VSR_PORT, 0x00018080);
149 mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8);
150 mw32(MVS_PA_VSR_PORT, 0x0084ffff);
151
152	/* set LED to blink during I/O */
153 mw32(MVS_PA_VSR_ADDR, 0x00000030);
154 tmp = mr32(MVS_PA_VSR_PORT);
155 tmp &= 0xFFFF00FF;
156 tmp |= 0x00003300;
157 mw32(MVS_PA_VSR_PORT, tmp);
158
159 mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
160 mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
161
162 mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
163 mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
164
165 mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
166 mw32(MVS_TX_LO, mvi->tx_dma);
167 mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
168
169 mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
170 mw32(MVS_RX_LO, mvi->rx_dma);
171 mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
172
173 for (i = 0; i < mvi->chip->n_phy; i++) {
174 mvs_94xx_phy_disable(mvi, i);
175 /* set phy local SAS address */
176 mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
177 (mvi->phy[i].dev_sas_addr));
178
179 mvs_94xx_enable_xmt(mvi, i);
180 mvs_94xx_phy_enable(mvi, i);
181
182 mvs_94xx_phy_reset(mvi, i, 1);
183 msleep(500);
184 mvs_94xx_detect_porttype(mvi, i);
185 }
186
187 if (mvi->flags & MVF_FLAG_SOC) {
188 /* set select registers */
189 writel(0x0E008000, regs + 0x000);
190 writel(0x59000008, regs + 0x004);
191 writel(0x20, regs + 0x008);
192 writel(0x20, regs + 0x00c);
193 writel(0x20, regs + 0x010);
194 writel(0x20, regs + 0x014);
195 writel(0x20, regs + 0x018);
196 writel(0x20, regs + 0x01c);
197 }
198 for (i = 0; i < mvi->chip->n_phy; i++) {
199 /* clear phy int status */
200 tmp = mvs_read_port_irq_stat(mvi, i);
201 tmp &= ~PHYEV_SIG_FIS;
202 mvs_write_port_irq_stat(mvi, i, tmp);
203
204 /* set phy int mask */
205 tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH |
206 PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR ;
207 mvs_write_port_irq_mask(mvi, i, tmp);
208
209 msleep(100);
210 mvs_update_phyinfo(mvi, i, 1);
211 }
212
213 /* FIXME: update wide port bitmaps */
214
215 /* little endian for open address and command table, etc. */
216 /*
217	 * it seems that (from the spec) turning on big-endian won't
218	 * do us any good on big-endian machines; needs further confirmation
219 */
220 cctl = mr32(MVS_CTL);
221 cctl |= CCTL_ENDIAN_CMD;
222 cctl |= CCTL_ENDIAN_DATA;
223 cctl &= ~CCTL_ENDIAN_OPEN;
224 cctl |= CCTL_ENDIAN_RSP;
225 mw32_f(MVS_CTL, cctl);
226
227 /* reset CMD queue */
228 tmp = mr32(MVS_PCS);
229 tmp |= PCS_CMD_RST;
230 mw32(MVS_PCS, tmp);
231	/* Interrupt coalescing may cause missed HW interrupts in some cases;
232	 * the maximum count is 0x1ff while our maximum slot count is 0x200,
233	 * which would make the count 0.
234 */
235 tmp = 0;
236 mw32(MVS_INT_COAL, tmp);
237
238 tmp = 0x100;
239 mw32(MVS_INT_COAL_TMOUT, tmp);
240
241 /* ladies and gentlemen, start your engines */
242 mw32(MVS_TX_CFG, 0);
243 mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
244 mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
245 /* enable CMD/CMPL_Q/RESP mode */
246 mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN |
247 PCS_CMD_EN | PCS_CMD_STOP_ERR);
248
249 /* enable completion queue interrupt */
250 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
251 CINT_DMA_PCIE);
252 tmp |= CINT_PHY_MASK;
253 mw32(MVS_INT_MASK, tmp);
254
255 /* Enable SRS interrupt */
256 mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
257
258 return 0;
259}
260
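The (x >> 16) >> 16 idiom used above when programming the *_HI registers avoids a single shift by 32, which is undefined when dma_addr_t is only 32 bits wide. A clearer equivalent, sketched with the kernel's upper_32_bits() helper:

	mw32(MVS_CMD_LIST_HI, upper_32_bits(mvi->slot_dma));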
261static int mvs_94xx_ioremap(struct mvs_info *mvi)
262{
263 if (!mvs_ioremap(mvi, 2, -1)) {
264 mvi->regs_ex = mvi->regs + 0x10200;
265 mvi->regs += 0x20000;
266 if (mvi->id == 1)
267 mvi->regs += 0x4000;
268 return 0;
269 }
270 return -1;
271}
272
273static void mvs_94xx_iounmap(struct mvs_info *mvi)
274{
275 if (mvi->regs) {
276 mvi->regs -= 0x20000;
277 if (mvi->id == 1)
278 mvi->regs -= 0x4000;
279 mvs_iounmap(mvi->regs);
280 }
281}
282
283static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
284{
285 void __iomem *regs = mvi->regs_ex;
286 u32 tmp;
287
288 tmp = mr32(MVS_GBL_CTL);
289 tmp |= (IRQ_SAS_A | IRQ_SAS_B);
290 mw32(MVS_GBL_INT_STAT, tmp);
291 writel(tmp, regs + 0x0C);
292 writel(tmp, regs + 0x10);
293 writel(tmp, regs + 0x14);
294 writel(tmp, regs + 0x18);
295 mw32(MVS_GBL_CTL, tmp);
296}
297
298static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
299{
300 void __iomem *regs = mvi->regs_ex;
301 u32 tmp;
302
303 tmp = mr32(MVS_GBL_CTL);
304
305 tmp &= ~(IRQ_SAS_A | IRQ_SAS_B);
306 mw32(MVS_GBL_INT_STAT, tmp);
307 writel(tmp, regs + 0x0C);
308 writel(tmp, regs + 0x10);
309 writel(tmp, regs + 0x14);
310 writel(tmp, regs + 0x18);
311 mw32(MVS_GBL_CTL, tmp);
312}
313
314static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
315{
316 void __iomem *regs = mvi->regs_ex;
317 u32 stat = 0;
318 if (!(mvi->flags & MVF_FLAG_SOC)) {
319 stat = mr32(MVS_GBL_INT_STAT);
320
321 if (!(stat & (IRQ_SAS_A | IRQ_SAS_B)))
322 return 0;
323 }
324 return stat;
325}
326
327static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
328{
329 void __iomem *regs = mvi->regs;
330
331 if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
332 ((stat & IRQ_SAS_B) && mvi->id == 1)) {
333 mw32_f(MVS_INT_STAT, CINT_DONE);
334 #ifndef MVS_USE_TASKLET
335 spin_lock(&mvi->lock);
336 #endif
337 mvs_int_full(mvi);
338 #ifndef MVS_USE_TASKLET
339 spin_unlock(&mvi->lock);
340 #endif
341 }
342 return IRQ_HANDLED;
343}
344
345static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
346{
347 u32 tmp;
348 mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32));
349 do {
350 tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3));
351 } while (tmp & 1 << (slot_idx % 32));
352}
353
354static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
355 u32 tfs)
356{
357 void __iomem *regs = mvi->regs;
358 u32 tmp;
359
360 if (type == PORT_TYPE_SATA) {
361 tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
362 mw32(MVS_INT_STAT_SRS_0, tmp);
363 }
364 mw32(MVS_INT_STAT, CINT_CI_STOP);
365 tmp = mr32(MVS_PCS) | 0xFF00;
366 mw32(MVS_PCS, tmp);
367}
368
369static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
370{
371 void __iomem *regs = mvi->regs;
372 u32 tmp;
373 u8 reg_set = *tfs;
374
375 if (*tfs == MVS_ID_NOT_MAPPED)
376 return;
377
378 mvi->sata_reg_set &= ~bit(reg_set);
379 if (reg_set < 32) {
380 w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
381 tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set;
382 if (tmp)
383 mw32(MVS_INT_STAT_SRS_0, tmp);
384 } else {
385 w_reg_set_enable(reg_set, mvi->sata_reg_set);
386 tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set;
387 if (tmp)
388 mw32(MVS_INT_STAT_SRS_1, tmp);
389 }
390
391 *tfs = MVS_ID_NOT_MAPPED;
392
393 return;
394}
395
396static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
397{
398 int i;
399 void __iomem *regs = mvi->regs;
400
401 if (*tfs != MVS_ID_NOT_MAPPED)
402 return 0;
403
404 i = mv_ffc64(mvi->sata_reg_set);
405 if (i > 32) {
406 mvi->sata_reg_set |= bit(i);
407 w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
408 *tfs = i;
409 return 0;
410 } else if (i >= 0) {
411 mvi->sata_reg_set |= bit(i);
412 w_reg_set_enable(i, (u32)mvi->sata_reg_set);
413 *tfs = i;
414 return 0;
415 }
416 return MVS_ID_NOT_MAPPED;
417}
418
419static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
420{
421 int i;
422 struct scatterlist *sg;
423 struct mvs_prd *buf_prd = prd;
424 for_each_sg(scatter, sg, nr, i) {
425 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
426 buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
427 buf_prd++;
428 }
429}
430
431static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
432{
433 u32 phy_st;
434 phy_st = mvs_read_phy_ctl(mvi, i);
435 if (phy_st & PHY_READY_MASK) /* phy ready */
436 return 1;
437 return 0;
438}
439
440static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
441 struct sas_identify_frame *id)
442{
443 int i;
444 u32 id_frame[7];
445
446 for (i = 0; i < 7; i++) {
447 mvs_write_port_cfg_addr(mvi, port_id,
448 CONFIG_ID_FRAME0 + i * 4);
449 id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
450 }
451 memcpy(id, id_frame, 28);
452}
453
454static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
455 struct sas_identify_frame *id)
456{
457 int i;
458 u32 id_frame[7];
459
460 /* mvs_hexdump(28, (u8 *)id_frame, 0); */
461 for (i = 0; i < 7; i++) {
462 mvs_write_port_cfg_addr(mvi, port_id,
463 CONFIG_ATT_ID_FRAME0 + i * 4);
464 id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
465 mv_dprintk("94xx phy %d atta frame %d %x.\n",
466 port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
467 }
468 /* mvs_hexdump(28, (u8 *)id_frame, 0); */
469 memcpy(id, id_frame, 28);
470}
471
472static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id)
473{
474 u32 att_dev_info = 0;
475
476 att_dev_info |= id->dev_type;
477 if (id->stp_iport)
478 att_dev_info |= PORT_DEV_STP_INIT;
479 if (id->smp_iport)
480 att_dev_info |= PORT_DEV_SMP_INIT;
481 if (id->ssp_iport)
482 att_dev_info |= PORT_DEV_SSP_INIT;
483 if (id->stp_tport)
484 att_dev_info |= PORT_DEV_STP_TRGT;
485 if (id->smp_tport)
486 att_dev_info |= PORT_DEV_SMP_TRGT;
487 if (id->ssp_tport)
488 att_dev_info |= PORT_DEV_SSP_TRGT;
489
490 att_dev_info |= (u32)id->phy_id<<24;
491 return att_dev_info;
492}
493
494static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id)
495{
496 return mvs_94xx_make_dev_info(id);
497}
498
499static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
500 struct sas_identify_frame *id)
501{
502 struct mvs_phy *phy = &mvi->phy[i];
503 struct asd_sas_phy *sas_phy = &phy->sas_phy;
504 mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status);
505 sas_phy->linkrate =
506 (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
507 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
508 sas_phy->linkrate += 0x8;
509 mv_dprintk("get link rate is %d\n", sas_phy->linkrate);
510 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
511 phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
512 mvs_94xx_get_dev_identify_frame(mvi, i, id);
513 phy->dev_info = mvs_94xx_make_dev_info(id);
514
515 if (phy->phy_type & PORT_TYPE_SAS) {
516 mvs_94xx_get_att_identify_frame(mvi, i, id);
517 phy->att_dev_info = mvs_94xx_make_att_info(id);
518 phy->att_dev_sas_addr = *(u64 *)id->sas_addr;
519 } else {
520 phy->att_dev_info = PORT_DEV_STP_TRGT | 1;
521 }
522
523}
524
525void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
526 struct sas_phy_linkrates *rates)
527{
528 /* TODO */
529}
530
531static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
532{
533 u32 tmp;
534 void __iomem *regs = mvi->regs;
535 tmp = mr32(MVS_STP_REG_SET_0);
536 mw32(MVS_STP_REG_SET_0, 0);
537 mw32(MVS_STP_REG_SET_0, tmp);
538 tmp = mr32(MVS_STP_REG_SET_1);
539 mw32(MVS_STP_REG_SET_1, 0);
540 mw32(MVS_STP_REG_SET_1, tmp);
541}
542
543
544u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
545{
546 void __iomem *regs = mvi->regs_ex - 0x10200;
547 return mr32(SPI_RD_DATA_REG_94XX);
548}
549
550void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
551{
552 void __iomem *regs = mvi->regs_ex - 0x10200;
553 mw32(SPI_RD_DATA_REG_94XX, data);
554}
555
556
557int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
558 u32 *dwCmd,
559 u8 cmd,
560 u8 read,
561 u8 length,
562 u32 addr
563 )
564{
565 void __iomem *regs = mvi->regs_ex - 0x10200;
566 u32 dwTmp;
567
568 dwTmp = ((u32)cmd << 8) | ((u32)length << 4);
569 if (read)
570 dwTmp |= SPI_CTRL_READ_94XX;
571
572 if (addr != MV_MAX_U32) {
573 mw32(SPI_ADDR_REG_94XX, (addr & 0x0003FFFFL));
574 dwTmp |= SPI_ADDR_VLD_94XX;
575 }
576
577 *dwCmd = dwTmp;
578 return 0;
579}
580
581
582int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
583{
584 void __iomem *regs = mvi->regs_ex - 0x10200;
585 mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX);
586
587 return 0;
588}
589
590int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
591{
592 void __iomem *regs = mvi->regs_ex - 0x10200;
593 u32 i, dwTmp;
594
595 for (i = 0; i < timeout; i++) {
596 dwTmp = mr32(SPI_CTRL_REG_94XX);
597 if (!(dwTmp & SPI_CTRL_SpiStart_94XX))
598 return 0;
599 msleep(10);
600 }
601
602 return -1;
603}
604
605#ifndef DISABLE_HOTPLUG_DMA_FIX
606void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
607{
608 int i;
609 struct mvs_prd *buf_prd = prd;
610 buf_prd += from;
611 for (i = 0; i < MAX_SG_ENTRY - from; i++) {
612 buf_prd->addr = cpu_to_le64(buf_dma);
613 buf_prd->im_len.len = cpu_to_le32(buf_len);
614 ++buf_prd;
615 }
616}
617#endif
618
619const struct mvs_dispatch mvs_94xx_dispatch = {
620 "mv94xx",
621 mvs_94xx_init,
622 NULL,
623 mvs_94xx_ioremap,
624 mvs_94xx_iounmap,
625 mvs_94xx_isr,
626 mvs_94xx_isr_status,
627 mvs_94xx_interrupt_enable,
628 mvs_94xx_interrupt_disable,
629 mvs_read_phy_ctl,
630 mvs_write_phy_ctl,
631 mvs_read_port_cfg_data,
632 mvs_write_port_cfg_data,
633 mvs_write_port_cfg_addr,
634 mvs_read_port_vsr_data,
635 mvs_write_port_vsr_data,
636 mvs_write_port_vsr_addr,
637 mvs_read_port_irq_stat,
638 mvs_write_port_irq_stat,
639 mvs_read_port_irq_mask,
640 mvs_write_port_irq_mask,
641 mvs_get_sas_addr,
642 mvs_94xx_command_active,
643 mvs_94xx_issue_stop,
644 mvs_start_delivery,
645 mvs_rx_update,
646 mvs_int_full,
647 mvs_94xx_assign_reg_set,
648 mvs_94xx_free_reg_set,
649 mvs_get_prd_size,
650 mvs_get_prd_count,
651 mvs_94xx_make_prd,
652 mvs_94xx_detect_porttype,
653 mvs_94xx_oob_done,
654 mvs_94xx_fix_phy_info,
655 NULL,
656 mvs_94xx_phy_set_link_rate,
657 mvs_hw_max_link_rate,
658 mvs_94xx_phy_disable,
659 mvs_94xx_phy_enable,
660 mvs_94xx_phy_reset,
661 NULL,
662 mvs_94xx_clear_active_cmds,
663 mvs_94xx_spi_read_data,
664 mvs_94xx_spi_write_data,
665 mvs_94xx_spi_buildcmd,
666 mvs_94xx_spi_issuecmd,
667 mvs_94xx_spi_waitdataready,
668#ifndef DISABLE_HOTPLUG_DMA_FIX
669 mvs_94xx_fix_dma,
670#endif
671};
672
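A note on the SPI accessors above: mvs_94xx_ioremap() sets mvi->regs_ex to the start of the mapped BAR plus 0x10200, so subtracting 0x10200 recovers the BAR base, and the SPI_*_94XX offsets (0xc800 and up, defined in mv_94xx.h below) are applied relative to that base. A sketch of the equivalent addressing:

	void __iomem *bar_base = mvi->regs_ex - 0x10200; /* BAR base, per mvs_94xx_ioremap() */
	u32 ctl = readl(bar_base + SPI_CTRL_REG_94XX);   /* 0xc800 */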
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
new file mode 100644
index 000000000000..23ed9b164669
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -0,0 +1,222 @@
1/*
 2 * Marvell 88SE94xx hardware specific header file
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#ifndef _MVS94XX_REG_H_
26#define _MVS94XX_REG_H_
27
28#include <linux/types.h>
29
30#define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS
31
32enum hw_registers {
33 MVS_GBL_CTL = 0x04, /* global control */
34 MVS_GBL_INT_STAT = 0x00, /* global irq status */
35 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
36
37 MVS_PHY_CTL = 0x40, /* SOC PHY Control */
38 MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */
39
40 MVS_GBL_PORT_TYPE = 0xa0, /* port type */
41
42 MVS_CTL = 0x100, /* SAS/SATA port configuration */
43 MVS_PCS = 0x104, /* SAS/SATA port control/status */
44 MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
45 MVS_CMD_LIST_HI = 0x10C,
46 MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
47 MVS_RX_FIS_HI = 0x114,
48 MVS_STP_REG_SET_0 = 0x118, /* STP/SATA Register Set Enable */
49 MVS_STP_REG_SET_1 = 0x11C,
50 MVS_TX_CFG = 0x120, /* TX configuration */
51 MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
52 MVS_TX_HI = 0x128,
53
54 MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
55 MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
56 MVS_RX_CFG = 0x134, /* RX configuration */
57 MVS_RX_LO = 0x138, /* RX (completion) ring addr */
58 MVS_RX_HI = 0x13C,
59 MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
60
61 MVS_INT_COAL = 0x148, /* Int coalescing config */
62 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
63 MVS_INT_STAT = 0x150, /* Central int status */
64 MVS_INT_MASK = 0x154, /* Central int enable */
65 MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */
66 MVS_INT_MASK_SRS_0 = 0x15C,
67 MVS_INT_STAT_SRS_1 = 0x160,
68 MVS_INT_MASK_SRS_1 = 0x164,
69 MVS_NON_NCQ_ERR_0 = 0x168, /* SRS Non-specific NCQ Error */
70 MVS_NON_NCQ_ERR_1 = 0x16C,
71 MVS_CMD_ADDR = 0x170, /* Command register port (addr) */
72 MVS_CMD_DATA = 0x174, /* Command register port (data) */
73 MVS_MEM_PARITY_ERR = 0x178, /* Memory parity error */
74
75 /* ports 1-3 follow after this */
76 MVS_P0_INT_STAT = 0x180, /* port0 interrupt status */
77 MVS_P0_INT_MASK = 0x184, /* port0 interrupt mask */
78 /* ports 5-7 follow after this */
79 MVS_P4_INT_STAT = 0x1A0, /* Port4 interrupt status */
80 MVS_P4_INT_MASK = 0x1A4, /* Port4 interrupt enable mask */
81
82 /* ports 1-3 follow after this */
83 MVS_P0_SER_CTLSTAT = 0x1D0, /* port0 serial control/status */
84 /* ports 5-7 follow after this */
85 MVS_P4_SER_CTLSTAT = 0x1E0, /* port4 serial control/status */
86
87 /* ports 1-3 follow after this */
88 MVS_P0_CFG_ADDR = 0x200, /* port0 phy register address */
89 MVS_P0_CFG_DATA = 0x204, /* port0 phy register data */
90 /* ports 5-7 follow after this */
91 MVS_P4_CFG_ADDR = 0x220, /* Port4 config address */
92 MVS_P4_CFG_DATA = 0x224, /* Port4 config data */
93
94 /* phys 1-3 follow after this */
95 MVS_P0_VSR_ADDR = 0x250, /* phy0 VSR address */
96 MVS_P0_VSR_DATA = 0x254, /* phy0 VSR data */
97 /* phys 1-3 follow after this */
98 /* multiplexing */
99 MVS_P4_VSR_ADDR = 0x250, /* phy4 VSR address */
100 MVS_P4_VSR_DATA = 0x254, /* phy4 VSR data */
101 MVS_PA_VSR_ADDR = 0x290, /* All port VSR addr */
102 MVS_PA_VSR_PORT = 0x294, /* All port VSR data */
103};
104
105enum pci_cfg_registers {
106 PCR_PHY_CTL = 0x40,
107 PCR_PHY_CTL2 = 0x90,
108 PCR_DEV_CTRL = 0x78,
109 PCR_LINK_STAT = 0x82,
110};
111
112/* SAS/SATA Vendor Specific Port Registers */
113enum sas_sata_vsp_regs {
114 VSR_PHY_STAT = 0x00 * 4, /* Phy Status */
115 VSR_PHY_MODE1 = 0x01 * 4, /* phy tx */
116 VSR_PHY_MODE2 = 0x02 * 4, /* tx scc */
117 VSR_PHY_MODE3 = 0x03 * 4, /* pll */
118 VSR_PHY_MODE4 = 0x04 * 4, /* VCO */
119 VSR_PHY_MODE5 = 0x05 * 4, /* Rx */
120 VSR_PHY_MODE6 = 0x06 * 4, /* CDR */
121 VSR_PHY_MODE7 = 0x07 * 4, /* Impedance */
122 VSR_PHY_MODE8 = 0x08 * 4, /* Voltage */
123 VSR_PHY_MODE9 = 0x09 * 4, /* Test */
124 VSR_PHY_MODE10 = 0x0A * 4, /* Power */
125 VSR_PHY_MODE11 = 0x0B * 4, /* Phy Mode */
126	VSR_PHY_VS0		= 0x0C * 4, /* Vendor Specific 0 */
127	VSR_PHY_VS1		= 0x0D * 4, /* Vendor Specific 1 */
128};
129
130enum chip_register_bits {
131 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
132 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
133 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (12),
134 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
135 (0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
136};
137
138enum pci_interrupt_cause {
139 /* MAIN_IRQ_CAUSE (R10200) Bits*/
140 IRQ_COM_IN_I2O_IOP0 = (1 << 0),
141 IRQ_COM_IN_I2O_IOP1 = (1 << 1),
142 IRQ_COM_IN_I2O_IOP2 = (1 << 2),
143 IRQ_COM_IN_I2O_IOP3 = (1 << 3),
144 IRQ_COM_OUT_I2O_HOS0 = (1 << 4),
145 IRQ_COM_OUT_I2O_HOS1 = (1 << 5),
146 IRQ_COM_OUT_I2O_HOS2 = (1 << 6),
147 IRQ_COM_OUT_I2O_HOS3 = (1 << 7),
148 IRQ_PCIF_TO_CPU_DRBL0 = (1 << 8),
149 IRQ_PCIF_TO_CPU_DRBL1 = (1 << 9),
150 IRQ_PCIF_TO_CPU_DRBL2 = (1 << 10),
151 IRQ_PCIF_TO_CPU_DRBL3 = (1 << 11),
152 IRQ_PCIF_DRBL0 = (1 << 12),
153 IRQ_PCIF_DRBL1 = (1 << 13),
154 IRQ_PCIF_DRBL2 = (1 << 14),
155 IRQ_PCIF_DRBL3 = (1 << 15),
156 IRQ_XOR_A = (1 << 16),
157 IRQ_XOR_B = (1 << 17),
158 IRQ_SAS_A = (1 << 18),
159 IRQ_SAS_B = (1 << 19),
160 IRQ_CPU_CNTRL = (1 << 20),
161 IRQ_GPIO = (1 << 21),
162 IRQ_UART = (1 << 22),
163 IRQ_SPI = (1 << 23),
164 IRQ_I2C = (1 << 24),
165 IRQ_SGPIO = (1 << 25),
166 IRQ_COM_ERR = (1 << 29),
167 IRQ_I2O_ERR = (1 << 30),
168 IRQ_PCIE_ERR = (1 << 31),
169};
170
171#define MAX_SG_ENTRY 255
172
173struct mvs_prd_imt {
174 __le32 len:22;
175 u8 _r_a:2;
176 u8 misc_ctl:4;
177 u8 inter_sel:4;
178};
179
180struct mvs_prd {
181 /* 64-bit buffer address */
182 __le64 addr;
183 /* 22-bit length */
184 struct mvs_prd_imt im_len;
185} __attribute__ ((packed));
186
187#define SPI_CTRL_REG_94XX 0xc800
188#define SPI_ADDR_REG_94XX 0xc804
189#define SPI_WR_DATA_REG_94XX 0xc808
190#define SPI_RD_DATA_REG_94XX 0xc80c
191#define SPI_CTRL_READ_94XX (1U << 2)
192#define SPI_ADDR_VLD_94XX (1U << 1)
193#define SPI_CTRL_SpiStart_94XX (1U << 0)
194
195#define mv_ffc(x) ffz(x)
196
197static inline int
198mv_ffc64(u64 v)
199{
200 int i;
201 i = mv_ffc((u32)v);
202 if (i >= 0)
203 return i;
204 i = mv_ffc((u32)(v>>32));
205
206 if (i != 0)
207 return 32 + i;
208
209 return -1;
210}
211
212#define r_reg_set_enable(i) \
213 (((i) > 31) ? mr32(MVS_STP_REG_SET_1) : \
214 mr32(MVS_STP_REG_SET_0))
215
216#define w_reg_set_enable(i, tmp) \
217 (((i) > 31) ? mw32(MVS_STP_REG_SET_1, tmp) : \
218 mw32(MVS_STP_REG_SET_0, tmp))
219
220extern const struct mvs_dispatch mvs_94xx_dispatch;
221#endif
222
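The PHY_NEG_SPP_PHYS_LINK_RATE_MASK definitions above are what mvs_94xx_fix_phy_info() uses to derive the negotiated rate; adding 0x8 maps the hardware field onto the libsas enum, where SAS_LINK_RATE_1_5_GBPS is 8. A minimal sketch of that decode (an interpretation of the "+= 0x8" in the .c file, not a separate helper in this patch):

	u32 rate = (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
		   PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
	sas_phy->linkrate = rate + SAS_LINK_RATE_1_5_GBPS; /* 1.5/3.0/6.0 Gbps */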
diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h
new file mode 100644
index 000000000000..a67e1c4172f9
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_chips.h
@@ -0,0 +1,280 @@
1/*
2 * Marvell 88SE64xx/88SE94xx register IO interface
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25
26#ifndef _MV_CHIPS_H_
27#define _MV_CHIPS_H_
28
29#define mr32(reg) readl(regs + reg)
30#define mw32(reg, val) writel((val), regs + reg)
31#define mw32_f(reg, val) do { \
32 mw32(reg, val); \
33 mr32(reg); \
34 } while (0)
35
36#define iow32(reg, val) outl(val, (unsigned long)(regs + reg))
37#define ior32(reg) inl((unsigned long)(regs + reg))
38#define iow16(reg, val) 	outw(val, (unsigned long)(regs + reg))
39#define ior16(reg) inw((unsigned long)(regs + reg))
40#define iow8(reg, val) 		outb(val, (unsigned long)(regs + reg))
41#define ior8(reg) inb((unsigned long)(regs + reg))
42
43static inline u32 mvs_cr32(struct mvs_info *mvi, u32 addr)
44{
45 void __iomem *regs = mvi->regs;
46 mw32(MVS_CMD_ADDR, addr);
47 return mr32(MVS_CMD_DATA);
48}
49
50static inline void mvs_cw32(struct mvs_info *mvi, u32 addr, u32 val)
51{
52 void __iomem *regs = mvi->regs;
53 mw32(MVS_CMD_ADDR, addr);
54 mw32(MVS_CMD_DATA, val);
55}
56
57static inline u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
58{
59 void __iomem *regs = mvi->regs;
60 return (port < 4) ? mr32(MVS_P0_SER_CTLSTAT + port * 4) :
61 mr32(MVS_P4_SER_CTLSTAT + (port - 4) * 4);
62}
63
64static inline void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
65{
66 void __iomem *regs = mvi->regs;
67 if (port < 4)
68 mw32(MVS_P0_SER_CTLSTAT + port * 4, val);
69 else
70 mw32(MVS_P4_SER_CTLSTAT + (port - 4) * 4, val);
71}
72
73static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off,
74 u32 off2, u32 port)
75{
76 void __iomem *regs = mvi->regs + off;
77 void __iomem *regs2 = mvi->regs + off2;
78 return (port < 4) ? readl(regs + port * 8) :
79 readl(regs2 + (port - 4) * 8);
80}
81
82static inline void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
83 u32 port, u32 val)
84{
85 void __iomem *regs = mvi->regs + off;
86 void __iomem *regs2 = mvi->regs + off2;
87 if (port < 4)
88 writel(val, regs + port * 8);
89 else
90 writel(val, regs2 + (port - 4) * 8);
91}
92
93static inline u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
94{
95 return mvs_read_port(mvi, MVS_P0_CFG_DATA,
96 MVS_P4_CFG_DATA, port);
97}
98
99static inline void mvs_write_port_cfg_data(struct mvs_info *mvi,
100 u32 port, u32 val)
101{
102 mvs_write_port(mvi, MVS_P0_CFG_DATA,
103 MVS_P4_CFG_DATA, port, val);
104}
105
106static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi,
107 u32 port, u32 addr)
108{
109 mvs_write_port(mvi, MVS_P0_CFG_ADDR,
110 MVS_P4_CFG_ADDR, port, addr);
111 mdelay(10);
112}
113
114static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
115{
116 return mvs_read_port(mvi, MVS_P0_VSR_DATA,
117 MVS_P4_VSR_DATA, port);
118}
119
120static inline void mvs_write_port_vsr_data(struct mvs_info *mvi,
121 u32 port, u32 val)
122{
123 mvs_write_port(mvi, MVS_P0_VSR_DATA,
124 MVS_P4_VSR_DATA, port, val);
125}
126
127static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi,
128 u32 port, u32 addr)
129{
130 mvs_write_port(mvi, MVS_P0_VSR_ADDR,
131 MVS_P4_VSR_ADDR, port, addr);
132 mdelay(10);
133}
134
135static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
136{
137 return mvs_read_port(mvi, MVS_P0_INT_STAT,
138 MVS_P4_INT_STAT, port);
139}
140
141static inline void mvs_write_port_irq_stat(struct mvs_info *mvi,
142 u32 port, u32 val)
143{
144 mvs_write_port(mvi, MVS_P0_INT_STAT,
145 MVS_P4_INT_STAT, port, val);
146}
147
148static inline u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
149{
150 return mvs_read_port(mvi, MVS_P0_INT_MASK,
151 MVS_P4_INT_MASK, port);
152
153}
154
155static inline void mvs_write_port_irq_mask(struct mvs_info *mvi,
156 u32 port, u32 val)
157{
158 mvs_write_port(mvi, MVS_P0_INT_MASK,
159 MVS_P4_INT_MASK, port, val);
160}
161
162static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
163{
164 u32 tmp;
165
166 /* workaround for SATA R-ERR, to ignore phy glitch */
167 tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
168 tmp &= ~(1 << 9);
169 tmp |= (1 << 10);
170 mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
171
172 /* enable retry 127 times */
173 mvs_cw32(mvi, CMD_SAS_CTL1, 0x7f7f);
174
175 /* extend open frame timeout to max */
176 tmp = mvs_cr32(mvi, CMD_SAS_CTL0);
177 tmp &= ~0xffff;
178 tmp |= 0x3fff;
179 mvs_cw32(mvi, CMD_SAS_CTL0, tmp);
180
181 /* workaround for WDTIMEOUT , set to 550 ms */
182 mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000);
183
184 /* not to halt for different port op during wideport link change */
185 mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d);
186
187 /* workaround for Seagate disk not-found OOB sequence, recv
188 * COMINIT before sending out COMWAKE */
189 tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
190 tmp &= 0x0000ffff;
191 tmp |= 0x00fa0000;
192 mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
193
194 tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
195 tmp &= 0x1fffffff;
196 tmp |= (2U << 29); /* 8 ms retry */
197 mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
198}
199
200static inline void mvs_int_sata(struct mvs_info *mvi)
201{
202 u32 tmp;
203 void __iomem *regs = mvi->regs;
204 tmp = mr32(MVS_INT_STAT_SRS_0);
205 if (tmp)
206 mw32(MVS_INT_STAT_SRS_0, tmp);
207 MVS_CHIP_DISP->clear_active_cmds(mvi);
208}
209
210static inline void mvs_int_full(struct mvs_info *mvi)
211{
212 void __iomem *regs = mvi->regs;
213 u32 tmp, stat;
214 int i;
215
216 stat = mr32(MVS_INT_STAT);
217 mvs_int_rx(mvi, false);
218
219 for (i = 0; i < mvi->chip->n_phy; i++) {
220 tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
221 if (tmp)
222 mvs_int_port(mvi, i, tmp);
223 }
224
225 if (stat & CINT_SRS)
226 mvs_int_sata(mvi);
227
228 mw32(MVS_INT_STAT, stat);
229}
230
231static inline void mvs_start_delivery(struct mvs_info *mvi, u32 tx)
232{
233 void __iomem *regs = mvi->regs;
234 mw32(MVS_TX_PROD_IDX, tx);
235}
236
237static inline u32 mvs_rx_update(struct mvs_info *mvi)
238{
239 void __iomem *regs = mvi->regs;
240 return mr32(MVS_RX_CONS_IDX);
241}
242
243static inline u32 mvs_get_prd_size(void)
244{
245 return sizeof(struct mvs_prd);
246}
247
248static inline u32 mvs_get_prd_count(void)
249{
250 return MAX_SG_ENTRY;
251}
252
253static inline void mvs_show_pcie_usage(struct mvs_info *mvi)
254{
255 u16 link_stat, link_spd;
256 const char *spd[] = {
257		"Unknown",
258 "2.5",
259 "5.0",
260 };
261 if (mvi->flags & MVF_FLAG_SOC || mvi->id > 0)
262 return;
263
264 pci_read_config_word(mvi->pdev, PCR_LINK_STAT, &link_stat);
265 link_spd = (link_stat & PLS_LINK_SPD) >> PLS_LINK_SPD_OFFS;
266 if (link_spd >= 3)
267 link_spd = 0;
268 dev_printk(KERN_INFO, mvi->dev,
269 "mvsas: PCI-E x%u, Bandwidth Usage: %s Gbps\n",
270 (link_stat & PLS_NEG_LINK_WD) >> PLS_NEG_LINK_WD_OFFS,
271 spd[link_spd]);
272}
273
274static inline u32 mvs_hw_max_link_rate(void)
275{
276 return MAX_LINK_RATE;
277}
278
279#endif /* _MV_CHIPS_H_ */
280
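The mr32()/mw32() accessors above expect a local void __iomem *regs to be in scope, which is why every function that uses them declares one. A typical read-modify-write through them, mirroring what mvs_94xx_init() does when resetting the command queue:

	void __iomem *regs = mvi->regs;
	u32 tmp;

	tmp = mr32(MVS_PCS);
	tmp |= PCS_CMD_RST;	/* reset command issue logic */
	mw32(MVS_PCS, tmp);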
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
new file mode 100644
index 000000000000..f8cb9defb961
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -0,0 +1,502 @@
1/*
 2 * Marvell 88SE64xx/88SE94xx constants header file
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#ifndef _MV_DEFS_H_
26#define _MV_DEFS_H_
27
28
29enum chip_flavors {
30 chip_6320,
31 chip_6440,
32 chip_6485,
33 chip_9480,
34 chip_9180,
35};
36
37/* driver compile-time configuration */
38enum driver_configuration {
39 MVS_SLOTS = 512, /* command slots */
40 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
41 MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
42 /* software requires power-of-2
43 ring size */
44 MVS_SOC_SLOTS = 64,
45 MVS_SOC_TX_RING_SZ = MVS_SOC_SLOTS * 2,
46 MVS_SOC_RX_RING_SZ = MVS_SOC_SLOTS * 2,
47
48 MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */
49 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
50 MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
51 MVS_OAF_SZ = 64, /* Open address frame buffer size */
52 MVS_QUEUE_SIZE = 32, /* Support Queue depth */
53 MVS_CAN_QUEUE = MVS_SLOTS - 2, /* SCSI Queue depth */
54 MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2,
55};
56
57/* unchangeable hardware details */
58enum hardware_details {
59 MVS_MAX_PHYS = 8, /* max. possible phys */
60 MVS_MAX_PORTS = 8, /* max. possible ports */
61 MVS_SOC_PHYS = 4, /* soc phys */
62	MVS_SOC_PORTS		= 4,	/* soc ports */
63 MVS_MAX_DEVICES = 1024, /* max supported device */
64};
65
66/* peripheral registers (BAR2) */
67enum peripheral_registers {
68 SPI_CTL = 0x10, /* EEPROM control */
69 SPI_CMD = 0x14, /* EEPROM command */
70 SPI_DATA = 0x18, /* EEPROM data */
71};
72
73enum peripheral_register_bits {
74 TWSI_RDY = (1U << 7), /* EEPROM interface ready */
75 TWSI_RD = (1U << 4), /* EEPROM read access */
76
77 SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */
78};
79
80enum hw_register_bits {
81 /* MVS_GBL_CTL */
82 INT_EN = (1U << 1), /* Global int enable */
83 HBA_RST = (1U << 0), /* HBA reset */
84
85 /* MVS_GBL_INT_STAT */
86 INT_XOR = (1U << 4), /* XOR engine event */
87 INT_SAS_SATA = (1U << 0), /* SAS/SATA event */
88
89 /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */
90 SATA_TARGET = (1U << 16), /* port0 SATA target enable */
91 MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */
92 MODE_AUTO_DET_PORT6 = (1U << 14),
93 MODE_AUTO_DET_PORT5 = (1U << 13),
94 MODE_AUTO_DET_PORT4 = (1U << 12),
95 MODE_AUTO_DET_PORT3 = (1U << 11),
96 MODE_AUTO_DET_PORT2 = (1U << 10),
97 MODE_AUTO_DET_PORT1 = (1U << 9),
98 MODE_AUTO_DET_PORT0 = (1U << 8),
99 MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
100 MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
101 MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
102 MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
103 MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */
104 MODE_SAS_PORT6_MASK = (1U << 6),
105 MODE_SAS_PORT5_MASK = (1U << 5),
106 MODE_SAS_PORT4_MASK = (1U << 4),
107 MODE_SAS_PORT3_MASK = (1U << 3),
108 MODE_SAS_PORT2_MASK = (1U << 2),
109 MODE_SAS_PORT1_MASK = (1U << 1),
110 MODE_SAS_PORT0_MASK = (1U << 0),
111 MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
112 MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
113 MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
114 MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
115
116 /* SAS_MODE value may be
117 * dictated (in hw) by values
118 * of SATA_TARGET & AUTO_DET
119 */
120
121 /* MVS_TX_CFG */
122 TX_EN = (1U << 16), /* Enable TX */
123 TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */
124
125 /* MVS_RX_CFG */
126 RX_EN = (1U << 16), /* Enable RX */
127 RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */
128
129 /* MVS_INT_COAL */
130 COAL_EN = (1U << 16), /* Enable int coalescing */
131
132 /* MVS_INT_STAT, MVS_INT_MASK */
133 CINT_I2C = (1U << 31), /* I2C event */
134 CINT_SW0 = (1U << 30), /* software event 0 */
135 CINT_SW1 = (1U << 29), /* software event 1 */
136 CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */
137 CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
138 CINT_MEM = (1U << 26), /* int mem parity err */
139 CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
140 CINT_SRS = (1U << 3), /* SRS event */
141 CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
142 CINT_DONE = (1U << 0), /* cmd completion */
143
144 /* shl for ports 1-3 */
145 CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */
146 CINT_PORT = (1U << 8), /* port0 event */
147 CINT_PORT_MASK_OFFSET = 8,
148 CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET),
149 CINT_PHY_MASK_OFFSET = 4,
150 CINT_PHY_MASK = (0x0F << CINT_PHY_MASK_OFFSET),
151
152 /* TX (delivery) ring bits */
153 TXQ_CMD_SHIFT = 29,
154 TXQ_CMD_SSP = 1, /* SSP protocol */
155 TXQ_CMD_SMP = 2, /* SMP protocol */
156 TXQ_CMD_STP = 3, /* STP/SATA protocol */
157 TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */
158 TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
159 TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
160 TXQ_MODE_TARGET = 0,
161 TXQ_MODE_INITIATOR = 1,
162 TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */
163 TXQ_PRI_NORMAL = 0,
164 TXQ_PRI_HIGH = 1,
165 TXQ_SRS_SHIFT = 20, /* SATA register set */
166 TXQ_SRS_MASK = 0x7f,
167 TXQ_PHY_SHIFT = 12, /* PHY bitmap */
168 TXQ_PHY_MASK = 0xff,
169 TXQ_SLOT_MASK = 0xfff, /* slot number */
170
171 /* RX (completion) ring bits */
172 RXQ_GOOD = (1U << 23), /* Response good */
173 RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */
174 RXQ_CMD_RX = (1U << 20), /* target cmd received */
175 RXQ_ATTN = (1U << 19), /* attention */
176 RXQ_RSP = (1U << 18), /* response frame xfer'd */
177 RXQ_ERR = (1U << 17), /* err info rec xfer'd */
178 RXQ_DONE = (1U << 16), /* cmd complete */
179 RXQ_SLOT_MASK = 0xfff, /* slot number */
180
181 /* mvs_cmd_hdr bits */
182 MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */
183 MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */
184
185 /* SSP initiator only */
186 MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */
187
188 /* SSP initiator or target */
189 MCH_SSP_FR_TASK = 0x1, /* TASK frame */
190
191 /* SSP target only */
192 MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */
193 MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */
194 MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */
195 MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */
196
197 MCH_SSP_MODE_PASSTHRU = 1,
198 MCH_SSP_MODE_NORMAL = 0,
199 MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */
200 MCH_FBURST = (1U << 11), /* first burst (SSP) */
201 MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */
202 MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */
203 MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */
204 MCH_RESET = (1U << 7), /* Reset (STP/SATA) */
205 MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */
206 MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */
207 MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */
208 MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/
209
210 CCTL_RST = (1U << 5), /* port logic reset */
211
212 /* 0(LSB first), 1(MSB first) */
213 CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */
214 CCTL_ENDIAN_RSP = (1U << 2), /* response frame */
215 CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */
216 CCTL_ENDIAN_CMD = (1U << 0), /* command table */
217
218 /* MVS_Px_SER_CTLSTAT (per-phy control) */
219 PHY_SSP_RST = (1U << 3), /* reset SSP link layer */
220 PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */
221 PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */
222 PHY_RST = (1U << 0), /* phy reset */
223 PHY_READY_MASK = (1U << 20),
224
225 /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
226 PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */
227	PHYEV_DCDR_ERR		= (1U << 23),	/* STP Decoder Error */
228 PHYEV_CRC_ERR = (1U << 22), /* STP CRC Error */
229 PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
230 PHYEV_AN = (1U << 18), /* SATA async notification */
231 PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */
232 PHYEV_SIG_FIS = (1U << 16), /* signature FIS */
233 PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */
234 PHYEV_IU_BIG = (1U << 11), /* IU too long err */
235 PHYEV_IU_SMALL = (1U << 10), /* IU too short err */
236 PHYEV_UNK_TAG = (1U << 9), /* unknown tag */
237 PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */
238 PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */
239 PHYEV_PORT_SEL = (1U << 6), /* port selector present */
240 PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */
241 PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */
242 PHYEV_ID_FAIL = (1U << 3), /* identify failed */
243 PHYEV_ID_DONE = (1U << 2), /* identify done */
244 PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */
245 PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */
246
247 /* MVS_PCS */
248 PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */
249 PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */
250 PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6485 */
251 PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */
252 PCS_RSP_RX_EN = (1U << 7), /* raw response rx */
253 PCS_SATA_RETRY_2 = (1U << 6), /* For 9180 */
254 PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */
255 PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */
256 PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */
257 PCS_CMD_RST = (1U << 1), /* reset cmd issue */
258 PCS_CMD_EN = (1U << 0), /* enable cmd issue */
259
260 /* Port n Attached Device Info */
261 PORT_DEV_SSP_TRGT = (1U << 19),
262 PORT_DEV_SMP_TRGT = (1U << 18),
263 PORT_DEV_STP_TRGT = (1U << 17),
264 PORT_DEV_SSP_INIT = (1U << 11),
265 PORT_DEV_SMP_INIT = (1U << 10),
266 PORT_DEV_STP_INIT = (1U << 9),
267 PORT_PHY_ID_MASK = (0xFFU << 24),
268 PORT_SSP_TRGT_MASK = (0x1U << 19),
269 PORT_SSP_INIT_MASK = (0x1U << 11),
270 PORT_DEV_TRGT_MASK = (0x7U << 17),
271 PORT_DEV_INIT_MASK = (0x7U << 9),
272 PORT_DEV_TYPE_MASK = (0x7U << 0),
273
274 /* Port n PHY Status */
275 PHY_RDY = (1U << 2),
276 PHY_DW_SYNC = (1U << 1),
277 PHY_OOB_DTCTD = (1U << 0),
278
279 /* VSR */
280 /* PHYMODE 6 (CDB) */
281 PHY_MODE6_LATECLK = (1U << 29), /* Lock Clock */
282 PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop Speed */
283 PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/
284 PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */
285 PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */
286 PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */
287 PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select (final) */
288 PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */
289 PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */
290 PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */
291 PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */
292 PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */
293 PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */
294 PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */
295};
296
297/* SAS/SATA configuration port registers, aka phy registers */
298enum sas_sata_config_port_regs {
299 PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */
300 PHYR_ADDR_LO = 0x04, /* my SAS address (low) */
301 PHYR_ADDR_HI = 0x08, /* my SAS address (high) */
302 PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */
303 PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */
304 PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */
305 PHYR_SATA_CTL = 0x18, /* SATA control */
306 PHYR_PHY_STAT = 0x1C, /* PHY status */
307 PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */
308 PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */
309 PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */
310 PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */
311 PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */
312 PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */
313 PHYR_WIDE_PORT = 0x38, /* wide port participating */
314 PHYR_CURRENT0 = 0x80, /* current connection info 0 */
315 PHYR_CURRENT1 = 0x84, /* current connection info 1 */
316 PHYR_CURRENT2 = 0x88, /* current connection info 2 */
317 CONFIG_ID_FRAME0 = 0x100, /* Port device ID frame register 0 */
318 CONFIG_ID_FRAME1 = 0x104, /* Port device ID frame register 1 */
319 CONFIG_ID_FRAME2 = 0x108, /* Port device ID frame register 2 */
320 CONFIG_ID_FRAME3 = 0x10c, /* Port device ID frame register 3 */
321 CONFIG_ID_FRAME4 = 0x110, /* Port device ID frame register 4 */
322 CONFIG_ID_FRAME5 = 0x114, /* Port device ID frame register 5 */
323 CONFIG_ID_FRAME6 = 0x118, /* Port device ID frame register 6 */
324 CONFIG_ATT_ID_FRAME0 = 0x11c, /* attached ID frame register 0 */
325 CONFIG_ATT_ID_FRAME1 = 0x120, /* attached ID frame register 1 */
326 CONFIG_ATT_ID_FRAME2 = 0x124, /* attached ID frame register 2 */
327 CONFIG_ATT_ID_FRAME3 = 0x128, /* attached ID frame register 3 */
328 CONFIG_ATT_ID_FRAME4 = 0x12c, /* attached ID frame register 4 */
329 CONFIG_ATT_ID_FRAME5 = 0x130, /* attached ID frame register 5 */
330 CONFIG_ATT_ID_FRAME6 = 0x134, /* attached ID frame register 6 */
331};
332
333enum sas_cmd_port_registers {
334 CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */
335 CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */
336 CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */
337 CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */
338 CMD_OOB_SPACE = 0x110, /* OOB space control register */
339 CMD_OOB_BURST = 0x114, /* OOB burst control register */
340 CMD_PHY_TIMER = 0x118, /* PHY timer control register */
341 CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */
342 CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */
343 CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */
344 CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */
345 CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */
346 CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */
347 CMD_ID_TEST = 0x134, /* ID test register */
348 CMD_PL_TIMER = 0x138, /* PL timer register */
349 CMD_WD_TIMER = 0x13c, /* WD timer register */
350 CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */
351 CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */
352 CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */
353 CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */
354 CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */
355 CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */
356 CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
357 CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */
358 CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */
359	CMD_XOR_MEM_BIST_STAT	= 0x164, /* XOR Memory BIST Status */
360 CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */
361 CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */
362 CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */
363 CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
364 CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
365 CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */
366 CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */
367 CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */
368 CMD_RESET_COUNT = 0x188, /* Reset Count */
369 CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */
370 CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */
371 CMD_PHY_CTL = 0x194, /* PHY Control and Status */
372 CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */
373 CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */
374 CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */
375 CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */
376 CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */
377 CMD_HOST_CTL = 0x1AC, /* Host Control Status */
378 CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */
379 CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */
380 CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */
381 CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */
382 CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */
383 CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */
384};
385
386enum mvs_info_flags {
387 MVF_MSI = (1U << 0), /* MSI is enabled */
388 MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
389 MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */
390};
391
392enum mvs_event_flags {
393 PHY_PLUG_EVENT = (3U),
394 PHY_PLUG_IN = (1U << 0), /* phy plug in */
395 PHY_PLUG_OUT = (1U << 1), /* phy plug out */
396};
397
398enum mvs_port_type {
399 PORT_TGT_MASK = (1U << 5),
400 PORT_INIT_PORT = (1U << 4),
401 PORT_TGT_PORT = (1U << 3),
402 PORT_INIT_TGT_PORT = (PORT_INIT_PORT | PORT_TGT_PORT),
403 PORT_TYPE_SAS = (1U << 1),
404 PORT_TYPE_SATA = (1U << 0),
405};
406
407/* Command Table Format */
408enum ct_format {
409 /* SSP */
410 SSP_F_H = 0x00,
411 SSP_F_IU = 0x18,
412 SSP_F_MAX = 0x4D,
413 /* STP */
414 STP_CMD_FIS = 0x00,
415 STP_ATAPI_CMD = 0x40,
416 STP_F_MAX = 0x10,
417 /* SMP */
418 SMP_F_T = 0x00,
419 SMP_F_DEP = 0x01,
420 SMP_F_MAX = 0x101,
421};
422
423enum status_buffer {
424 SB_EIR_OFF = 0x00, /* Error Information Record */
425 SB_RFB_OFF = 0x08, /* Response Frame Buffer */
426 SB_RFB_MAX = 0x400, /* RFB size*/
427};
428
429enum error_info_rec {
430 CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */
431 CMD_PI_ERR = (1U << 30), /* Protection info error. see flags2 */
432 RSP_OVER = (1U << 29), /* rsp buffer overflow */
433 RETRY_LIM = (1U << 28), /* FIS/frame retry limit exceeded */
434 UNK_FIS = (1U << 27), /* unknown FIS */
435 DMA_TERM = (1U << 26), /* DMA terminate primitive rx'd */
436 SYNC_ERR = (1U << 25), /* SYNC rx'd during frame xmit */
437 TFILE_ERR = (1U << 24), /* SATA taskfile Error bit set */
438 R_ERR = (1U << 23), /* SATA returned R_ERR prim */
439 RD_OFS = (1U << 20), /* Read DATA frame invalid offset */
440 XFER_RDY_OFS = (1U << 19), /* XFER_RDY offset error */
441 UNEXP_XFER_RDY = (1U << 18), /* unexpected XFER_RDY error */
442 DATA_OVER_UNDER = (1U << 16), /* data overflow/underflow */
443 INTERLOCK = (1U << 15), /* interlock error */
444 NAK = (1U << 14), /* NAK rx'd */
445 ACK_NAK_TO = (1U << 13), /* ACK/NAK timeout */
446 CXN_CLOSED = (1U << 12), /* cxn closed w/out ack/nak */
447 OPEN_TO = (1U << 11), /* I_T nexus lost, open cxn timeout */
448 PATH_BLOCKED = (1U << 10), /* I_T nexus lost, pathway blocked */
449 NO_DEST = (1U << 9), /* I_T nexus lost, no destination */
450 STP_RES_BSY = (1U << 8), /* STP resources busy */
451 BREAK = (1U << 7), /* break received */
452 BAD_DEST = (1U << 6), /* bad destination */
453 BAD_PROTO = (1U << 5), /* protocol not supported */
454 BAD_RATE = (1U << 4), /* cxn rate not supported */
455 WRONG_DEST = (1U << 3), /* wrong destination error */
456 CREDIT_TO = (1U << 2), /* credit timeout */
457 WDOG_TO = (1U << 1), /* watchdog timeout */
458 BUF_PAR = (1U << 0), /* buffer parity error */
459};
460
461enum error_info_rec_2 {
462 SLOT_BSY_ERR = (1U << 31), /* Slot Busy Error */
463 GRD_CHK_ERR = (1U << 14), /* Guard Check Error */
464 APP_CHK_ERR = (1U << 13), /* Application Check error */
465 REF_CHK_ERR = (1U << 12), /* Reference Check Error */
466 USR_BLK_NM = (1U << 0), /* User Block Number */
467};
468
469enum pci_cfg_register_bits {
470 PCTL_PWR_OFF = (0xFU << 24),
471 PCTL_COM_ON = (0xFU << 20),
472 PCTL_LINK_RST = (0xFU << 16),
473 PCTL_LINK_OFFS = (16),
474 PCTL_PHY_DSBL = (0xFU << 12),
475 PCTL_PHY_DSBL_OFFS = (12),
476 PRD_REQ_SIZE = (0x4000),
477 PRD_REQ_MASK = (0x00007000),
478 PLS_NEG_LINK_WD = (0x3FU << 4),
479 PLS_NEG_LINK_WD_OFFS = 4,
480 PLS_LINK_SPD = (0x0FU << 0),
481 PLS_LINK_SPD_OFFS = 0,
482};
483
484enum open_frame_protocol {
485 PROTOCOL_SMP = 0x0,
486 PROTOCOL_SSP = 0x1,
487 PROTOCOL_STP = 0x2,
488};
489
490/* define for response frame datapres field */
491enum datapres_field {
492 NO_DATA = 0,
493 RESPONSE_DATA = 1,
494 SENSE_DATA = 2,
495};
496
497/* define task management IU */
498struct mvs_tmf_task{
499 u8 tmf;
500 u16 tag_of_task_to_be_managed;
501};
502#endif
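The TXQ_*/RXQ_* bits above describe entries in the delivery and completion rings. Purely as an illustration (the real completion handling lives elsewhere in the driver, outside this hunk), a 32-bit RX descriptor could be decoded like this:

	u32 rx_desc = le32_to_cpu(*rx_entry);	/* rx_entry: __le32 * into the RX ring */
	u32 slot    = rx_desc & RXQ_SLOT_MASK;	/* completed slot number */
	bool err    = rx_desc & RXQ_ERR;	/* error info record transferred */
	bool done   = rx_desc & RXQ_DONE;	/* command completed */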
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
new file mode 100644
index 000000000000..8646a19f999d
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -0,0 +1,703 @@
1/*
2 * Marvell 88SE64xx/88SE94xx pci init
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25
26#include "mv_sas.h"
27
28static struct scsi_transport_template *mvs_stt;
29static const struct mvs_chip_info mvs_chips[] = {
30 [chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
31 [chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
32 [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, },
33 [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
34 [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
35};
36
37#define SOC_SAS_NUM 2
38
39static struct scsi_host_template mvs_sht = {
40 .module = THIS_MODULE,
41 .name = DRV_NAME,
42 .queuecommand = sas_queuecommand,
43 .target_alloc = sas_target_alloc,
44 .slave_configure = mvs_slave_configure,
45 .slave_destroy = sas_slave_destroy,
46 .scan_finished = mvs_scan_finished,
47 .scan_start = mvs_scan_start,
48 .change_queue_depth = sas_change_queue_depth,
49 .change_queue_type = sas_change_queue_type,
50 .bios_param = sas_bios_param,
51 .can_queue = 1,
52 .cmd_per_lun = 1,
53 .this_id = -1,
54 .sg_tablesize = SG_ALL,
55 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
56 .use_clustering = ENABLE_CLUSTERING,
57 .eh_device_reset_handler = sas_eh_device_reset_handler,
58 .eh_bus_reset_handler = sas_eh_bus_reset_handler,
59 .slave_alloc = mvs_slave_alloc,
60 .target_destroy = sas_target_destroy,
61 .ioctl = sas_ioctl,
62};
63
64static struct sas_domain_function_template mvs_transport_ops = {
65 .lldd_dev_found = mvs_dev_found,
66 .lldd_dev_gone = mvs_dev_gone,
67
68 .lldd_execute_task = mvs_queue_command,
69 .lldd_control_phy = mvs_phy_control,
70
71 .lldd_abort_task = mvs_abort_task,
72 .lldd_abort_task_set = mvs_abort_task_set,
73 .lldd_clear_aca = mvs_clear_aca,
74 .lldd_clear_task_set = mvs_clear_task_set,
75 .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset,
76 .lldd_lu_reset = mvs_lu_reset,
77 .lldd_query_task = mvs_query_task,
78
79 .lldd_port_formed = mvs_port_formed,
80 .lldd_port_deformed = mvs_port_deformed,
81
82};
83
84static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
85{
86 struct mvs_phy *phy = &mvi->phy[phy_id];
87 struct asd_sas_phy *sas_phy = &phy->sas_phy;
88
89 phy->mvi = mvi;
90 init_timer(&phy->timer);
91 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
92 sas_phy->class = SAS;
93 sas_phy->iproto = SAS_PROTOCOL_ALL;
94 sas_phy->tproto = 0;
95 sas_phy->type = PHY_TYPE_PHYSICAL;
96 sas_phy->role = PHY_ROLE_INITIATOR;
97 sas_phy->oob_mode = OOB_NOT_CONNECTED;
98 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
99
100 sas_phy->id = phy_id;
101 sas_phy->sas_addr = &mvi->sas_addr[0];
102 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
103 sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata;
104 sas_phy->lldd_phy = phy;
105}
106
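/*
 * Tear down one controller instance: free the per-slot buffers, the
 * delivery/completion rings and the command header array, unmap the chip
 * registers and drop the Scsi_Host reference.  Called both on probe
 * failure and on controller removal.
 */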
107static void mvs_free(struct mvs_info *mvi)
108{
109 int i;
110 struct mvs_wq *mwq;
111 int slot_nr;
112
113 if (!mvi)
114 return;
115
116 if (mvi->flags & MVF_FLAG_SOC)
117 slot_nr = MVS_SOC_SLOTS;
118 else
119 slot_nr = MVS_SLOTS;
120
121 for (i = 0; i < mvi->tags_num; i++) {
122 struct mvs_slot_info *slot = &mvi->slot_info[i];
123 if (slot->buf)
124 dma_free_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
125 slot->buf, slot->buf_dma);
126 }
127
128 if (mvi->tx)
129 dma_free_coherent(mvi->dev,
130 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
131 mvi->tx, mvi->tx_dma);
132 if (mvi->rx_fis)
133 dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ,
134 mvi->rx_fis, mvi->rx_fis_dma);
135 if (mvi->rx)
136 dma_free_coherent(mvi->dev,
137 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
138 mvi->rx, mvi->rx_dma);
139 if (mvi->slot)
140 dma_free_coherent(mvi->dev,
141 sizeof(*mvi->slot) * slot_nr,
142 mvi->slot, mvi->slot_dma);
143#ifndef DISABLE_HOTPLUG_DMA_FIX
144 if (mvi->bulk_buffer)
145 dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
146 mvi->bulk_buffer, mvi->bulk_buffer_dma);
147#endif
148
149 MVS_CHIP_DISP->chip_iounmap(mvi);
150 if (mvi->shost)
151 scsi_host_put(mvi->shost);
152 list_for_each_entry(mwq, &mvi->wq_list, entry)
153 cancel_delayed_work(&mwq->work_q);
154 kfree(mvi);
155}
156
157#ifdef MVS_USE_TASKLET
158struct tasklet_struct mv_tasklet;
159static void mvs_tasklet(unsigned long opaque)
160{
161 unsigned long flags;
162 u32 stat;
163 u16 core_nr, i = 0;
164
165 struct mvs_info *mvi;
166 struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque;
167
168 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
169 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
170
171 if (unlikely(!mvi))
172 BUG_ON(1);
173
174 for (i = 0; i < core_nr; i++) {
175 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
176 stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq);
177 if (stat)
178 MVS_CHIP_DISP->isr(mvi, mvi->irq, stat);
179 }
180
181}
182#endif
183
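/*
 * Shared interrupt handler.  The interrupt status is read once from the
 * first core; the work is then either deferred to the tasklet or
 * dispatched directly to every core's chip-specific ISR.
 */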
184static irqreturn_t mvs_interrupt(int irq, void *opaque)
185{
186 u32 core_nr, i = 0;
187 u32 stat;
188 struct mvs_info *mvi;
189 struct sas_ha_struct *sha = opaque;
190
191 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
192 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
193
194 if (unlikely(!mvi))
195 return IRQ_NONE;
196
197 stat = MVS_CHIP_DISP->isr_status(mvi, irq);
198 if (!stat)
199 return IRQ_NONE;
200
201#ifdef MVS_USE_TASKLET
202 tasklet_schedule(&mv_tasklet);
203#else
204 for (i = 0; i < core_nr; i++) {
205 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
206 MVS_CHIP_DISP->isr(mvi, irq, stat);
207 }
208#endif
209 return IRQ_HANDLED;
210}
211
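/*
 * Allocate and zero the per-controller DMA areas: TX (delivery) ring,
 * received FIS area, RX (completion) ring, command header array and one
 * buffer per slot.  Also initializes the phys, ports and device table.
 */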
212static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
213{
214 int i, slot_nr;
215
216 if (mvi->flags & MVF_FLAG_SOC)
217 slot_nr = MVS_SOC_SLOTS;
218 else
219 slot_nr = MVS_SLOTS;
220
221 spin_lock_init(&mvi->lock);
222 for (i = 0; i < mvi->chip->n_phy; i++) {
223 mvs_phy_init(mvi, i);
224 mvi->port[i].wide_port_phymap = 0;
225 mvi->port[i].port_attached = 0;
226 INIT_LIST_HEAD(&mvi->port[i].list);
227 }
228 for (i = 0; i < MVS_MAX_DEVICES; i++) {
229 mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
230 mvi->devices[i].dev_type = NO_DEVICE;
231 mvi->devices[i].device_id = i;
232 mvi->devices[i].dev_status = MVS_DEV_NORMAL;
233 }
234
235 /*
236 * alloc and init our DMA areas
237 */
238 mvi->tx = dma_alloc_coherent(mvi->dev,
239 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
240 &mvi->tx_dma, GFP_KERNEL);
241 if (!mvi->tx)
242 goto err_out;
243 memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
244 mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
245 &mvi->rx_fis_dma, GFP_KERNEL);
246 if (!mvi->rx_fis)
247 goto err_out;
248 memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
249
250 mvi->rx = dma_alloc_coherent(mvi->dev,
251 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
252 &mvi->rx_dma, GFP_KERNEL);
253 if (!mvi->rx)
254 goto err_out;
255 memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
256 mvi->rx[0] = cpu_to_le32(0xfff);
257 mvi->rx_cons = 0xfff;
258
259 mvi->slot = dma_alloc_coherent(mvi->dev,
260 sizeof(*mvi->slot) * slot_nr,
261 &mvi->slot_dma, GFP_KERNEL);
262 if (!mvi->slot)
263 goto err_out;
264 memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);
265
266#ifndef DISABLE_HOTPLUG_DMA_FIX
267 mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
268 TRASH_BUCKET_SIZE,
269 &mvi->bulk_buffer_dma, GFP_KERNEL);
270 if (!mvi->bulk_buffer)
271 goto err_out;
272#endif
273 for (i = 0; i < slot_nr; i++) {
274 struct mvs_slot_info *slot = &mvi->slot_info[i];
275
276 slot->buf = dma_alloc_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
277 &slot->buf_dma, GFP_KERNEL);
278 if (!slot->buf) {
279 printk(KERN_DEBUG"failed to allocate slot->buf.\n");
280 goto err_out;
281 }
282 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
283 ++mvi->tags_num;
284 }
285 /* Initialize tags */
286 mvs_tag_init(mvi);
287 return 0;
288err_out:
289 return 1;
290}
291
292
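/*
 * Map the controller's register BAR (and the optional extended BAR) into
 * kernel virtual address space.
 */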
293int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
294{
295 unsigned long res_start, res_len, res_flag, res_flag_ex = 0;
296 struct pci_dev *pdev = mvi->pdev;
297 if (bar_ex != -1) {
298 /*
299 * ioremap main and peripheral registers
300 */
301 res_start = pci_resource_start(pdev, bar_ex);
302 res_len = pci_resource_len(pdev, bar_ex);
303 if (!res_start || !res_len)
304 goto err_out;
305
306 res_flag_ex = pci_resource_flags(pdev, bar_ex);
307 if (res_flag_ex & IORESOURCE_MEM) {
308 if (res_flag_ex & IORESOURCE_CACHEABLE)
309 mvi->regs_ex = ioremap(res_start, res_len);
310 else
311 mvi->regs_ex = ioremap_nocache(res_start,
312 res_len);
313 } else
314 mvi->regs_ex = (void *)res_start;
315 if (!mvi->regs_ex)
316 goto err_out;
317 }
318
319 res_start = pci_resource_start(pdev, bar);
320 res_len = pci_resource_len(pdev, bar);
321 if (!res_start || !res_len)
322 goto err_out;
323
324 res_flag = pci_resource_flags(pdev, bar);
325 if (res_flag & IORESOURCE_CACHEABLE)
326 mvi->regs = ioremap(res_start, res_len);
327 else
328 mvi->regs = ioremap_nocache(res_start, res_len);
329
330 if (!mvi->regs) {
331 if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
332 iounmap(mvi->regs_ex);
333 mvi->regs_ex = NULL;
334 goto err_out;
335 }
336
337 return 0;
338err_out:
339 return -1;
340}
341
342void mvs_iounmap(void __iomem *regs)
343{
344 iounmap(regs);
345}
346
347static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
348 const struct pci_device_id *ent,
349 struct Scsi_Host *shost, unsigned int id)
350{
351 struct mvs_info *mvi;
352 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
353
354 mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info),
355 GFP_KERNEL);
356 if (!mvi)
357 return NULL;
358
359 mvi->pdev = pdev;
360 mvi->dev = &pdev->dev;
361 mvi->chip_id = ent->driver_data;
362 mvi->chip = &mvs_chips[mvi->chip_id];
363 INIT_LIST_HEAD(&mvi->wq_list);
364 mvi->irq = pdev->irq;
365
366 ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
367 ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;
368
369 mvi->id = id;
370 mvi->sas = sha;
371 mvi->shost = shost;
372#ifdef MVS_USE_TASKLET
373 tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha);
374#endif
375
376 if (MVS_CHIP_DISP->chip_ioremap(mvi))
377 goto err_out;
378 if (!mvs_alloc(mvi, shost))
379 return mvi;
380err_out:
381 mvs_free(mvi);
382 return NULL;
383}
384
385/* move to PCI layer or libata core? */
386static int pci_go_64(struct pci_dev *pdev)
387{
388 int rc;
389
390 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
391 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
392 if (rc) {
393 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
394 if (rc) {
395 dev_printk(KERN_ERR, &pdev->dev,
396 "64-bit DMA enable failed\n");
397 return rc;
398 }
399 }
400 } else {
401 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
402 if (rc) {
403 dev_printk(KERN_ERR, &pdev->dev,
404 "32-bit DMA enable failed\n");
405 return rc;
406 }
407 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
408 if (rc) {
409 dev_printk(KERN_ERR, &pdev->dev,
410 "32-bit consistent DMA enable failed\n");
411 return rc;
412 }
413 }
414
415 return rc;
416}
417
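/*
 * First stage of SAS HA setup, done before any core is probed: allocate
 * the phy/port pointer arrays and the driver-private bookkeeping, and set
 * the basic Scsi_Host limits.
 */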
418static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
419 const struct mvs_chip_info *chip_info)
420{
421 int phy_nr, port_nr; unsigned short core_nr;
422 struct asd_sas_phy **arr_phy;
423 struct asd_sas_port **arr_port;
424 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
425
426 core_nr = chip_info->n_host;
427 phy_nr = core_nr * chip_info->n_phy;
428 port_nr = phy_nr;
429
430 memset(sha, 0x00, sizeof(struct sas_ha_struct));
431 arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
432 arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
433 if (!arr_phy || !arr_port)
434 goto exit_free;
435
436 sha->sas_phy = arr_phy;
437 sha->sas_port = arr_port;
438
439 sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
440 if (!sha->lldd_ha)
441 goto exit_free;
442
443 ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;
444
445 shost->transportt = mvs_stt;
446 shost->max_id = 128;
447 shost->max_lun = ~0;
448 shost->max_channel = 1;
449 shost->max_cmd_len = 16;
450
451 return 0;
452exit_free:
453 kfree(arr_phy);
454 kfree(arr_port);
455 return -1;
456
457}
458
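/*
 * Second stage of SAS HA setup, done after every core has been allocated:
 * wire each core's phys and ports into the libsas arrays and set the
 * final queue depths.
 */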
459static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
460 const struct mvs_chip_info *chip_info)
461{
462 int can_queue, i = 0, j = 0;
463 struct mvs_info *mvi = NULL;
464 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
465 unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
466
467 for (j = 0; j < nr_core; j++) {
468 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
469 for (i = 0; i < chip_info->n_phy; i++) {
470 sha->sas_phy[j * chip_info->n_phy + i] =
471 &mvi->phy[i].sas_phy;
472 sha->sas_port[j * chip_info->n_phy + i] =
473 &mvi->port[i].sas_port;
474 }
475 }
476
477 sha->sas_ha_name = DRV_NAME;
478 sha->dev = mvi->dev;
479 sha->lldd_module = THIS_MODULE;
480 sha->sas_addr = &mvi->sas_addr[0];
481
482 sha->num_phys = nr_core * chip_info->n_phy;
483
484 sha->lldd_max_execute_num = 1;
485
486 if (mvi->flags & MVF_FLAG_SOC)
487 can_queue = MVS_SOC_CAN_QUEUE;
488 else
489 can_queue = MVS_CAN_QUEUE;
490
491 sha->lldd_queue_size = can_queue;
492 shost->can_queue = can_queue;
493 mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys;
494 sha->core.shost = mvi->shost;
495}
496
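/*
 * Program a default SAS address on every phy and copy it into the host's
 * SAS address.
 */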
497static void mvs_init_sas_add(struct mvs_info *mvi)
498{
499 u8 i;
500 for (i = 0; i < mvi->chip->n_phy; i++) {
501 mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL;
502 mvi->phy[i].dev_sas_addr =
503 cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr));
504 }
505
506 memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
507}
508
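/*
 * PCI probe: enable the device, set the DMA masks, allocate one mvs_info
 * per core, initialize the chip and register the host with the SCSI and
 * SAS layers before enabling interrupts.
 */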
509static int __devinit mvs_pci_init(struct pci_dev *pdev,
510 const struct pci_device_id *ent)
511{
512 unsigned int rc, nhost = 0;
513 struct mvs_info *mvi;
514 irq_handler_t irq_handler = mvs_interrupt;
515 struct Scsi_Host *shost = NULL;
516 const struct mvs_chip_info *chip;
517
518 dev_printk(KERN_INFO, &pdev->dev,
519 "mvsas: driver version %s\n", DRV_VERSION);
520 rc = pci_enable_device(pdev);
521 if (rc)
522 goto err_out_enable;
523
524 pci_set_master(pdev);
525
526 rc = pci_request_regions(pdev, DRV_NAME);
527 if (rc)
528 goto err_out_disable;
529
530 rc = pci_go_64(pdev);
531 if (rc)
532 goto err_out_regions;
533
534 shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
535 if (!shost) {
536 rc = -ENOMEM;
537 goto err_out_regions;
538 }
539
540 chip = &mvs_chips[ent->driver_data];
541 SHOST_TO_SAS_HA(shost) =
542 kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
543 if (!SHOST_TO_SAS_HA(shost)) {
544 kfree(shost);
545 rc = -ENOMEM;
546 goto err_out_regions;
547 }
548
549 rc = mvs_prep_sas_ha_init(shost, chip);
550 if (rc) {
551 kfree(shost);
552 rc = -ENOMEM;
553 goto err_out_regions;
554 }
555
556 pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
557
558 do {
559 mvi = mvs_pci_alloc(pdev, ent, shost, nhost);
560 if (!mvi) {
561 rc = -ENOMEM;
562 goto err_out_regions;
563 }
564
565 mvs_init_sas_add(mvi);
566
567 mvi->instance = nhost;
568 rc = MVS_CHIP_DISP->chip_init(mvi);
569 if (rc) {
570 mvs_free(mvi);
571 goto err_out_regions;
572 }
573 nhost++;
574 } while (nhost < chip->n_host);
575
576 mvs_post_sas_ha_init(shost, chip);
577
578 rc = scsi_add_host(shost, &pdev->dev);
579 if (rc)
580 goto err_out_shost;
581
582 rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
583 if (rc)
584 goto err_out_shost;
585 rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED,
586 DRV_NAME, SHOST_TO_SAS_HA(shost));
587 if (rc)
588 goto err_not_sas;
589
590 MVS_CHIP_DISP->interrupt_enable(mvi);
591
592 scsi_scan_host(mvi->shost);
593
594 return 0;
595
596err_not_sas:
597 sas_unregister_ha(SHOST_TO_SAS_HA(shost));
598err_out_shost:
599 scsi_remove_host(mvi->shost);
600err_out_regions:
601 pci_release_regions(pdev);
602err_out_disable:
603 pci_disable_device(pdev);
604err_out_enable:
605 return rc;
606}
607
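/*
 * PCI remove: unregister from libsas and the SCSI midlayer, disable and
 * free the interrupt, then release every core's resources and the PCI
 * regions.
 */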
608static void __devexit mvs_pci_remove(struct pci_dev *pdev)
609{
610 unsigned short core_nr, i = 0;
611 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
612 struct mvs_info *mvi = NULL;
613
614 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
615 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
616
617#ifdef MVS_USE_TASKLET
618 tasklet_kill(&mv_tasklet);
619#endif
620
621 pci_set_drvdata(pdev, NULL);
622 sas_unregister_ha(sha);
623 sas_remove_host(mvi->shost);
624 scsi_remove_host(mvi->shost);
625
626 MVS_CHIP_DISP->interrupt_disable(mvi);
627 free_irq(mvi->irq, sha);
628 for (i = 0; i < core_nr; i++) {
629 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
630 mvs_free(mvi);
631 }
632 kfree(sha->sas_phy);
633 kfree(sha->sas_port);
634 kfree(sha);
635 pci_release_regions(pdev);
636 pci_disable_device(pdev);
637 return;
638}
639
640static struct pci_device_id __devinitdata mvs_pci_table[] = {
641 { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
642 { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
643 {
644 .vendor = PCI_VENDOR_ID_MARVELL,
645 .device = 0x6440,
646 .subvendor = PCI_ANY_ID,
647 .subdevice = 0x6480,
648 .class = 0,
649 .class_mask = 0,
650 .driver_data = chip_6485,
651 },
652 { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
653 { PCI_VDEVICE(MARVELL, 0x6485), chip_6485 },
654 { PCI_VDEVICE(MARVELL, 0x9480), chip_9480 },
655 { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
656
657 { } /* terminate list */
658};
659
660static struct pci_driver mvs_pci_driver = {
661 .name = DRV_NAME,
662 .id_table = mvs_pci_table,
663 .probe = mvs_pci_init,
664 .remove = __devexit_p(mvs_pci_remove),
665};
666
667/* task handler */
668struct task_struct *mvs_th;
669static int __init mvs_init(void)
670{
671 int rc;
672 mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
673 if (!mvs_stt)
674 return -ENOMEM;
675
676 rc = pci_register_driver(&mvs_pci_driver);
677
678 if (rc)
679 goto err_out;
680
681 return 0;
682
683err_out:
684 sas_release_transport(mvs_stt);
685 return rc;
686}
687
688static void __exit mvs_exit(void)
689{
690 pci_unregister_driver(&mvs_pci_driver);
691 sas_release_transport(mvs_stt);
692}
693
694module_init(mvs_init);
695module_exit(mvs_exit);
696
697MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
698MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
699MODULE_VERSION(DRV_VERSION);
700MODULE_LICENSE("GPL");
701#ifdef CONFIG_PCI
702MODULE_DEVICE_TABLE(pci, mvs_pci_table);
703#endif
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
new file mode 100644
index 000000000000..0d2138641214
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -0,0 +1,2154 @@
1/*
2 * Marvell 88SE64xx/88SE94xx main function
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#include "mv_sas.h"
26
27static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
28{
29 if (task->lldd_task) {
30 struct mvs_slot_info *slot;
31 slot = task->lldd_task;
32 *tag = slot->slot_tag;
33 return 1;
34 }
35 return 0;
36}
37
38void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
39{
40 void *bitmap = &mvi->tags;
41 clear_bit(tag, bitmap);
42}
43
44void mvs_tag_free(struct mvs_info *mvi, u32 tag)
45{
46 mvs_tag_clear(mvi, tag);
47}
48
49void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
50{
51 void *bitmap = &mvi->tags;
52 set_bit(tag, bitmap);
53}
54
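/* Grab the first free slot tag; returns -SAS_QUEUE_FULL when none is left. */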
55inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
56{
57 unsigned int index, tag;
58 void *bitmap = &mvi->tags;
59
60 index = find_first_zero_bit(bitmap, mvi->tags_num);
61 tag = index;
62 if (tag >= mvi->tags_num)
63 return -SAS_QUEUE_FULL;
64 mvs_tag_set(mvi, tag);
65 *tag_out = tag;
66 return 0;
67}
68
69void mvs_tag_init(struct mvs_info *mvi)
70{
71 int i;
72 for (i = 0; i < mvi->tags_num; ++i)
73 mvs_tag_clear(mvi, i);
74}
75
76void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
77{
78 u32 i;
79 u32 run;
80 u32 offset;
81
82 offset = 0;
83 while (size) {
84 printk(KERN_DEBUG"%08X : ", baseaddr + offset);
85 if (size >= 16)
86 run = 16;
87 else
88 run = size;
89 size -= run;
90 for (i = 0; i < 16; i++) {
91 if (i < run)
92 printk(KERN_DEBUG"%02X ", (u32)data[i]);
93 else
94 printk(KERN_DEBUG" ");
95 }
96 printk(KERN_DEBUG": ");
97 for (i = 0; i < run; i++)
98 printk(KERN_DEBUG"%c",
99 isalnum(data[i]) ? data[i] : '.');
100 printk(KERN_DEBUG"\n");
101 data = &data[16];
102 offset += run;
103 }
104 printk(KERN_DEBUG"\n");
105}
106
107#if (_MV_DUMP > 1)
108static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
109 enum sas_protocol proto)
110{
111 u32 offset;
112 struct mvs_slot_info *slot = &mvi->slot_info[tag];
113
114 offset = slot->cmd_size + MVS_OAF_SZ +
115 MVS_CHIP_DISP->prd_size() * slot->n_elem;
116 dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n",
117 tag);
118 mvs_hexdump(32, (u8 *) slot->response,
119 (u32) slot->buf_dma + offset);
120}
121#endif
122
123static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
124 enum sas_protocol proto)
125{
126#if (_MV_DUMP > 1)
127 u32 sz, w_ptr;
128 u64 addr;
129 struct mvs_slot_info *slot = &mvi->slot_info[tag];
130
131 /*Delivery Queue */
132 sz = MVS_CHIP_SLOT_SZ;
133 w_ptr = slot->tx;
134 addr = mvi->tx_dma;
135 dev_printk(KERN_DEBUG, mvi->dev,
136 "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
137 dev_printk(KERN_DEBUG, mvi->dev,
138 "Delivery Queue Base Address=0x%llX (PA)"
139 "(tx_dma=0x%llX), Entry=%04d\n",
140 addr, (unsigned long long)mvi->tx_dma, w_ptr);
141 mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
142 (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
143 /*Command List */
144 addr = mvi->slot_dma;
145 dev_printk(KERN_DEBUG, mvi->dev,
146 "Command List Base Address=0x%llX (PA)"
147 "(slot_dma=0x%llX), Header=%03d\n",
148 addr, (unsigned long long)slot->buf_dma, tag);
149 dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag);
150 /*mvs_cmd_hdr */
151 mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
152 (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
153 /*1.command table area */
154 dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n");
155 mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
156 /*2.open address frame area */
157 dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n");
158 mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
159 (u32) slot->buf_dma + slot->cmd_size);
160 /*3.status buffer */
161 mvs_hba_sb_dump(mvi, tag, proto);
162 /*4.PRD table */
163 dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n");
164 mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem,
165 (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
166 (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
167#endif
168}
169
170static void mvs_hba_cq_dump(struct mvs_info *mvi)
171{
172#if (_MV_DUMP > 2)
173 u64 addr;
174 void __iomem *regs = mvi->regs;
175 u32 entry = mvi->rx_cons + 1;
176 u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
177
178 /*Completion Queue */
179 addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
180 dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n",
181 mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
182 dev_printk(KERN_DEBUG, mvi->dev,
183 "Completion List Base Address=0x%llX (PA), "
184 "CQ_Entry=%04d, CQ_WP=0x%08X\n",
185 addr, entry - 1, mvi->rx[0]);
186 mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
187 mvi->rx_dma + sizeof(u32) * entry);
188#endif
189}
190
191void mvs_get_sas_addr(void *buf, u32 buflen)
192{
193 /*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/
194}
195
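/*
 * Map a domain_device back to the mvs_info instance that owns the phy on
 * which its port was formed.
 */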
196struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
197{
198 unsigned long i = 0, j = 0, hi = 0;
199 struct sas_ha_struct *sha = dev->port->ha;
200 struct mvs_info *mvi = NULL;
201 struct asd_sas_phy *phy;
202
203 while (sha->sas_port[i]) {
204 if (sha->sas_port[i] == dev->port) {
205 phy = container_of(sha->sas_port[i]->phy_list.next,
206 struct asd_sas_phy, port_phy_el);
207 j = 0;
208 while (sha->sas_phy[j]) {
209 if (sha->sas_phy[j] == phy)
210 break;
211 j++;
212 }
213 break;
214 }
215 i++;
216 }
217 hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
218 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
219
220 return mvi;
221
222}
223
224/* FIXME */
225int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
226{
227 unsigned long i = 0, j = 0, n = 0, num = 0;
228 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
229 struct mvs_info *mvi = mvi_dev->mvi_info;
230 struct sas_ha_struct *sha = dev->port->ha;
231
232 while (sha->sas_port[i]) {
233 if (sha->sas_port[i] == dev->port) {
234 struct asd_sas_phy *phy;
235 list_for_each_entry(phy,
236 &sha->sas_port[i]->phy_list, port_phy_el) {
237 j = 0;
238 while (sha->sas_phy[j]) {
239 if (sha->sas_phy[j] == phy)
240 break;
241 j++;
242 }
243 phyno[n] = (j >= mvi->chip->n_phy) ?
244 (j - mvi->chip->n_phy) : j;
245 num++;
246 n++;
247 }
248 break;
249 }
250 i++;
251 }
252 return num;
253}
254
255static inline void mvs_free_reg_set(struct mvs_info *mvi,
256 struct mvs_device *dev)
257{
258 if (!dev) {
259		mv_printk("device has been freed.\n");
260 return;
261 }
262 if (dev->runing_req != 0)
263 return;
264 if (dev->taskfileset == MVS_ID_NOT_MAPPED)
265 return;
266 MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
267}
268
269static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
270 struct mvs_device *dev)
271{
272 if (dev->taskfileset != MVS_ID_NOT_MAPPED)
273 return 0;
274 return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
275}
276
277void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
278{
279 u32 no;
280 for_each_phy(phy_mask, phy_mask, no) {
281 if (!(phy_mask & 1))
282 continue;
283 MVS_CHIP_DISP->phy_reset(mvi, no, hard);
284 }
285}
286
287/* FIXME: locking? */
288int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
289 void *funcdata)
290{
291 int rc = 0, phy_id = sas_phy->id;
292 u32 tmp, i = 0, hi;
293 struct sas_ha_struct *sha = sas_phy->ha;
294 struct mvs_info *mvi = NULL;
295
296 while (sha->sas_phy[i]) {
297 if (sha->sas_phy[i] == sas_phy)
298 break;
299 i++;
300 }
301 hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
302 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
303
304 switch (func) {
305 case PHY_FUNC_SET_LINK_RATE:
306 MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
307 break;
308
309 case PHY_FUNC_HARD_RESET:
310 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
311 if (tmp & PHY_RST_HARD)
312 break;
313 MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1);
314 break;
315
316 case PHY_FUNC_LINK_RESET:
317 MVS_CHIP_DISP->phy_enable(mvi, phy_id);
318 MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0);
319 break;
320
321 case PHY_FUNC_DISABLE:
322 MVS_CHIP_DISP->phy_disable(mvi, phy_id);
323 break;
324 case PHY_FUNC_RELEASE_SPINUP_HOLD:
325 default:
326 rc = -EOPNOTSUPP;
327 }
328 msleep(200);
329 return rc;
330}
331
332void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
333 u32 off_lo, u32 off_hi, u64 sas_addr)
334{
335 u32 lo = (u32)sas_addr;
336 u32 hi = (u32)(sas_addr>>32);
337
338 MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
339 MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
340 MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
341 MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
342}
343
344static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
345{
346 struct mvs_phy *phy = &mvi->phy[i];
347 struct asd_sas_phy *sas_phy = &phy->sas_phy;
348 struct sas_ha_struct *sas_ha;
349 if (!phy->phy_attached)
350 return;
351
352 if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
353 && phy->phy_type & PORT_TYPE_SAS) {
354 return;
355 }
356
357 sas_ha = mvi->sas;
358 sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
359
360 if (sas_phy->phy) {
361 struct sas_phy *sphy = sas_phy->phy;
362
363 sphy->negotiated_linkrate = sas_phy->linkrate;
364 sphy->minimum_linkrate = phy->minimum_linkrate;
365 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
366 sphy->maximum_linkrate = phy->maximum_linkrate;
367 sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
368 }
369
370 if (phy->phy_type & PORT_TYPE_SAS) {
371 struct sas_identify_frame *id;
372
373 id = (struct sas_identify_frame *)phy->frame_rcvd;
374 id->dev_type = phy->identify.device_type;
375 id->initiator_bits = SAS_PROTOCOL_ALL;
376 id->target_bits = phy->identify.target_port_protocols;
377 } else if (phy->phy_type & PORT_TYPE_SATA) {
378 /*Nothing*/
379 }
380	mv_dprintk("phy %d bytes dmaed.\n", i + mvi->id * mvi->chip->n_phy);
381
382 sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
383
384 mvi->sas->notify_port_event(sas_phy,
385 PORTE_BYTES_DMAED);
386}
387
388int mvs_slave_alloc(struct scsi_device *scsi_dev)
389{
390 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
391 if (dev_is_sata(dev)) {
392 /* We don't need to rescan targets
393		 * if the REPORT_LUNS request fails
394 */
395 if (scsi_dev->lun > 0)
396 return -ENXIO;
397 scsi_dev->tagged_supported = 1;
398 }
399
400 return sas_slave_alloc(scsi_dev);
401}
402
403int mvs_slave_configure(struct scsi_device *sdev)
404{
405 struct domain_device *dev = sdev_to_domain_dev(sdev);
406 int ret = sas_slave_configure(sdev);
407
408 if (ret)
409 return ret;
410 if (dev_is_sata(dev)) {
411 /* may set PIO mode */
412 #if MV_DISABLE_NCQ
413 struct ata_port *ap = dev->sata_dev.ap;
414 struct ata_device *adev = ap->link.device;
415 adev->flags |= ATA_DFLAG_NCQ_OFF;
416 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
417 #endif
418 }
419 return 0;
420}
421
422void mvs_scan_start(struct Scsi_Host *shost)
423{
424 int i, j;
425 unsigned short core_nr;
426 struct mvs_info *mvi;
427 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
428
429 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
430
431 for (j = 0; j < core_nr; j++) {
432 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
433 for (i = 0; i < mvi->chip->n_phy; ++i)
434 mvs_bytes_dmaed(mvi, i);
435 }
436}
437
438int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
439{
440 /* give the phy enabling interrupt event time to come in (1s
441 * is empirically about all it takes) */
442 if (time < HZ)
443 return 0;
444 /* Wait for discovery to finish */
445 scsi_flush_work(shost);
446 return 1;
447}
448
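/*
 * Prepare an SMP request: DMA-map the request/response scatterlists and
 * lay out the slot buffer (command table, open address frame, PRD table,
 * status buffer) before filling the delivery queue entry and command
 * header.
 */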
449static int mvs_task_prep_smp(struct mvs_info *mvi,
450 struct mvs_task_exec_info *tei)
451{
452 int elem, rc, i;
453 struct sas_task *task = tei->task;
454 struct mvs_cmd_hdr *hdr = tei->hdr;
455 struct domain_device *dev = task->dev;
456 struct asd_sas_port *sas_port = dev->port;
457 struct scatterlist *sg_req, *sg_resp;
458 u32 req_len, resp_len, tag = tei->tag;
459 void *buf_tmp;
460 u8 *buf_oaf;
461 dma_addr_t buf_tmp_dma;
462 void *buf_prd;
463 struct mvs_slot_info *slot = &mvi->slot_info[tag];
464 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
465#if _MV_DUMP
466 u8 *buf_cmd;
467 void *from;
468#endif
469 /*
470 * DMA-map SMP request, response buffers
471 */
472 sg_req = &task->smp_task.smp_req;
473 elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
474 if (!elem)
475 return -ENOMEM;
476 req_len = sg_dma_len(sg_req);
477
478 sg_resp = &task->smp_task.smp_resp;
479 elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
480 if (!elem) {
481 rc = -ENOMEM;
482 goto err_out;
483 }
484 resp_len = SB_RFB_MAX;
485
486 /* must be in dwords */
487 if ((req_len & 0x3) || (resp_len & 0x3)) {
488 rc = -EINVAL;
489 goto err_out_2;
490 }
491
492 /*
493 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
494 */
495
496 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
497 buf_tmp = slot->buf;
498 buf_tmp_dma = slot->buf_dma;
499
500#if _MV_DUMP
501 buf_cmd = buf_tmp;
502 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
503 buf_tmp += req_len;
504 buf_tmp_dma += req_len;
505 slot->cmd_size = req_len;
506#else
507 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
508#endif
509
510 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
511 buf_oaf = buf_tmp;
512 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
513
514 buf_tmp += MVS_OAF_SZ;
515 buf_tmp_dma += MVS_OAF_SZ;
516
517 /* region 3: PRD table *********************************** */
518 buf_prd = buf_tmp;
519 if (tei->n_elem)
520 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
521 else
522 hdr->prd_tbl = 0;
523
524 i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
525 buf_tmp += i;
526 buf_tmp_dma += i;
527
528	/* region 4: status buffer (the larger the PRD, the smaller this buf) ****** */
529 slot->response = buf_tmp;
530 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
531 if (mvi->flags & MVF_FLAG_SOC)
532 hdr->reserved[0] = 0;
533
534 /*
535 * Fill in TX ring and command slot header
536 */
537 slot->tx = mvi->tx_prod;
538 mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
539 TXQ_MODE_I | tag |
540 (sas_port->phy_mask << TXQ_PHY_SHIFT));
541
542 hdr->flags |= flags;
543 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
544 hdr->tags = cpu_to_le32(tag);
545 hdr->data_len = 0;
546
547 /* generate open address frame hdr (first 12 bytes) */
548 /* initiator, SMP, ftype 1h */
549 buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
550 buf_oaf[1] = dev->linkrate & 0xf;
551 *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */
552 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
553
554 /* fill in PRD (scatter/gather) table, if any */
555 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
556
557#if _MV_DUMP
558 /* copy cmd table */
559 from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
560 memcpy(buf_cmd, from + sg_req->offset, req_len);
561 kunmap_atomic(from, KM_IRQ0);
562#endif
563 return 0;
564
565err_out_2:
566 dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
567 PCI_DMA_FROMDEVICE);
568err_out:
569 dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
570 PCI_DMA_TODEVICE);
571 return rc;
572}
573
574static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
575{
576 struct ata_queued_cmd *qc = task->uldd_task;
577
578 if (qc) {
579 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
580 qc->tf.command == ATA_CMD_FPDMA_READ) {
581 *tag = qc->tag;
582 return 1;
583 }
584 }
585
586 return 0;
587}
588
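/*
 * Prepare a SATA/STP request: reserve a SATA register set for the device,
 * then place the command FIS (and ATAPI CDB, if any), open address frame,
 * PRD table and status buffer in the slot buffer.
 */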
589static int mvs_task_prep_ata(struct mvs_info *mvi,
590 struct mvs_task_exec_info *tei)
591{
592 struct sas_task *task = tei->task;
593 struct domain_device *dev = task->dev;
594 struct mvs_device *mvi_dev = dev->lldd_dev;
595 struct mvs_cmd_hdr *hdr = tei->hdr;
596 struct asd_sas_port *sas_port = dev->port;
597 struct mvs_slot_info *slot;
598 void *buf_prd;
599 u32 tag = tei->tag, hdr_tag;
600 u32 flags, del_q;
601 void *buf_tmp;
602 u8 *buf_cmd, *buf_oaf;
603 dma_addr_t buf_tmp_dma;
604 u32 i, req_len, resp_len;
605 const u32 max_resp_len = SB_RFB_MAX;
606
607 if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
608		mv_dprintk("Not enough register sets for dev %d.\n",
609 mvi_dev->device_id);
610 return -EBUSY;
611 }
612 slot = &mvi->slot_info[tag];
613 slot->tx = mvi->tx_prod;
614 del_q = TXQ_MODE_I | tag |
615 (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
616 (sas_port->phy_mask << TXQ_PHY_SHIFT) |
617 (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
618 mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
619
620#ifndef DISABLE_HOTPLUG_DMA_FIX
621 if (task->data_dir == DMA_FROM_DEVICE)
622 flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
623 else
624 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
625#else
626 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
627#endif
628 if (task->ata_task.use_ncq)
629 flags |= MCH_FPDMA;
630 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
631 if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
632 flags |= MCH_ATAPI;
633 }
634
635 /* FIXME: fill in port multiplier number */
636
637 hdr->flags = cpu_to_le32(flags);
638
639	/* FIXME: the low-order 5 bits hold the TAG when NCQ is enabled */
640 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
641 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
642 else
643 hdr_tag = tag;
644
645 hdr->tags = cpu_to_le32(hdr_tag);
646
647 hdr->data_len = cpu_to_le32(task->total_xfer_len);
648
649 /*
650 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
651 */
652
653 /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
654 buf_cmd = buf_tmp = slot->buf;
655 buf_tmp_dma = slot->buf_dma;
656
657 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
658
659 buf_tmp += MVS_ATA_CMD_SZ;
660 buf_tmp_dma += MVS_ATA_CMD_SZ;
661#if _MV_DUMP
662 slot->cmd_size = MVS_ATA_CMD_SZ;
663#endif
664
665 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
666 /* used for STP. unused for SATA? */
667 buf_oaf = buf_tmp;
668 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
669
670 buf_tmp += MVS_OAF_SZ;
671 buf_tmp_dma += MVS_OAF_SZ;
672
673 /* region 3: PRD table ********************************************* */
674 buf_prd = buf_tmp;
675
676 if (tei->n_elem)
677 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
678 else
679 hdr->prd_tbl = 0;
680 i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();
681
682 buf_tmp += i;
683 buf_tmp_dma += i;
684
685	/* region 4: status buffer (the larger the PRD, the smaller this buf) ****** */
686 /* FIXME: probably unused, for SATA. kept here just in case
687 * we get a STP/SATA error information record
688 */
689 slot->response = buf_tmp;
690 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
691 if (mvi->flags & MVF_FLAG_SOC)
692 hdr->reserved[0] = 0;
693
694 req_len = sizeof(struct host_to_dev_fis);
695 resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
696 sizeof(struct mvs_err_info) - i;
697
698 /* request, response lengths */
699 resp_len = min(resp_len, max_resp_len);
700 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
701
702 if (likely(!task->ata_task.device_control_reg_update))
703 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
704 /* fill in command FIS and ATAPI CDB */
705 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
706 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
707 memcpy(buf_cmd + STP_ATAPI_CMD,
708 task->ata_task.atapi_packet, 16);
709
710 /* generate open address frame hdr (first 12 bytes) */
711 /* initiator, STP, ftype 1h */
712 buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
713 buf_oaf[1] = dev->linkrate & 0xf;
714 *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
715 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
716
717 /* fill in PRD (scatter/gather) table, if any */
718 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
719#ifndef DISABLE_HOTPLUG_DMA_FIX
720 if (task->data_dir == DMA_FROM_DEVICE)
721 MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma,
722 TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
723#endif
724 return 0;
725}
726
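/*
 * Prepare an SSP command or task-management frame: build the SSP frame
 * header and IU, the open address frame, the PRD table and the status
 * buffer in the slot buffer.
 */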
727static int mvs_task_prep_ssp(struct mvs_info *mvi,
728 struct mvs_task_exec_info *tei, int is_tmf,
729 struct mvs_tmf_task *tmf)
730{
731 struct sas_task *task = tei->task;
732 struct mvs_cmd_hdr *hdr = tei->hdr;
733 struct mvs_port *port = tei->port;
734 struct domain_device *dev = task->dev;
735 struct mvs_device *mvi_dev = dev->lldd_dev;
736 struct asd_sas_port *sas_port = dev->port;
737 struct mvs_slot_info *slot;
738 void *buf_prd;
739 struct ssp_frame_hdr *ssp_hdr;
740 void *buf_tmp;
741 u8 *buf_cmd, *buf_oaf, fburst = 0;
742 dma_addr_t buf_tmp_dma;
743 u32 flags;
744 u32 resp_len, req_len, i, tag = tei->tag;
745 const u32 max_resp_len = SB_RFB_MAX;
746 u32 phy_mask;
747
748 slot = &mvi->slot_info[tag];
749
750 phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
751 sas_port->phy_mask) & TXQ_PHY_MASK;
752
753 slot->tx = mvi->tx_prod;
754 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
755 (TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
756 (phy_mask << TXQ_PHY_SHIFT));
757
758 flags = MCH_RETRY;
759 if (task->ssp_task.enable_first_burst) {
760 flags |= MCH_FBURST;
761 fburst = (1 << 7);
762 }
763 if (is_tmf)
764 flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
765 else
766 flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
767 hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
768 hdr->tags = cpu_to_le32(tag);
769 hdr->data_len = cpu_to_le32(task->total_xfer_len);
770
771 /*
772 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
773 */
774
775 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
776 buf_cmd = buf_tmp = slot->buf;
777 buf_tmp_dma = slot->buf_dma;
778
779 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
780
781 buf_tmp += MVS_SSP_CMD_SZ;
782 buf_tmp_dma += MVS_SSP_CMD_SZ;
783#if _MV_DUMP
784 slot->cmd_size = MVS_SSP_CMD_SZ;
785#endif
786
787 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
788 buf_oaf = buf_tmp;
789 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
790
791 buf_tmp += MVS_OAF_SZ;
792 buf_tmp_dma += MVS_OAF_SZ;
793
794 /* region 3: PRD table ********************************************* */
795 buf_prd = buf_tmp;
796 if (tei->n_elem)
797 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
798 else
799 hdr->prd_tbl = 0;
800
801 i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
802 buf_tmp += i;
803 buf_tmp_dma += i;
804
805	/* region 4: status buffer (the larger the PRD, the smaller this buf) ****** */
806 slot->response = buf_tmp;
807 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
808 if (mvi->flags & MVF_FLAG_SOC)
809 hdr->reserved[0] = 0;
810
811 resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
812 sizeof(struct mvs_err_info) - i;
813 resp_len = min(resp_len, max_resp_len);
814
815 req_len = sizeof(struct ssp_frame_hdr) + 28;
816
817 /* request, response lengths */
818 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
819
820 /* generate open address frame hdr (first 12 bytes) */
821 /* initiator, SSP, ftype 1h */
822 buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
823 buf_oaf[1] = dev->linkrate & 0xf;
824 *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
825 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
826
827 /* fill in SSP frame header (Command Table.SSP frame header) */
828 ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
829
830 if (is_tmf)
831 ssp_hdr->frame_type = SSP_TASK;
832 else
833 ssp_hdr->frame_type = SSP_COMMAND;
834
835 memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
836 HASHED_SAS_ADDR_SIZE);
837 memcpy(ssp_hdr->hashed_src_addr,
838 dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
839 ssp_hdr->tag = cpu_to_be16(tag);
840
841 /* fill in IU for TASK and Command Frame */
842 buf_cmd += sizeof(*ssp_hdr);
843 memcpy(buf_cmd, &task->ssp_task.LUN, 8);
844
845 if (ssp_hdr->frame_type != SSP_TASK) {
846 buf_cmd[9] = fburst | task->ssp_task.task_attr |
847 (task->ssp_task.task_prio << 3);
848 memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
849 } else{
850 buf_cmd[10] = tmf->tmf;
851 switch (tmf->tmf) {
852 case TMF_ABORT_TASK:
853 case TMF_QUERY_TASK:
854 buf_cmd[12] =
855 (tmf->tag_of_task_to_be_managed >> 8) & 0xff;
856 buf_cmd[13] =
857 tmf->tag_of_task_to_be_managed & 0xff;
858 break;
859 default:
860 break;
861 }
862 }
863 /* fill in PRD (scatter/gather) table, if any */
864 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
865 return 0;
866}
867
868#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
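/*
 * Common execution path for normal commands and internal TMFs: allocate a
 * tag per task, build the protocol-specific slot contents and start
 * delivery once for the whole batch.
 */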
869static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
870 struct completion *completion,int is_tmf,
871 struct mvs_tmf_task *tmf)
872{
873 struct domain_device *dev = task->dev;
874 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
875 struct mvs_info *mvi = mvi_dev->mvi_info;
876 struct mvs_task_exec_info tei;
877 struct sas_task *t = task;
878 struct mvs_slot_info *slot;
879 u32 tag = 0xdeadbeef, rc, n_elem = 0;
880 u32 n = num, pass = 0;
881 unsigned long flags = 0;
882
883 if (!dev->port) {
884 struct task_status_struct *tsm = &t->task_status;
885
886 tsm->resp = SAS_TASK_UNDELIVERED;
887 tsm->stat = SAS_PHY_DOWN;
888 t->task_done(t);
889 return 0;
890 }
891
892 spin_lock_irqsave(&mvi->lock, flags);
893 do {
894 dev = t->dev;
895 mvi_dev = dev->lldd_dev;
896 if (DEV_IS_GONE(mvi_dev)) {
897 if (mvi_dev)
898 mv_dprintk("device %d not ready.\n",
899 mvi_dev->device_id);
900 else
901 mv_dprintk("device %016llx not ready.\n",
902 SAS_ADDR(dev->sas_addr));
903
904 rc = SAS_PHY_DOWN;
905 goto out_done;
906 }
907
908 if (dev->port->id >= mvi->chip->n_phy)
909 tei.port = &mvi->port[dev->port->id - mvi->chip->n_phy];
910 else
911 tei.port = &mvi->port[dev->port->id];
912
913 if (!tei.port->port_attached) {
914 if (sas_protocol_ata(t->task_proto)) {
915				mv_dprintk("port %d has no attached device.\n",
916					dev->port->id);
917 rc = SAS_PHY_DOWN;
918 goto out_done;
919 } else {
920 struct task_status_struct *ts = &t->task_status;
921 ts->resp = SAS_TASK_UNDELIVERED;
922 ts->stat = SAS_PHY_DOWN;
923 t->task_done(t);
924 if (n > 1)
925 t = list_entry(t->list.next,
926 struct sas_task, list);
927 continue;
928 }
929 }
930
931 if (!sas_protocol_ata(t->task_proto)) {
932 if (t->num_scatter) {
933 n_elem = dma_map_sg(mvi->dev,
934 t->scatter,
935 t->num_scatter,
936 t->data_dir);
937 if (!n_elem) {
938 rc = -ENOMEM;
939 goto err_out;
940 }
941 }
942 } else {
943 n_elem = t->num_scatter;
944 }
945
946 rc = mvs_tag_alloc(mvi, &tag);
947 if (rc)
948 goto err_out;
949
950 slot = &mvi->slot_info[tag];
951
952
953 t->lldd_task = NULL;
954 slot->n_elem = n_elem;
955 slot->slot_tag = tag;
956 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
957
958 tei.task = t;
959 tei.hdr = &mvi->slot[tag];
960 tei.tag = tag;
961 tei.n_elem = n_elem;
962 switch (t->task_proto) {
963 case SAS_PROTOCOL_SMP:
964 rc = mvs_task_prep_smp(mvi, &tei);
965 break;
966 case SAS_PROTOCOL_SSP:
967 rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
968 break;
969 case SAS_PROTOCOL_SATA:
970 case SAS_PROTOCOL_STP:
971 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
972 rc = mvs_task_prep_ata(mvi, &tei);
973 break;
974 default:
975 dev_printk(KERN_ERR, mvi->dev,
976 "unknown sas_task proto: 0x%x\n",
977 t->task_proto);
978 rc = -EINVAL;
979 break;
980 }
981
982 if (rc) {
983 mv_dprintk("rc is %x\n", rc);
984 goto err_out_tag;
985 }
986 slot->task = t;
987 slot->port = tei.port;
988 t->lldd_task = slot;
989 list_add_tail(&slot->entry, &tei.port->list);
990 /* TODO: select normal or high priority */
991 spin_lock(&t->task_state_lock);
992 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
993 spin_unlock(&t->task_state_lock);
994
995 mvs_hba_memory_dump(mvi, tag, t->task_proto);
996 mvi_dev->runing_req++;
997 ++pass;
998 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
999 if (n > 1)
1000 t = list_entry(t->list.next, struct sas_task, list);
1001 } while (--n);
1002 rc = 0;
1003 goto out_done;
1004
1005err_out_tag:
1006 mvs_tag_free(mvi, tag);
1007err_out:
1008
1009 dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
1010 if (!sas_protocol_ata(t->task_proto))
1011 if (n_elem)
1012 dma_unmap_sg(mvi->dev, t->scatter, n_elem,
1013 t->data_dir);
1014out_done:
1015 if (likely(pass)) {
1016 MVS_CHIP_DISP->start_delivery(mvi,
1017 (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
1018 }
1019 spin_unlock_irqrestore(&mvi->lock, flags);
1020 return rc;
1021}
1022
1023int mvs_queue_command(struct sas_task *task, const int num,
1024 gfp_t gfp_flags)
1025{
1026 return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
1027}
1028
1029static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
1030{
1031 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1032 mvs_tag_clear(mvi, slot_idx);
1033}
1034
1035static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
1036 struct mvs_slot_info *slot, u32 slot_idx)
1037{
1038 if (!slot->task)
1039 return;
1040 if (!sas_protocol_ata(task->task_proto))
1041 if (slot->n_elem)
1042 dma_unmap_sg(mvi->dev, task->scatter,
1043 slot->n_elem, task->data_dir);
1044
1045 switch (task->task_proto) {
1046 case SAS_PROTOCOL_SMP:
1047 dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
1048 PCI_DMA_FROMDEVICE);
1049 dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
1050 PCI_DMA_TODEVICE);
1051 break;
1052
1053 case SAS_PROTOCOL_SATA:
1054 case SAS_PROTOCOL_STP:
1055 case SAS_PROTOCOL_SSP:
1056 default:
1057 /* do nothing */
1058 break;
1059 }
1060 list_del_init(&slot->entry);
1061 task->lldd_task = NULL;
1062 slot->task = NULL;
1063 slot->port = NULL;
1064 slot->slot_tag = 0xFFFFFFFF;
1065 mvs_slot_free(mvi, slot_idx);
1066}
1067
1068static void mvs_update_wideport(struct mvs_info *mvi, int i)
1069{
1070 struct mvs_phy *phy = &mvi->phy[i];
1071 struct mvs_port *port = phy->port;
1072 int j, no;
1073
1074 for_each_phy(port->wide_port_phymap, j, no) {
1075 if (j & 1) {
1076 MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
1077 PHYR_WIDE_PORT);
1078 MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
1079 port->wide_port_phymap);
1080 } else {
1081 MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
1082 PHYR_WIDE_PORT);
1083 MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
1084 0);
1085 }
1086 }
1087}
1088
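/*
 * Test whether a phy is ready; when the link is gone, detach the phy from
 * its port and refresh the wide-port map.
 */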
1089static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
1090{
1091 u32 tmp;
1092 struct mvs_phy *phy = &mvi->phy[i];
1093 struct mvs_port *port = phy->port;
1094
1095 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
1096 if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
1097 if (!port)
1098 phy->phy_attached = 1;
1099 return tmp;
1100 }
1101
1102 if (port) {
1103 if (phy->phy_type & PORT_TYPE_SAS) {
1104 port->wide_port_phymap &= ~(1U << i);
1105 if (!port->wide_port_phymap)
1106 port->port_attached = 0;
1107 mvs_update_wideport(mvi, i);
1108 } else if (phy->phy_type & PORT_TYPE_SATA)
1109 port->port_attached = 0;
1110 phy->port = NULL;
1111 phy->phy_attached = 0;
1112 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
1113 }
1114 return 0;
1115}
1116
1117static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
1118{
1119 u32 *s = (u32 *) buf;
1120
1121 if (!s)
1122 return NULL;
1123
1124 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
1125 s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
1126
1127 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
1128 s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
1129
1130 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
1131 s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
1132
1133 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
1134 s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
1135
1136	/* Workaround: treat some ATAPI devices as ATA */
1137 if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
1138 s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);
1139
1140 return s;
1141}
1142
1143static u32 mvs_is_sig_fis_received(u32 irq_status)
1144{
1145 return irq_status & PHYEV_SIG_FIS;
1146}
1147
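/*
 * Refresh a phy's attached-device information after an OOB / signature
 * event and set up the identify or D2H FIS frame that libsas will read.
 */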
1148void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1149{
1150 struct mvs_phy *phy = &mvi->phy[i];
1151 struct sas_identify_frame *id;
1152
1153 id = (struct sas_identify_frame *)phy->frame_rcvd;
1154
1155 if (get_st) {
1156 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
1157 phy->phy_status = mvs_is_phy_ready(mvi, i);
1158 }
1159
1160 if (phy->phy_status) {
1161 int oob_done = 0;
1162 struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;
1163
1164 oob_done = MVS_CHIP_DISP->oob_done(mvi, i);
1165
1166 MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
1167 if (phy->phy_type & PORT_TYPE_SATA) {
1168 phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
1169 if (mvs_is_sig_fis_received(phy->irq_status)) {
1170 phy->phy_attached = 1;
1171 phy->att_dev_sas_addr =
1172 i + mvi->id * mvi->chip->n_phy;
1173 if (oob_done)
1174 sas_phy->oob_mode = SATA_OOB_MODE;
1175 phy->frame_rcvd_size =
1176 sizeof(struct dev_to_host_fis);
1177 mvs_get_d2h_reg(mvi, i, id);
1178 } else {
1179 u32 tmp;
1180 dev_printk(KERN_DEBUG, mvi->dev,
1181 "Phy%d : No sig fis\n", i);
1182 tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
1183 MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
1184 tmp | PHYEV_SIG_FIS);
1185 phy->phy_attached = 0;
1186 phy->phy_type &= ~PORT_TYPE_SATA;
1187 MVS_CHIP_DISP->phy_reset(mvi, i, 0);
1188 goto out_done;
1189 }
1190 } else if (phy->phy_type & PORT_TYPE_SAS
1191 || phy->att_dev_info & PORT_SSP_INIT_MASK) {
1192 phy->phy_attached = 1;
1193 phy->identify.device_type =
1194 phy->att_dev_info & PORT_DEV_TYPE_MASK;
1195
1196 if (phy->identify.device_type == SAS_END_DEV)
1197 phy->identify.target_port_protocols =
1198 SAS_PROTOCOL_SSP;
1199 else if (phy->identify.device_type != NO_DEVICE)
1200 phy->identify.target_port_protocols =
1201 SAS_PROTOCOL_SMP;
1202 if (oob_done)
1203 sas_phy->oob_mode = SAS_OOB_MODE;
1204 phy->frame_rcvd_size =
1205 sizeof(struct sas_identify_frame);
1206 }
1207 memcpy(sas_phy->attached_sas_addr,
1208 &phy->att_dev_sas_addr, SAS_ADDR_SIZE);
1209
1210 if (MVS_CHIP_DISP->phy_work_around)
1211 MVS_CHIP_DISP->phy_work_around(mvi, i);
1212 }
1213	mv_dprintk("port %d attached dev info is %x\n",
1214		i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
1215	mv_dprintk("port %d attached sas addr is %llx\n",
1216		i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
1217out_done:
1218 if (get_st)
1219 MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
1220}
1221
1222static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
1223{
1224 struct sas_ha_struct *sas_ha = sas_phy->ha;
1225 struct mvs_info *mvi = NULL; int i = 0, hi;
1226 struct mvs_phy *phy = sas_phy->lldd_phy;
1227 struct asd_sas_port *sas_port = sas_phy->port;
1228 struct mvs_port *port;
1229 unsigned long flags = 0;
1230 if (!sas_port)
1231 return;
1232
1233 while (sas_ha->sas_phy[i]) {
1234 if (sas_ha->sas_phy[i] == sas_phy)
1235 break;
1236 i++;
1237 }
1238 hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
1239 mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
1240 if (sas_port->id >= mvi->chip->n_phy)
1241 port = &mvi->port[sas_port->id - mvi->chip->n_phy];
1242 else
1243 port = &mvi->port[sas_port->id];
1244 if (lock)
1245 spin_lock_irqsave(&mvi->lock, flags);
1246 port->port_attached = 1;
1247 phy->port = port;
1248 if (phy->phy_type & PORT_TYPE_SAS) {
1249 port->wide_port_phymap = sas_port->phy_mask;
1250 mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
1251 mvs_update_wideport(mvi, sas_phy->id);
1252 }
1253 if (lock)
1254 spin_unlock_irqrestore(&mvi->lock, flags);
1255}
1256
1257static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
1258{
1259 /*Nothing*/
1260}
1261
1262
1263void mvs_port_formed(struct asd_sas_phy *sas_phy)
1264{
1265 mvs_port_notify_formed(sas_phy, 1);
1266}
1267
1268void mvs_port_deformed(struct asd_sas_phy *sas_phy)
1269{
1270 mvs_port_notify_deformed(sas_phy, 1);
1271}
1272
1273struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
1274{
1275 u32 dev;
1276 for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
1277 if (mvi->devices[dev].dev_type == NO_DEVICE) {
1278 mvi->devices[dev].device_id = dev;
1279 return &mvi->devices[dev];
1280 }
1281 }
1282
1283 if (dev == MVS_MAX_DEVICES)
1284		mv_printk("at most %d devices are supported; ignoring the rest.\n",
1285			MVS_MAX_DEVICES);
1286
1287 return NULL;
1288}
1289
1290void mvs_free_dev(struct mvs_device *mvi_dev)
1291{
1292 u32 id = mvi_dev->device_id;
1293 memset(mvi_dev, 0, sizeof(*mvi_dev));
1294 mvi_dev->device_id = id;
1295 mvi_dev->dev_type = NO_DEVICE;
1296 mvi_dev->dev_status = MVS_DEV_NORMAL;
1297 mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
1298}
1299
1300int mvs_dev_found_notify(struct domain_device *dev, int lock)
1301{
1302 unsigned long flags = 0;
1303 int res = 0;
1304 struct mvs_info *mvi = NULL;
1305 struct domain_device *parent_dev = dev->parent;
1306 struct mvs_device *mvi_device;
1307
1308 mvi = mvs_find_dev_mvi(dev);
1309
1310 if (lock)
1311 spin_lock_irqsave(&mvi->lock, flags);
1312
1313 mvi_device = mvs_alloc_dev(mvi);
1314 if (!mvi_device) {
1315 res = -1;
1316 goto found_out;
1317 }
1318 dev->lldd_dev = mvi_device;
1319 mvi_device->dev_type = dev->dev_type;
1320 mvi_device->mvi_info = mvi;
1321 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
1322 int phy_id;
1323 u8 phy_num = parent_dev->ex_dev.num_phys;
1324 struct ex_phy *phy;
1325 for (phy_id = 0; phy_id < phy_num; phy_id++) {
1326 phy = &parent_dev->ex_dev.ex_phy[phy_id];
1327 if (SAS_ADDR(phy->attached_sas_addr) ==
1328 SAS_ADDR(dev->sas_addr)) {
1329 mvi_device->attached_phy = phy_id;
1330 break;
1331 }
1332 }
1333
1334 if (phy_id == phy_num) {
1335 mv_printk("Error: no attached dev:%016llx"
1336 "at ex:%016llx.\n",
1337 SAS_ADDR(dev->sas_addr),
1338 SAS_ADDR(parent_dev->sas_addr));
1339 res = -1;
1340 }
1341 }
1342
1343found_out:
1344 if (lock)
1345 spin_unlock_irqrestore(&mvi->lock, flags);
1346 return res;
1347}
1348
1349int mvs_dev_found(struct domain_device *dev)
1350{
1351 return mvs_dev_found_notify(dev, 1);
1352}
1353
1354void mvs_dev_gone_notify(struct domain_device *dev, int lock)
1355{
1356 unsigned long flags = 0;
1357 struct mvs_device *mvi_dev = dev->lldd_dev;
1358 struct mvs_info *mvi = mvi_dev->mvi_info;
1359
1360 if (lock)
1361 spin_lock_irqsave(&mvi->lock, flags);
1362
1363 if (mvi_dev) {
1364 mv_dprintk("found dev[%d:%x] is gone.\n",
1365 mvi_dev->device_id, mvi_dev->dev_type);
1366 mvs_free_reg_set(mvi, mvi_dev);
1367 mvs_free_dev(mvi_dev);
1368 } else {
1369 mv_dprintk("found dev is already gone.\n");
1370 }
1371 dev->lldd_dev = NULL;
1372
1373 if (lock)
1374 spin_unlock_irqrestore(&mvi->lock, flags);
1375}
1376
1377
1378void mvs_dev_gone(struct domain_device *dev)
1379{
1380 mvs_dev_gone_notify(dev, 1);
1381}
1382
1383static struct sas_task *mvs_alloc_task(void)
1384{
1385 struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL);
1386
1387 if (task) {
1388 INIT_LIST_HEAD(&task->list);
1389 spin_lock_init(&task->task_state_lock);
1390 task->task_state_flags = SAS_TASK_STATE_PENDING;
1391 init_timer(&task->timer);
1392 init_completion(&task->completion);
1393 }
1394 return task;
1395}
1396
1397static void mvs_free_task(struct sas_task *task)
1398{
1399 if (task) {
1400 BUG_ON(!list_empty(&task->list));
1401 kfree(task);
1402 }
1403}
1404
1405static void mvs_task_done(struct sas_task *task)
1406{
1407 if (!del_timer(&task->timer))
1408 return;
1409 complete(&task->completion);
1410}
1411
1412static void mvs_tmf_timedout(unsigned long data)
1413{
1414 struct sas_task *task = (struct sas_task *)data;
1415
1416 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1417 complete(&task->completion);
1418}
1419
1420/* XXX */
1421#define MVS_TASK_TIMEOUT 20
1422static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1423 void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
1424{
1425 int res, retry;
1426 struct sas_task *task = NULL;
1427
1428 for (retry = 0; retry < 3; retry++) {
1429 task = mvs_alloc_task();
1430 if (!task)
1431 return -ENOMEM;
1432
1433 task->dev = dev;
1434 task->task_proto = dev->tproto;
1435
1436 memcpy(&task->ssp_task, parameter, para_len);
1437 task->task_done = mvs_task_done;
1438
1439 task->timer.data = (unsigned long) task;
1440 task->timer.function = mvs_tmf_timedout;
1441 task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
1442 add_timer(&task->timer);
1443
1444 res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf);
1445
1446 if (res) {
1447 del_timer(&task->timer);
1448 mv_printk("executing internal task failed:%d\n", res);
1449 goto ex_err;
1450 }
1451
1452 wait_for_completion(&task->completion);
1453 res = -TMF_RESP_FUNC_FAILED;
1454 /* Even if the TMF timed out, return directly. */
1455 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1456 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1457 mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
1458 goto ex_err;
1459 }
1460 }
1461
1462 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1463 task->task_status.stat == SAM_GOOD) {
1464 res = TMF_RESP_FUNC_COMPLETE;
1465 break;
1466 }
1467
1468 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1469 task->task_status.stat == SAS_DATA_UNDERRUN) {
1470 /* no error, but return the number of bytes of
1471 * underrun */
1472 res = task->task_status.residual;
1473 break;
1474 }
1475
1476 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1477 task->task_status.stat == SAS_DATA_OVERRUN) {
1478 mv_dprintk("blocked task error.\n");
1479 res = -EMSGSIZE;
1480 break;
1481 } else {
1482 mv_dprintk(" task to dev %016llx response: 0x%x "
1483 "status 0x%x\n",
1484 SAS_ADDR(dev->sas_addr),
1485 task->task_status.resp,
1486 task->task_status.stat);
1487 mvs_free_task(task);
1488 task = NULL;
1489
1490 }
1491 }
1492ex_err:
1493 BUG_ON(retry == 3 && task != NULL);
1494 if (task != NULL)
1495 mvs_free_task(task);
1496 return res;
1497}
1498
1499static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
1500 u8 *lun, struct mvs_tmf_task *tmf)
1501{
1502 struct sas_ssp_task ssp_task;
1503 DECLARE_COMPLETION_ONSTACK(completion);
1504 if (!(dev->tproto & SAS_PROTOCOL_SSP))
1505 return TMF_RESP_FUNC_ESUPP;
1506
1507 strncpy((u8 *)&ssp_task.LUN, lun, 8);
1508
1509 return mvs_exec_internal_tmf_task(dev, &ssp_task,
1510 sizeof(ssp_task), tmf);
1511}
1512
1513
1514/* Standard mandates link reset for ATA (type 0)
1515 and hard reset for SSP (type 1), only for RECOVERY */
1516static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
1517{
1518 int rc;
1519 struct sas_phy *phy = sas_find_local_phy(dev);
1520 int reset_type = (dev->dev_type == SATA_DEV ||
1521 (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1522 rc = sas_phy_reset(phy, reset_type);
1523 msleep(2000);
1524 return rc;
1525}
1526
1527/* mandatory SAM-3 */
1528int mvs_lu_reset(struct domain_device *dev, u8 *lun)
1529{
1530 unsigned long flags;
1531 int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED;
1532 struct mvs_tmf_task tmf_task;
1533 struct mvs_device * mvi_dev = dev->lldd_dev;
1534 struct mvs_info *mvi = mvi_dev->mvi_info;
1535
1536 tmf_task.tmf = TMF_LU_RESET;
1537 mvi_dev->dev_status = MVS_DEV_EH;
1538 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1539 if (rc == TMF_RESP_FUNC_COMPLETE) {
1540 num = mvs_find_dev_phyno(dev, phyno);
1541 spin_lock_irqsave(&mvi->lock, flags);
1542 for (i = 0; i < num; i++)
1543 mvs_release_task(mvi, phyno[i], dev);
1544 spin_unlock_irqrestore(&mvi->lock, flags);
1545 }
1546 /* If it failed, fall through to I_T_Nexus reset */
1547 mv_printk("%s for device[%x]:rc= %d\n", __func__,
1548 mvi_dev->device_id, rc);
1549 return rc;
1550}
1551
1552int mvs_I_T_nexus_reset(struct domain_device *dev)
1553{
1554 unsigned long flags;
1555 int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED;
1556 struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
1557 struct mvs_info *mvi = mvi_dev->mvi_info;
1558
1559 if (mvi_dev->dev_status != MVS_DEV_EH)
1560 return TMF_RESP_FUNC_COMPLETE;
1561 rc = mvs_debug_I_T_nexus_reset(dev);
1562 mv_printk("%s for device[%x]:rc= %d\n",
1563 __func__, mvi_dev->device_id, rc);
1564
1565 /* housekeeper */
1566 num = mvs_find_dev_phyno(dev, phyno);
1567 spin_lock_irqsave(&mvi->lock, flags);
1568 for (i = 0; i < num; i++)
1569 mvs_release_task(mvi, phyno[i], dev);
1570 spin_unlock_irqrestore(&mvi->lock, flags);
1571
1572 return rc;
1573}
1574/* optional SAM-3 */
1575int mvs_query_task(struct sas_task *task)
1576{
1577 u32 tag;
1578 struct scsi_lun lun;
1579 struct mvs_tmf_task tmf_task;
1580 int rc = TMF_RESP_FUNC_FAILED;
1581
1582 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1583 struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
1584 struct domain_device *dev = task->dev;
1585 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
1586 struct mvs_info *mvi = mvi_dev->mvi_info;
1587
1588 int_to_scsilun(cmnd->device->lun, &lun);
1589 rc = mvs_find_tag(mvi, task, &tag);
1590 if (rc == 0) {
1591 rc = TMF_RESP_FUNC_FAILED;
1592 return rc;
1593 }
1594
1595 tmf_task.tmf = TMF_QUERY_TASK;
1596 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1597
1598 rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1599 switch (rc) {
1600 /* The task is still in the LUN, release it then */
1601 case TMF_RESP_FUNC_SUCC:
1602 /* The task is not in the LUN or failed, reset the phy */
1603 case TMF_RESP_FUNC_FAILED:
1604 case TMF_RESP_FUNC_COMPLETE:
1605 break;
1606 }
1607 }
1608 mv_printk("%s:rc= %d\n", __func__, rc);
1609 return rc;
1610}
1611
1612/* mandatory SAM-3, still need free task/slot info */
1613int mvs_abort_task(struct sas_task *task)
1614{
1615 struct scsi_lun lun;
1616 struct mvs_tmf_task tmf_task;
1617 struct domain_device *dev = task->dev;
1618 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
1619 struct mvs_info *mvi = mvi_dev->mvi_info;
1620 int rc = TMF_RESP_FUNC_FAILED;
1621 unsigned long flags;
1622 u32 tag;
1623
1624 if (mvi->exp_req)
1625 mvi->exp_req--;
1626 spin_lock_irqsave(&task->task_state_lock, flags);
1627 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1628 spin_unlock_irqrestore(&task->task_state_lock, flags);
1629 rc = TMF_RESP_FUNC_COMPLETE;
1630 goto out;
1631 }
1632 spin_unlock_irqrestore(&task->task_state_lock, flags);
1633 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1634 struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
1635
1636 int_to_scsilun(cmnd->device->lun, &lun);
1637 rc = mvs_find_tag(mvi, task, &tag);
1638 if (rc == 0) {
1639 mv_printk("No such tag in %s\n", __func__);
1640 rc = TMF_RESP_FUNC_FAILED;
1641 return rc;
1642 }
1643
1644 tmf_task.tmf = TMF_ABORT_TASK;
1645 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1646
1647 rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1648
1649 /* if successful, clear the task and forward the callback. */
1650 if (rc == TMF_RESP_FUNC_COMPLETE) {
1651 u32 slot_no;
1652 struct mvs_slot_info *slot;
1653
1654 if (task->lldd_task) {
1655 slot = task->lldd_task;
1656 slot_no = (u32) (slot - mvi->slot_info);
1657 mvs_slot_complete(mvi, slot_no, 1);
1658 }
1659 }
1660 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1661 task->task_proto & SAS_PROTOCOL_STP) {
1662 /* TODO: free register_set */
1663 } else {
1664 /* SMP */
1665
1666 }
1667out:
1668 if (rc != TMF_RESP_FUNC_COMPLETE)
1669 mv_printk("%s:rc= %d\n", __func__, rc);
1670 return rc;
1671}
1672
1673int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
1674{
1675 int rc = TMF_RESP_FUNC_FAILED;
1676 struct mvs_tmf_task tmf_task;
1677
1678 tmf_task.tmf = TMF_ABORT_TASK_SET;
1679 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1680
1681 return rc;
1682}
1683
1684int mvs_clear_aca(struct domain_device *dev, u8 *lun)
1685{
1686 int rc = TMF_RESP_FUNC_FAILED;
1687 struct mvs_tmf_task tmf_task;
1688
1689 tmf_task.tmf = TMF_CLEAR_ACA;
1690 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1691
1692 return rc;
1693}
1694
1695int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
1696{
1697 int rc = TMF_RESP_FUNC_FAILED;
1698 struct mvs_tmf_task tmf_task;
1699
1700 tmf_task.tmf = TMF_CLEAR_TASK_SET;
1701 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1702
1703 return rc;
1704}
1705
1706static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1707 u32 slot_idx, int err)
1708{
1709 struct mvs_device *mvi_dev = task->dev->lldd_dev;
1710 struct task_status_struct *tstat = &task->task_status;
1711 struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
1712 int stat = SAM_GOOD;
1713
1714
1715 resp->frame_len = sizeof(struct dev_to_host_fis);
1716 memcpy(&resp->ending_fis[0],
1717 SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
1718 sizeof(struct dev_to_host_fis));
1719 tstat->buf_valid_size = sizeof(*resp);
1720 if (unlikely(err))
1721 stat = SAS_PROTO_RESPONSE;
1722 return stat;
1723}
1724
1725static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1726 u32 slot_idx)
1727{
1728 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1729 int stat;
1730 u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
1731 u32 tfs = 0;
1732 enum mvs_port_type type = PORT_TYPE_SAS;
1733
1734 if (err_dw0 & CMD_ISS_STPD)
1735 MVS_CHIP_DISP->issue_stop(mvi, type, tfs);
1736
1737 MVS_CHIP_DISP->command_active(mvi, slot_idx);
1738
1739 stat = SAM_CHECK_COND;
1740 switch (task->task_proto) {
1741 case SAS_PROTOCOL_SSP:
1742 stat = SAS_ABORTED_TASK;
1743 break;
1744 case SAS_PROTOCOL_SMP:
1745 stat = SAM_CHECK_COND;
1746 break;
1747
1748 case SAS_PROTOCOL_SATA:
1749 case SAS_PROTOCOL_STP:
1750 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1751 {
1752 if (err_dw0 == 0x80400002)
1753 mv_printk("found reserved error, why?\n");
1754
1755 task->ata_task.use_ncq = 0;
1756 stat = SAS_PROTO_RESPONSE;
1757 mvs_sata_done(mvi, task, slot_idx, 1);
1758
1759 }
1760 break;
1761 default:
1762 break;
1763 }
1764
1765 return stat;
1766}
1767
1768int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1769{
1770 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1771 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1772 struct sas_task *task = slot->task;
1773 struct mvs_device *mvi_dev = NULL;
1774 struct task_status_struct *tstat;
1775
1776 bool aborted;
1777 void *to;
1778 enum exec_status sts;
1779
1780 if (mvi->exp_req)
1781 mvi->exp_req--;
1782 if (unlikely(!task || !task->lldd_task))
1783 return -1;
1784
1785 tstat = &task->task_status;
1786 mvi_dev = task->dev->lldd_dev;
1787
1788 mvs_hba_cq_dump(mvi);
1789
1790 spin_lock(&task->task_state_lock);
1791 task->task_state_flags &=
1792 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
1793 task->task_state_flags |= SAS_TASK_STATE_DONE;
1794 /* race condition */
1795 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
1796 spin_unlock(&task->task_state_lock);
1797
1798 memset(tstat, 0, sizeof(*tstat));
1799 tstat->resp = SAS_TASK_COMPLETE;
1800
1801 if (unlikely(aborted)) {
1802 tstat->stat = SAS_ABORTED_TASK;
1803 if (mvi_dev)
1804 mvi_dev->runing_req--;
1805 if (sas_protocol_ata(task->task_proto))
1806 mvs_free_reg_set(mvi, mvi_dev);
1807
1808 mvs_slot_task_free(mvi, task, slot, slot_idx);
1809 return -1;
1810 }
1811
1812 if (unlikely(!mvi_dev || !slot->port->port_attached || flags)) {
1813 mv_dprintk("port has no device.\n");
1814 tstat->stat = SAS_PHY_DOWN;
1815 goto out;
1816 }
1817
1818 /*
1819 if (unlikely((rx_desc & RXQ_ERR) || (*(u64 *) slot->response))) {
1820 mv_dprintk("Find device[%016llx] RXQ_ERR %X, "
1821 "err info:%016llx\n",
1822 SAS_ADDR(task->dev->sas_addr),
1823 rx_desc, (u64)(*(u64 *) slot->response));
1824 }
1825 */
1826
1827 /* error info record present */
1828 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
1829 tstat->stat = mvs_slot_err(mvi, task, slot_idx);
1830 goto out;
1831 }
1832
1833 switch (task->task_proto) {
1834 case SAS_PROTOCOL_SSP:
1835 /* hw says status == 0, datapres == 0 */
1836 if (rx_desc & RXQ_GOOD) {
1837 tstat->stat = SAM_GOOD;
1838 tstat->resp = SAS_TASK_COMPLETE;
1839 }
1840 /* response frame present */
1841 else if (rx_desc & RXQ_RSP) {
1842 struct ssp_response_iu *iu = slot->response +
1843 sizeof(struct mvs_err_info);
1844 sas_ssp_task_response(mvi->dev, task, iu);
1845 } else
1846 tstat->stat = SAM_CHECK_COND;
1847 break;
1848
1849 case SAS_PROTOCOL_SMP: {
1850 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
1851 tstat->stat = SAM_GOOD;
1852 to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
1853 memcpy(to + sg_resp->offset,
1854 slot->response + sizeof(struct mvs_err_info),
1855 sg_dma_len(sg_resp));
1856 kunmap_atomic(to, KM_IRQ0);
1857 break;
1858 }
1859
1860 case SAS_PROTOCOL_SATA:
1861 case SAS_PROTOCOL_STP:
1862 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
1863 tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
1864 break;
1865 }
1866
1867 default:
1868 tstat->stat = SAM_CHECK_COND;
1869 break;
1870 }
1871
1872out:
1873 if (mvi_dev) {
1874 mvi_dev->runing_req--;
1875 if (sas_protocol_ata(task->task_proto))
1876 mvs_free_reg_set(mvi, mvi_dev);
1877 }
1878 mvs_slot_task_free(mvi, task, slot, slot_idx);
1879 sts = tstat->stat;
1880
1881 spin_unlock(&mvi->lock);
1882 if (task->task_done)
1883 task->task_done(task);
1884 else
1885 mv_dprintk("task_done callback is missing.\n");
1886 spin_lock(&mvi->lock);
1887
1888 return sts;
1889}
1890
1891void mvs_release_task(struct mvs_info *mvi,
1892 int phy_no, struct domain_device *dev)
1893{
1894 int i = 0; u32 slot_idx;
1895 struct mvs_phy *phy;
1896 struct mvs_port *port;
1897 struct mvs_slot_info *slot, *slot2;
1898
1899 phy = &mvi->phy[phy_no];
1900 port = phy->port;
1901 if (!port)
1902 return;
1903
1904 list_for_each_entry_safe(slot, slot2, &port->list, entry) {
1905 struct sas_task *task;
1906 slot_idx = (u32) (slot - mvi->slot_info);
1907 task = slot->task;
1908
1909 if (dev && task->dev != dev)
1910 continue;
1911
1912 mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
1913 slot_idx, slot->slot_tag, task);
1914
1915 if (task->task_proto & SAS_PROTOCOL_SSP) {
1916 mv_printk("attached with SSP task CDB[");
1917 for (i = 0; i < 16; i++)
1918 mv_printk(" %02x", task->ssp_task.cdb[i]);
1919 mv_printk(" ]\n");
1920 }
1921
1922 mvs_slot_complete(mvi, slot_idx, 1);
1923 }
1924}
1925
1926static void mvs_phy_disconnected(struct mvs_phy *phy)
1927{
1928 phy->phy_attached = 0;
1929 phy->att_dev_info = 0;
1930 phy->att_dev_sas_addr = 0;
1931}
1932
1933static void mvs_work_queue(struct work_struct *work)
1934{
1935 struct delayed_work *dw = container_of(work, struct delayed_work, work);
1936 struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
1937 struct mvs_info *mvi = mwq->mvi;
1938 unsigned long flags;
1939
1940 spin_lock_irqsave(&mvi->lock, flags);
1941 if (mwq->handler & PHY_PLUG_EVENT) {
1942 u32 phy_no = (unsigned long) mwq->data;
1943 struct sas_ha_struct *sas_ha = mvi->sas;
1944 struct mvs_phy *phy = &mvi->phy[phy_no];
1945 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1946
1947 if (phy->phy_event & PHY_PLUG_OUT) {
1948 u32 tmp;
1949 struct sas_identify_frame *id;
1950 id = (struct sas_identify_frame *)phy->frame_rcvd;
1951 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
1952 phy->phy_event &= ~PHY_PLUG_OUT;
1953 if (!(tmp & PHY_READY_MASK)) {
1954 sas_phy_disconnected(sas_phy);
1955 mvs_phy_disconnected(phy);
1956 sas_ha->notify_phy_event(sas_phy,
1957 PHYE_LOSS_OF_SIGNAL);
1958 mv_dprintk("phy%d Removed Device\n", phy_no);
1959 } else {
1960 MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
1961 mvs_update_phyinfo(mvi, phy_no, 1);
1962 mvs_bytes_dmaed(mvi, phy_no);
1963 mvs_port_notify_formed(sas_phy, 0);
1964 mv_dprintk("phy%d Attached Device\n", phy_no);
1965 }
1966 }
1967 }
1968 list_del(&mwq->entry);
1969 spin_unlock_irqrestore(&mvi->lock, flags);
1970 kfree(mwq);
1971}
1972
1973static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
1974{
1975 struct mvs_wq *mwq;
1976 int ret = 0;
1977
1978 mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
1979 if (mwq) {
1980 mwq->mvi = mvi;
1981 mwq->data = data;
1982 mwq->handler = handler;
1983 MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
1984 list_add_tail(&mwq->entry, &mvi->wq_list);
1985 schedule_delayed_work(&mwq->work_q, HZ * 2);
1986 } else
1987 ret = -ENOMEM;
1988
1989 return ret;
1990}
1991
1992static void mvs_sig_time_out(unsigned long tphy)
1993{
1994 struct mvs_phy *phy = (struct mvs_phy *)tphy;
1995 struct mvs_info *mvi = phy->mvi;
1996 u8 phy_no;
1997
1998 for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
1999 if (&mvi->phy[phy_no] == phy) {
2000 mv_dprintk("Get signature time out, reset phy %d\n",
2001 phy_no+mvi->id*mvi->chip->n_phy);
2002 MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1);
2003 }
2004 }
2005}
2006
2007static void mvs_sig_remove_timer(struct mvs_phy *phy)
2008{
2009 if (phy->timer.function)
2010 del_timer(&phy->timer);
2011 phy->timer.function = NULL;
2012}
2013
2014void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2015{
2016 u32 tmp;
2017 struct sas_ha_struct *sas_ha = mvi->sas;
2018 struct mvs_phy *phy = &mvi->phy[phy_no];
2019 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2020
2021 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
2022 mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy,
2023 MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
2024 mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy,
2025 phy->irq_status);
2026
2027 /*
2028 * events is the port event now;
2029 * we need to check the per-port interrupt status.
2030 */
2031
2032 if (phy->irq_status & PHYEV_DCDR_ERR)
2033 mv_dprintk("port %d STP decoding error.\n",
2034 phy_no+mvi->id*mvi->chip->n_phy);
2035
2036 if (phy->irq_status & PHYEV_POOF) {
2037 if (!(phy->phy_event & PHY_PLUG_OUT)) {
2038 int dev_sata = phy->phy_type & PORT_TYPE_SATA;
2039 int ready;
2040 mvs_release_task(mvi, phy_no, NULL);
2041 phy->phy_event |= PHY_PLUG_OUT;
2042 mvs_handle_event(mvi,
2043 (void *)(unsigned long)phy_no,
2044 PHY_PLUG_EVENT);
2045 ready = mvs_is_phy_ready(mvi, phy_no);
2046 if (!ready)
2047 mv_dprintk("phy%d Unplug Notice\n",
2048 phy_no +
2049 mvi->id * mvi->chip->n_phy);
2050 if (ready || dev_sata) {
2051 if (MVS_CHIP_DISP->stp_reset)
2052 MVS_CHIP_DISP->stp_reset(mvi,
2053 phy_no);
2054 else
2055 MVS_CHIP_DISP->phy_reset(mvi,
2056 phy_no, 0);
2057 return;
2058 }
2059 }
2060 }
2061
2062 if (phy->irq_status & PHYEV_COMWAKE) {
2063 tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
2064 MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
2065 tmp | PHYEV_SIG_FIS);
2066 if (phy->timer.function == NULL) {
2067 phy->timer.data = (unsigned long)phy;
2068 phy->timer.function = mvs_sig_time_out;
2069 phy->timer.expires = jiffies + 10*HZ;
2070 add_timer(&phy->timer);
2071 }
2072 }
2073 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
2074 phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
2075 mvs_sig_remove_timer(phy);
2076 mv_dprintk("notify plug in on phy[%d]\n", phy_no);
2077 if (phy->phy_status) {
2078 mdelay(10);
2079 MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
2080 if (phy->phy_type & PORT_TYPE_SATA) {
2081 tmp = MVS_CHIP_DISP->read_port_irq_mask(
2082 mvi, phy_no);
2083 tmp &= ~PHYEV_SIG_FIS;
2084 MVS_CHIP_DISP->write_port_irq_mask(mvi,
2085 phy_no, tmp);
2086 }
2087 mvs_update_phyinfo(mvi, phy_no, 0);
2088 mvs_bytes_dmaed(mvi, phy_no);
2089 /* whether driver is going to handle hot plug */
2090 if (phy->phy_event & PHY_PLUG_OUT) {
2091 mvs_port_notify_formed(sas_phy, 0);
2092 phy->phy_event &= ~PHY_PLUG_OUT;
2093 }
2094 } else {
2095 mv_dprintk("plugin interrupt but phy%d is gone\n",
2096 phy_no + mvi->id*mvi->chip->n_phy);
2097 }
2098 } else if (phy->irq_status & PHYEV_BROAD_CH) {
2099 mv_dprintk("port %d broadcast change.\n",
2100 phy_no + mvi->id*mvi->chip->n_phy);
2101 /* exception for Samsung disk drives */
2102 mdelay(1000);
2103 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
2104 }
2105 MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
2106}
2107
2108int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
2109{
2110 u32 rx_prod_idx, rx_desc;
2111 bool attn = false;
2112
2113 /* the first dword in the RX ring is special: it contains
2114 * a mirror of the hardware's RX producer index, so that
2115 * we don't have to stall the CPU reading that register.
2116 * The actual RX ring is offset by one dword, due to this.
2117 */
2118 rx_prod_idx = mvi->rx_cons;
2119 mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
2120 if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */
2121 return 0;
2122
2123 /* The CMPL_Q may come late; read from the register and try again.
2124 * Note: if coalescing is enabled,
2125 * the register must be read every time.
2126 */
2127 if (unlikely(mvi->rx_cons == rx_prod_idx))
2128 mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;
2129
2130 if (mvi->rx_cons == rx_prod_idx)
2131 return 0;
2132
2133 while (mvi->rx_cons != rx_prod_idx) {
2134 /* increment our internal RX consumer pointer */
2135 rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
2136 rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
2137
2138 if (likely(rx_desc & RXQ_DONE))
2139 mvs_slot_complete(mvi, rx_desc, 0);
2140 if (rx_desc & RXQ_ATTN) {
2141 attn = true;
2142 } else if (rx_desc & RXQ_ERR) {
2143 if (!(rx_desc & RXQ_DONE))
2144 mvs_slot_complete(mvi, rx_desc, 0);
2145 } else if (rx_desc & RXQ_SLOT_RESET) {
2146 mvs_slot_free(mvi, rx_desc);
2147 }
2148 }
2149
2150 if (attn && self_clear)
2151 MVS_CHIP_DISP->int_full(mvi);
2152 return 0;
2153}
2154
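Note on the completion path above: mvs_int_rx() never reads the hardware producer register on the fast path; the controller mirrors its RX producer index into the first dword of the ring, and the driver walks its consumer index up to that snapshot. A minimal stand-alone sketch of this mirrored-producer walk, with purely illustrative names (cq_ring, handle_desc) rather than the driver's own:

#include <stdint.h>
#include <stdio.h>

#define CQ_RING_SZ 32				/* power of two, like MVS_RX_RING_SZ */

struct cq_ring {
	uint32_t desc[CQ_RING_SZ + 1];		/* desc[0] mirrors the h/w producer */
	uint32_t cons;				/* software consumer index */
};

static void handle_desc(uint32_t d)		/* stand-in for mvs_slot_complete() */
{
	printf("complete descriptor 0x%x\n", d);
}

static void cq_poll(struct cq_ring *r)
{
	uint32_t old = r->cons;

	r->cons = r->desc[0];			/* snapshot the mirrored producer */
	while (old != r->cons) {
		old = (old + 1) & (CQ_RING_SZ - 1);
		handle_desc(r->desc[old + 1]);	/* ring proper starts at desc[1] */
	}
}

int main(void)
{
	/* producer mirror says two entries are pending, stored at desc[2..3] */
	struct cq_ring r = { .desc = { 2, 0, 0x11, 0x22 }, .cons = 0 };

	cq_poll(&r);
	return 0;
}

The real driver additionally re-reads the producer from the chip (MVS_CHIP_DISP->rx_update) when the mirror has not moved, to cover late completion-queue writes and interrupt coalescing.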
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
new file mode 100644
index 000000000000..aa2270af1bac
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -0,0 +1,406 @@
1/*
2 * Marvell 88SE64xx/88SE94xx main function header file
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#ifndef _MV_SAS_H_
26#define _MV_SAS_H_
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/spinlock.h>
31#include <linux/delay.h>
32#include <linux/types.h>
33#include <linux/ctype.h>
34#include <linux/dma-mapping.h>
35#include <linux/pci.h>
36#include <linux/platform_device.h>
37#include <linux/interrupt.h>
38#include <linux/irq.h>
39#include <linux/vmalloc.h>
40#include <scsi/libsas.h>
41#include <scsi/scsi_tcq.h>
42#include <scsi/sas_ata.h>
43#include <linux/version.h>
44#include "mv_defs.h"
45
46#define DRV_NAME "mvsas"
47#define DRV_VERSION "0.8.2"
48#define _MV_DUMP 0
49#define MVS_ID_NOT_MAPPED 0x7f
50/* #define DISABLE_HOTPLUG_DMA_FIX */
51#define MAX_EXP_RUNNING_REQ 2
52#define WIDE_PORT_MAX_PHY 4
53#define MV_DISABLE_NCQ 0
54#define mv_printk(fmt, arg ...) \
55 printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg)
56#ifdef MV_DEBUG
57#define mv_dprintk(format, arg...) \
58 printk(KERN_DEBUG"%s %d:" format, __FILE__, __LINE__, ## arg)
59#else
60#define mv_dprintk(format, arg...)
61#endif
62#define MV_MAX_U32 0xffffffff
63
64extern struct mvs_tgt_initiator mvs_tgt;
65extern struct mvs_info *tgt_mvi;
66extern const struct mvs_dispatch mvs_64xx_dispatch;
67extern const struct mvs_dispatch mvs_94xx_dispatch;
68
69#define DEV_IS_EXPANDER(type) \
70 ((type == EDGE_DEV) || (type == FANOUT_DEV))
71
72#define bit(n) ((u32)1 << n)
73
74#define for_each_phy(__lseq_mask, __mc, __lseq) \
75 for ((__mc) = (__lseq_mask), (__lseq) = 0; \
76 (__mc) != 0 ; \
77 (++__lseq), (__mc) >>= 1)
78
79#define MV_INIT_DELAYED_WORK(w, f, d) INIT_DELAYED_WORK(w, f)
80#define UNASSOC_D2H_FIS(id) \
81 ((void *) mvi->rx_fis + 0x100 * id)
82#define SATA_RECEIVED_FIS_LIST(reg_set) \
83 ((void *) mvi->rx_fis + mvi->chip->fis_offs + 0x100 * reg_set)
84#define SATA_RECEIVED_SDB_FIS(reg_set) \
85 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x58)
86#define SATA_RECEIVED_D2H_FIS(reg_set) \
87 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x40)
88#define SATA_RECEIVED_PIO_FIS(reg_set) \
89 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x20)
90#define SATA_RECEIVED_DMA_FIS(reg_set) \
91 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x00)
92
93enum dev_status {
94 MVS_DEV_NORMAL = 0x0,
95 MVS_DEV_EH = 0x1,
96};
97
98
99struct mvs_info;
100
101struct mvs_dispatch {
102 char *name;
103 int (*chip_init)(struct mvs_info *mvi);
104 int (*spi_init)(struct mvs_info *mvi);
105 int (*chip_ioremap)(struct mvs_info *mvi);
106 void (*chip_iounmap)(struct mvs_info *mvi);
107 irqreturn_t (*isr)(struct mvs_info *mvi, int irq, u32 stat);
108 u32 (*isr_status)(struct mvs_info *mvi, int irq);
109 void (*interrupt_enable)(struct mvs_info *mvi);
110 void (*interrupt_disable)(struct mvs_info *mvi);
111
112 u32 (*read_phy_ctl)(struct mvs_info *mvi, u32 port);
113 void (*write_phy_ctl)(struct mvs_info *mvi, u32 port, u32 val);
114
115 u32 (*read_port_cfg_data)(struct mvs_info *mvi, u32 port);
116 void (*write_port_cfg_data)(struct mvs_info *mvi, u32 port, u32 val);
117 void (*write_port_cfg_addr)(struct mvs_info *mvi, u32 port, u32 addr);
118
119 u32 (*read_port_vsr_data)(struct mvs_info *mvi, u32 port);
120 void (*write_port_vsr_data)(struct mvs_info *mvi, u32 port, u32 val);
121 void (*write_port_vsr_addr)(struct mvs_info *mvi, u32 port, u32 addr);
122
123 u32 (*read_port_irq_stat)(struct mvs_info *mvi, u32 port);
124 void (*write_port_irq_stat)(struct mvs_info *mvi, u32 port, u32 val);
125
126 u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port);
127 void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val);
128
129 void (*get_sas_addr)(void *buf, u32 buflen);
130 void (*command_active)(struct mvs_info *mvi, u32 slot_idx);
131 void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type,
132 u32 tfs);
133 void (*start_delivery)(struct mvs_info *mvi, u32 tx);
134 u32 (*rx_update)(struct mvs_info *mvi);
135 void (*int_full)(struct mvs_info *mvi);
136 u8 (*assign_reg_set)(struct mvs_info *mvi, u8 *tfs);
137 void (*free_reg_set)(struct mvs_info *mvi, u8 *tfs);
138 u32 (*prd_size)(void);
139 u32 (*prd_count)(void);
140 void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
141 void (*detect_porttype)(struct mvs_info *mvi, int i);
142 int (*oob_done)(struct mvs_info *mvi, int i);
143 void (*fix_phy_info)(struct mvs_info *mvi, int i,
144 struct sas_identify_frame *id);
145 void (*phy_work_around)(struct mvs_info *mvi, int i);
146 void (*phy_set_link_rate)(struct mvs_info *mvi, u32 phy_id,
147 struct sas_phy_linkrates *rates);
148 u32 (*phy_max_link_rate)(void);
149 void (*phy_disable)(struct mvs_info *mvi, u32 phy_id);
150 void (*phy_enable)(struct mvs_info *mvi, u32 phy_id);
151 void (*phy_reset)(struct mvs_info *mvi, u32 phy_id, int hard);
152 void (*stp_reset)(struct mvs_info *mvi, u32 phy_id);
153 void (*clear_active_cmds)(struct mvs_info *mvi);
154 u32 (*spi_read_data)(struct mvs_info *mvi);
155 void (*spi_write_data)(struct mvs_info *mvi, u32 data);
156 int (*spi_buildcmd)(struct mvs_info *mvi,
157 u32 *dwCmd,
158 u8 cmd,
159 u8 read,
160 u8 length,
161 u32 addr
162 );
163 int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd);
164 int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout);
165#ifndef DISABLE_HOTPLUG_DMA_FIX
166 void (*dma_fix)(dma_addr_t buf_dma, int buf_len, int from, void *prd);
167#endif
168
169};
170
171struct mvs_chip_info {
172 u32 n_host;
173 u32 n_phy;
174 u32 fis_offs;
175 u32 fis_count;
176 u32 srs_sz;
177 u32 slot_width;
178 const struct mvs_dispatch *dispatch;
179};
180#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
181#define MVS_RX_FISL_SZ \
182 (mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100))
183#define MVS_CHIP_DISP (mvi->chip->dispatch)
184
185struct mvs_err_info {
186 __le32 flags;
187 __le32 flags2;
188};
189
190struct mvs_cmd_hdr {
191 __le32 flags; /* PRD tbl len; SAS, SATA ctl */
192 __le32 lens; /* cmd, max resp frame len */
193 __le32 tags; /* targ port xfer tag; tag */
194 __le32 data_len; /* data xfer len */
195 __le64 cmd_tbl; /* command table address */
196 __le64 open_frame; /* open addr frame address */
197 __le64 status_buf; /* status buffer address */
198 __le64 prd_tbl; /* PRD tbl address */
199 __le32 reserved[4];
200};
201
202struct mvs_port {
203 struct asd_sas_port sas_port;
204 u8 port_attached;
205 u8 wide_port_phymap;
206 struct list_head list;
207};
208
209struct mvs_phy {
210 struct mvs_info *mvi;
211 struct mvs_port *port;
212 struct asd_sas_phy sas_phy;
213 struct sas_identify identify;
214 struct scsi_device *sdev;
215 struct timer_list timer;
216 u64 dev_sas_addr;
217 u64 att_dev_sas_addr;
218 u32 att_dev_info;
219 u32 dev_info;
220 u32 phy_type;
221 u32 phy_status;
222 u32 irq_status;
223 u32 frame_rcvd_size;
224 u8 frame_rcvd[32];
225 u8 phy_attached;
226 u8 phy_mode;
227 u8 reserved[2];
228 u32 phy_event;
229 enum sas_linkrate minimum_linkrate;
230 enum sas_linkrate maximum_linkrate;
231};
232
233struct mvs_device {
234 struct list_head dev_entry;
235 enum sas_dev_type dev_type;
236 struct mvs_info *mvi_info;
237 struct domain_device *sas_device;
238 u32 attached_phy;
239 u32 device_id;
240 u32 runing_req;
241 u8 taskfileset;
242 u8 dev_status;
243 u16 reserved;
244};
245
246struct mvs_slot_info {
247 struct list_head entry;
248 union {
249 struct sas_task *task;
250 void *tdata;
251 };
252 u32 n_elem;
253 u32 tx;
254 u32 slot_tag;
255
256 /* DMA buffer for storing cmd tbl, open addr frame, status buffer,
257 * and PRD table
258 */
259 void *buf;
260 dma_addr_t buf_dma;
261#if _MV_DUMP
262 u32 cmd_size;
263#endif
264 void *response;
265 struct mvs_port *port;
266 struct mvs_device *device;
267 void *open_frame;
268};
269
270struct mvs_info {
271 unsigned long flags;
272
273 /* host-wide lock */
274 spinlock_t lock;
275
276 /* our device */
277 struct pci_dev *pdev;
278 struct device *dev;
279
280 /* enhanced mode registers */
281 void __iomem *regs;
282
283 /* peripheral or soc registers */
284 void __iomem *regs_ex;
285 u8 sas_addr[SAS_ADDR_SIZE];
286
287 /* SCSI/SAS glue */
288 struct sas_ha_struct *sas;
289 struct Scsi_Host *shost;
290
291 /* TX (delivery) DMA ring */
292 __le32 *tx;
293 dma_addr_t tx_dma;
294
295 /* cached next-producer idx */
296 u32 tx_prod;
297
298 /* RX (completion) DMA ring */
299 __le32 *rx;
300 dma_addr_t rx_dma;
301
302 /* RX consumer idx */
303 u32 rx_cons;
304
305 /* RX'd FIS area */
306 __le32 *rx_fis;
307 dma_addr_t rx_fis_dma;
308
309 /* DMA command header slots */
310 struct mvs_cmd_hdr *slot;
311 dma_addr_t slot_dma;
312
313 u32 chip_id;
314 const struct mvs_chip_info *chip;
315
316 int tags_num;
317 DECLARE_BITMAP(tags, MVS_SLOTS);
318 /* further per-slot information */
319 struct mvs_phy phy[MVS_MAX_PHYS];
320 struct mvs_port port[MVS_MAX_PHYS];
321 u32 irq;
322 u32 exp_req;
323 u32 id;
324 u64 sata_reg_set;
325 struct list_head *hba_list;
326 struct list_head soc_entry;
327 struct list_head wq_list;
328 unsigned long instance;
329 u16 flashid;
330 u32 flashsize;
331 u32 flashsectSize;
332
333 void *addon;
334 struct mvs_device devices[MVS_MAX_DEVICES];
335#ifndef DISABLE_HOTPLUG_DMA_FIX
336 void *bulk_buffer;
337 dma_addr_t bulk_buffer_dma;
338#define TRASH_BUCKET_SIZE 0x20000
339#endif
340 struct mvs_slot_info slot_info[0];
341};
342
343struct mvs_prv_info{
344 u8 n_host;
345 u8 n_phy;
346 u16 reserve;
347 struct mvs_info *mvi[2];
348};
349
350struct mvs_wq {
351 struct delayed_work work_q;
352 struct mvs_info *mvi;
353 void *data;
354 int handler;
355 struct list_head entry;
356};
357
358struct mvs_task_exec_info {
359 struct sas_task *task;
360 struct mvs_cmd_hdr *hdr;
361 struct mvs_port *port;
362 u32 tag;
363 int n_elem;
364};
365
366
367/******************** function prototype *********************/
368void mvs_get_sas_addr(void *buf, u32 buflen);
369void mvs_tag_clear(struct mvs_info *mvi, u32 tag);
370void mvs_tag_free(struct mvs_info *mvi, u32 tag);
371void mvs_tag_set(struct mvs_info *mvi, unsigned int tag);
372int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out);
373void mvs_tag_init(struct mvs_info *mvi);
374void mvs_iounmap(void __iomem *regs);
375int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex);
376void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard);
377int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
378 void *funcdata);
379void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
380 u32 off_lo, u32 off_hi, u64 sas_addr);
381int mvs_slave_alloc(struct scsi_device *scsi_dev);
382int mvs_slave_configure(struct scsi_device *sdev);
383void mvs_scan_start(struct Scsi_Host *shost);
384int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
385int mvs_queue_command(struct sas_task *task, const int num,
386 gfp_t gfp_flags);
387int mvs_abort_task(struct sas_task *task);
388int mvs_abort_task_set(struct domain_device *dev, u8 *lun);
389int mvs_clear_aca(struct domain_device *dev, u8 *lun);
390int mvs_clear_task_set(struct domain_device *dev, u8 * lun);
391void mvs_port_formed(struct asd_sas_phy *sas_phy);
392void mvs_port_deformed(struct asd_sas_phy *sas_phy);
393int mvs_dev_found(struct domain_device *dev);
394void mvs_dev_gone(struct domain_device *dev);
395int mvs_lu_reset(struct domain_device *dev, u8 *lun);
396int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags);
397int mvs_I_T_nexus_reset(struct domain_device *dev);
398int mvs_query_task(struct sas_task *task);
399void mvs_release_task(struct mvs_info *mvi, int phy_no,
400 struct domain_device *dev);
401void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events);
402void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
403int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
404void mvs_hexdump(u32 size, u8 *data, u32 baseaddr);
405#endif
406
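Note on the header above: all chip-specific register access is funnelled through the struct mvs_dispatch function-pointer table, one instance per chip family (mvs_64xx_dispatch, mvs_94xx_dispatch), reached from common code via the MVS_CHIP_DISP macro. A reduced, self-contained sketch of that dispatch pattern, with illustrative names only (hba_dispatch, gen1_*), not the driver's real tables:

#include <stdio.h>

/* cut-down dispatch table in the style of struct mvs_dispatch */
struct hba_dispatch {
	const char *name;
	unsigned int (*read_phy_ctl)(unsigned int port);
	void (*phy_reset)(unsigned int phy_id, int hard);
};

static unsigned int gen1_read_phy_ctl(unsigned int port)
{
	return 0x100 + port;			/* pretend register value */
}

static void gen1_phy_reset(unsigned int phy_id, int hard)
{
	printf("gen1: %s reset on phy %u\n", hard ? "hard" : "link", phy_id);
}

static const struct hba_dispatch gen1_dispatch = {
	.name		= "gen1",
	.read_phy_ctl	= gen1_read_phy_ctl,
	.phy_reset	= gen1_phy_reset,
};

struct hba {
	const struct hba_dispatch *dispatch;
};

#define HBA_DISP(h)	((h)->dispatch)		/* analogous to MVS_CHIP_DISP */

int main(void)
{
	struct hba h = { .dispatch = &gen1_dispatch };

	printf("phy ctl = 0x%x\n", HBA_DISP(&h)->read_phy_ctl(0));
	HBA_DISP(&h)->phy_reset(0, 1);		/* resolves per chip family */
	return 0;
}

Swapping in a second table (for example a gen2_dispatch) changes behaviour without touching the common code, which is how mv_sas.c stays identical across the 64xx and 94xx parts.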
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 3b7240e40819..e3c482aa87b5 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -5444,7 +5444,7 @@ static void ncr_getsync(struct ncb *np, u_char sfac, u_char *fakp, u_char *scntl
5444 ** input speed faster than the period. 5444 ** input speed faster than the period.
5445 */ 5445 */
5446 kpc = per * clk; 5446 kpc = per * clk;
5447 while (--div >= 0) 5447 while (--div > 0)
5448 if (kpc >= (div_10M[div] << 2)) break; 5448 if (kpc >= (div_10M[div] << 2)) break;
5449 5449
5450 /* 5450 /*
diff --git a/drivers/scsi/osd/Kbuild b/drivers/scsi/osd/Kbuild
index 0e207aa67d16..5fd73d77c3af 100644
--- a/drivers/scsi/osd/Kbuild
+++ b/drivers/scsi/osd/Kbuild
@@ -11,31 +11,6 @@
11# it under the terms of the GNU General Public License version 2 11# it under the terms of the GNU General Public License version 2
12# 12#
13 13
14ifneq ($(OSD_INC),)
15# we are built out-of-tree Kconfigure everything as on
16
17CONFIG_SCSI_OSD_INITIATOR=m
18ccflags-y += -DCONFIG_SCSI_OSD_INITIATOR -DCONFIG_SCSI_OSD_INITIATOR_MODULE
19
20CONFIG_SCSI_OSD_ULD=m
21ccflags-y += -DCONFIG_SCSI_OSD_ULD -DCONFIG_SCSI_OSD_ULD_MODULE
22
23# CONFIG_SCSI_OSD_DPRINT_SENSE =
24# 0 - no print of errors
25# 1 - print errors
26# 2 - errors + warrnings
27ccflags-y += -DCONFIG_SCSI_OSD_DPRINT_SENSE=1
28
29# Uncomment to turn debug on
30# ccflags-y += -DCONFIG_SCSI_OSD_DEBUG
31
32# if we are built out-of-tree and the hosting kernel has OSD headers
33# then "ccflags-y +=" will not pick the out-off-tree headers. Only by doing
34# this it will work. This might break in future kernels
35LINUXINCLUDE := -I$(OSD_INC) $(LINUXINCLUDE)
36
37endif
38
39# libosd.ko - osd-initiator library 14# libosd.ko - osd-initiator library
40libosd-y := osd_initiator.o 15libosd-y := osd_initiator.o
41obj-$(CONFIG_SCSI_OSD_INITIATOR) += libosd.o 16obj-$(CONFIG_SCSI_OSD_INITIATOR) += libosd.o
diff --git a/drivers/scsi/osd/Makefile b/drivers/scsi/osd/Makefile
deleted file mode 100755
index d905344f83ba..000000000000
--- a/drivers/scsi/osd/Makefile
+++ /dev/null
@@ -1,37 +0,0 @@
1#
2# Makefile for the OSD modules (out of tree)
3#
4# Copyright (C) 2008 Panasas Inc. All rights reserved.
5#
6# Authors:
7# Boaz Harrosh <bharrosh@panasas.com>
8# Benny Halevy <bhalevy@panasas.com>
9#
10# This program is free software; you can redistribute it and/or modify
11# it under the terms of the GNU General Public License version 2
12#
13# This Makefile is used to call the kernel Makefile in case of an out-of-tree
14# build.
15# $KSRC should point to a Kernel source tree otherwise host's default is
16# used. (eg. /lib/modules/`uname -r`/build)
17
18# include path for out-of-tree Headers
19OSD_INC ?= `pwd`/../../../include
20
21# allow users to override these
22# e.g. to compile for a kernel that you aren't currently running
23KSRC ?= /lib/modules/$(shell uname -r)/build
24KBUILD_OUTPUT ?=
25ARCH ?=
26V ?= 0
27
28# this is the basic Kbuild out-of-tree invocation, with the M= option
29KBUILD_BASE = +$(MAKE) -C $(KSRC) M=`pwd` KBUILD_OUTPUT=$(KBUILD_OUTPUT) ARCH=$(ARCH) V=$(V)
30
31all: libosd
32
33libosd: ;
34 $(KBUILD_BASE) OSD_INC=$(OSD_INC) modules
35
36clean:
37 $(KBUILD_BASE) clean
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 1ce6b24abab2..7a117c18114c 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -118,39 +118,39 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
118 _osd_ver_desc(or)); 118 _osd_ver_desc(or));
119 119
120 pFirst = get_attrs[a++].val_ptr; 120 pFirst = get_attrs[a++].val_ptr;
121 OSD_INFO("OSD_ATTR_RI_VENDOR_IDENTIFICATION [%s]\n", 121 OSD_INFO("VENDOR_IDENTIFICATION [%s]\n",
122 (char *)pFirst); 122 (char *)pFirst);
123 123
124 pFirst = get_attrs[a++].val_ptr; 124 pFirst = get_attrs[a++].val_ptr;
125 OSD_INFO("OSD_ATTR_RI_PRODUCT_IDENTIFICATION [%s]\n", 125 OSD_INFO("PRODUCT_IDENTIFICATION [%s]\n",
126 (char *)pFirst); 126 (char *)pFirst);
127 127
128 pFirst = get_attrs[a++].val_ptr; 128 pFirst = get_attrs[a++].val_ptr;
129 OSD_INFO("OSD_ATTR_RI_PRODUCT_MODEL [%s]\n", 129 OSD_INFO("PRODUCT_MODEL [%s]\n",
130 (char *)pFirst); 130 (char *)pFirst);
131 131
132 pFirst = get_attrs[a++].val_ptr; 132 pFirst = get_attrs[a++].val_ptr;
133 OSD_INFO("OSD_ATTR_RI_PRODUCT_REVISION_LEVEL [%u]\n", 133 OSD_INFO("PRODUCT_REVISION_LEVEL [%u]\n",
134 pFirst ? get_unaligned_be32(pFirst) : ~0U); 134 pFirst ? get_unaligned_be32(pFirst) : ~0U);
135 135
136 pFirst = get_attrs[a++].val_ptr; 136 pFirst = get_attrs[a++].val_ptr;
137 OSD_INFO("OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER [%s]\n", 137 OSD_INFO("PRODUCT_SERIAL_NUMBER [%s]\n",
138 (char *)pFirst); 138 (char *)pFirst);
139 139
140 pFirst = get_attrs[a].val_ptr; 140 pFirst = get_attrs[a].val_ptr;
141 OSD_INFO("OSD_ATTR_RI_OSD_NAME [%s]\n", (char *)pFirst); 141 OSD_INFO("OSD_NAME [%s]\n", (char *)pFirst);
142 a++; 142 a++;
143 143
144 pFirst = get_attrs[a++].val_ptr; 144 pFirst = get_attrs[a++].val_ptr;
145 OSD_INFO("OSD_ATTR_RI_TOTAL_CAPACITY [0x%llx]\n", 145 OSD_INFO("TOTAL_CAPACITY [0x%llx]\n",
146 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); 146 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
147 147
148 pFirst = get_attrs[a++].val_ptr; 148 pFirst = get_attrs[a++].val_ptr;
149 OSD_INFO("OSD_ATTR_RI_USED_CAPACITY [0x%llx]\n", 149 OSD_INFO("USED_CAPACITY [0x%llx]\n",
150 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); 150 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
151 151
152 pFirst = get_attrs[a++].val_ptr; 152 pFirst = get_attrs[a++].val_ptr;
153 OSD_INFO("OSD_ATTR_RI_NUMBER_OF_PARTITIONS [%llu]\n", 153 OSD_INFO("NUMBER_OF_PARTITIONS [%llu]\n",
154 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); 154 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
155 155
156 if (a >= nelem) 156 if (a >= nelem)
@@ -158,7 +158,7 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
158 158
159 /* FIXME: Where are the time utilities */ 159 /* FIXME: Where are the time utilities */
160 pFirst = get_attrs[a++].val_ptr; 160 pFirst = get_attrs[a++].val_ptr;
161 OSD_INFO("OSD_ATTR_RI_CLOCK [0x%02x%02x%02x%02x%02x%02x]\n", 161 OSD_INFO("CLOCK [0x%02x%02x%02x%02x%02x%02x]\n",
162 ((char *)pFirst)[0], ((char *)pFirst)[1], 162 ((char *)pFirst)[0], ((char *)pFirst)[1],
163 ((char *)pFirst)[2], ((char *)pFirst)[3], 163 ((char *)pFirst)[2], ((char *)pFirst)[3],
164 ((char *)pFirst)[4], ((char *)pFirst)[5]); 164 ((char *)pFirst)[4], ((char *)pFirst)[5]);
@@ -169,7 +169,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
169 169
170 hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1, 170 hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1,
171 sid_dump, sizeof(sid_dump), true); 171 sid_dump, sizeof(sid_dump), true);
172 OSD_INFO("OSD_ATTR_RI_OSD_SYSTEM_ID(%d) [%s]\n", len, sid_dump); 172 OSD_INFO("OSD_SYSTEM_ID(%d)\n"
173 " [%s]\n", len, sid_dump);
173 a++; 174 a++;
174 } 175 }
175out: 176out:
@@ -669,7 +670,7 @@ static int _osd_req_list_objects(struct osd_request *or,
669 __be16 action, const struct osd_obj_id *obj, osd_id initial_id, 670 __be16 action, const struct osd_obj_id *obj, osd_id initial_id,
670 struct osd_obj_id_list *list, unsigned nelem) 671 struct osd_obj_id_list *list, unsigned nelem)
671{ 672{
672 struct request_queue *q = or->osd_dev->scsi_device->request_queue; 673 struct request_queue *q = osd_request_queue(or->osd_dev);
673 u64 len = nelem * sizeof(osd_id) + sizeof(*list); 674 u64 len = nelem * sizeof(osd_id) + sizeof(*list);
674 struct bio *bio; 675 struct bio *bio;
675 676
@@ -778,16 +779,32 @@ EXPORT_SYMBOL(osd_req_remove_object);
778*/ 779*/
779 780
780void osd_req_write(struct osd_request *or, 781void osd_req_write(struct osd_request *or,
781 const struct osd_obj_id *obj, struct bio *bio, u64 offset) 782 const struct osd_obj_id *obj, u64 offset,
783 struct bio *bio, u64 len)
782{ 784{
783 _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, bio->bi_size); 785 _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
784 WARN_ON(or->out.bio || or->out.total_bytes); 786 WARN_ON(or->out.bio || or->out.total_bytes);
785 bio->bi_rw |= (1 << BIO_RW); 787 WARN_ON(0 == bio_rw_flagged(bio, BIO_RW));
786 or->out.bio = bio; 788 or->out.bio = bio;
787 or->out.total_bytes = bio->bi_size; 789 or->out.total_bytes = len;
788} 790}
789EXPORT_SYMBOL(osd_req_write); 791EXPORT_SYMBOL(osd_req_write);
790 792
793int osd_req_write_kern(struct osd_request *or,
794 const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
795{
796 struct request_queue *req_q = osd_request_queue(or->osd_dev);
797 struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
798
799 if (IS_ERR(bio))
800 return PTR_ERR(bio);
801
802 bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */
803 osd_req_write(or, obj, offset, bio, len);
804 return 0;
805}
806EXPORT_SYMBOL(osd_req_write_kern);
807
791/*TODO: void osd_req_append(struct osd_request *, 808/*TODO: void osd_req_append(struct osd_request *,
792 const struct osd_obj_id *, struct bio *data_out); */ 809 const struct osd_obj_id *, struct bio *data_out); */
793/*TODO: void osd_req_create_write(struct osd_request *, 810/*TODO: void osd_req_create_write(struct osd_request *,
@@ -813,16 +830,31 @@ void osd_req_flush_object(struct osd_request *or,
813EXPORT_SYMBOL(osd_req_flush_object); 830EXPORT_SYMBOL(osd_req_flush_object);
814 831
815void osd_req_read(struct osd_request *or, 832void osd_req_read(struct osd_request *or,
816 const struct osd_obj_id *obj, struct bio *bio, u64 offset) 833 const struct osd_obj_id *obj, u64 offset,
834 struct bio *bio, u64 len)
817{ 835{
818 _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, bio->bi_size); 836 _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
819 WARN_ON(or->in.bio || or->in.total_bytes); 837 WARN_ON(or->in.bio || or->in.total_bytes);
820 bio->bi_rw &= ~(1 << BIO_RW); 838 WARN_ON(1 == bio_rw_flagged(bio, BIO_RW));
821 or->in.bio = bio; 839 or->in.bio = bio;
822 or->in.total_bytes = bio->bi_size; 840 or->in.total_bytes = len;
823} 841}
824EXPORT_SYMBOL(osd_req_read); 842EXPORT_SYMBOL(osd_req_read);
825 843
844int osd_req_read_kern(struct osd_request *or,
845 const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
846{
847 struct request_queue *req_q = osd_request_queue(or->osd_dev);
848 struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
849
850 if (IS_ERR(bio))
851 return PTR_ERR(bio);
852
853 osd_req_read(or, obj, offset, bio, len);
854 return 0;
855}
856EXPORT_SYMBOL(osd_req_read_kern);
857
826void osd_req_get_attributes(struct osd_request *or, 858void osd_req_get_attributes(struct osd_request *or,
827 const struct osd_obj_id *obj) 859 const struct osd_obj_id *obj)
828{ 860{
@@ -889,26 +921,6 @@ int osd_req_add_set_attr_list(struct osd_request *or,
889} 921}
890EXPORT_SYMBOL(osd_req_add_set_attr_list); 922EXPORT_SYMBOL(osd_req_add_set_attr_list);
891 923
892static int _append_map_kern(struct request *req,
893 void *buff, unsigned len, gfp_t flags)
894{
895 struct bio *bio;
896 int ret;
897
898 bio = bio_map_kern(req->q, buff, len, flags);
899 if (IS_ERR(bio)) {
900 OSD_ERR("Failed bio_map_kern(%p, %d) => %ld\n", buff, len,
901 PTR_ERR(bio));
902 return PTR_ERR(bio);
903 }
904 ret = blk_rq_append_bio(req->q, req, bio);
905 if (ret) {
906 OSD_ERR("Failed blk_rq_append_bio(%p) => %d\n", bio, ret);
907 bio_put(bio);
908 }
909 return ret;
910}
911
912static int _req_append_segment(struct osd_request *or, 924static int _req_append_segment(struct osd_request *or,
913 unsigned padding, struct _osd_req_data_segment *seg, 925 unsigned padding, struct _osd_req_data_segment *seg,
914 struct _osd_req_data_segment *last_seg, struct _osd_io_info *io) 926 struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
@@ -924,14 +936,14 @@ static int _req_append_segment(struct osd_request *or,
924 else 936 else
925 pad_buff = io->pad_buff; 937 pad_buff = io->pad_buff;
926 938
927 ret = _append_map_kern(io->req, pad_buff, padding, 939 ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
928 or->alloc_flags); 940 or->alloc_flags);
929 if (ret) 941 if (ret)
930 return ret; 942 return ret;
931 io->total_bytes += padding; 943 io->total_bytes += padding;
932 } 944 }
933 945
934 ret = _append_map_kern(io->req, seg->buff, seg->total_bytes, 946 ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
935 or->alloc_flags); 947 or->alloc_flags);
936 if (ret) 948 if (ret)
937 return ret; 949 return ret;
@@ -1233,7 +1245,7 @@ static inline void osd_sec_parms_set_in_offset(bool is_v1,
1233} 1245}
1234 1246
1235static int _osd_req_finalize_data_integrity(struct osd_request *or, 1247static int _osd_req_finalize_data_integrity(struct osd_request *or,
1236 bool has_in, bool has_out, const u8 *cap_key) 1248 bool has_in, bool has_out, u64 out_data_bytes, const u8 *cap_key)
1237{ 1249{
1238 struct osd_security_parameters *sec_parms = _osd_req_sec_params(or); 1250 struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
1239 int ret; 1251 int ret;
@@ -1248,8 +1260,7 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
1248 }; 1260 };
1249 unsigned pad; 1261 unsigned pad;
1250 1262
1251 or->out_data_integ.data_bytes = cpu_to_be64( 1263 or->out_data_integ.data_bytes = cpu_to_be64(out_data_bytes);
1252 or->out.bio ? or->out.bio->bi_size : 0);
1253 or->out_data_integ.set_attributes_bytes = cpu_to_be64( 1264 or->out_data_integ.set_attributes_bytes = cpu_to_be64(
1254 or->set_attr.total_bytes); 1265 or->set_attr.total_bytes);
1255 or->out_data_integ.get_attributes_bytes = cpu_to_be64( 1266 or->out_data_integ.get_attributes_bytes = cpu_to_be64(
@@ -1293,6 +1304,21 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
1293/* 1304/*
1294 * osd_finalize_request and helpers 1305 * osd_finalize_request and helpers
1295 */ 1306 */
1307static struct request *_make_request(struct request_queue *q, bool has_write,
1308 struct _osd_io_info *oii, gfp_t flags)
1309{
1310 if (oii->bio)
1311 return blk_make_request(q, oii->bio, flags);
1312 else {
1313 struct request *req;
1314
1315 req = blk_get_request(q, has_write ? WRITE : READ, flags);
1316 if (unlikely(!req))
1317 return ERR_PTR(-ENOMEM);
1318
1319 return req;
1320 }
1321}
1296 1322
1297static int _init_blk_request(struct osd_request *or, 1323static int _init_blk_request(struct osd_request *or,
1298 bool has_in, bool has_out) 1324 bool has_in, bool has_out)
@@ -1301,14 +1327,18 @@ static int _init_blk_request(struct osd_request *or,
1301 struct scsi_device *scsi_device = or->osd_dev->scsi_device; 1327 struct scsi_device *scsi_device = or->osd_dev->scsi_device;
1302 struct request_queue *q = scsi_device->request_queue; 1328 struct request_queue *q = scsi_device->request_queue;
1303 struct request *req; 1329 struct request *req;
1304 int ret = -ENOMEM; 1330 int ret;
1305 1331
1306 req = blk_get_request(q, has_out, flags); 1332 req = _make_request(q, has_out, has_out ? &or->out : &or->in, flags);
1307 if (!req) 1333 if (IS_ERR(req)) {
1334 ret = PTR_ERR(req);
1308 goto out; 1335 goto out;
1336 }
1309 1337
1310 or->request = req; 1338 or->request = req;
1311 req->cmd_type = REQ_TYPE_BLOCK_PC; 1339 req->cmd_type = REQ_TYPE_BLOCK_PC;
1340 req->cmd_flags |= REQ_QUIET;
1341
1312 req->timeout = or->timeout; 1342 req->timeout = or->timeout;
1313 req->retries = or->retries; 1343 req->retries = or->retries;
1314 req->sense = or->sense; 1344 req->sense = or->sense;
@@ -1318,9 +1348,10 @@ static int _init_blk_request(struct osd_request *or,
1318 or->out.req = req; 1348 or->out.req = req;
1319 if (has_in) { 1349 if (has_in) {
1320 /* allocate bidi request */ 1350 /* allocate bidi request */
1321 req = blk_get_request(q, READ, flags); 1351 req = _make_request(q, false, &or->in, flags);
1322 if (!req) { 1352 if (IS_ERR(req)) {
1323 OSD_DEBUG("blk_get_request for bidi failed\n"); 1353 OSD_DEBUG("blk_get_request for bidi failed\n");
1354 ret = PTR_ERR(req);
1324 goto out; 1355 goto out;
1325 } 1356 }
1326 req->cmd_type = REQ_TYPE_BLOCK_PC; 1357 req->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -1341,6 +1372,7 @@ int osd_finalize_request(struct osd_request *or,
1341{ 1372{
1342 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb); 1373 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1343 bool has_in, has_out; 1374 bool has_in, has_out;
1375 u64 out_data_bytes = or->out.total_bytes;
1344 int ret; 1376 int ret;
1345 1377
1346 if (options & OSD_REQ_FUA) 1378 if (options & OSD_REQ_FUA)
@@ -1364,26 +1396,6 @@ int osd_finalize_request(struct osd_request *or,
1364 return ret; 1396 return ret;
1365 } 1397 }
1366 1398
1367 if (or->out.bio) {
1368 ret = blk_rq_append_bio(or->request->q, or->out.req,
1369 or->out.bio);
1370 if (ret) {
1371 OSD_DEBUG("blk_rq_append_bio out failed\n");
1372 return ret;
1373 }
1374 OSD_DEBUG("out bytes=%llu (bytes_req=%u)\n",
1375 _LLU(or->out.total_bytes), or->out.req->data_len);
1376 }
1377 if (or->in.bio) {
1378 ret = blk_rq_append_bio(or->request->q, or->in.req, or->in.bio);
1379 if (ret) {
1380 OSD_DEBUG("blk_rq_append_bio in failed\n");
1381 return ret;
1382 }
1383 OSD_DEBUG("in bytes=%llu (bytes_req=%u)\n",
1384 _LLU(or->in.total_bytes), or->in.req->data_len);
1385 }
1386
1387 or->out.pad_buff = sg_out_pad_buffer; 1399 or->out.pad_buff = sg_out_pad_buffer;
1388 or->in.pad_buff = sg_in_pad_buffer; 1400 or->in.pad_buff = sg_in_pad_buffer;
1389 1401
@@ -1410,7 +1422,8 @@ int osd_finalize_request(struct osd_request *or,
1410 } 1422 }
1411 } 1423 }
1412 1424
1413 ret = _osd_req_finalize_data_integrity(or, has_in, has_out, cap_key); 1425 ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
1426 out_data_bytes, cap_key);
1414 if (ret) 1427 if (ret)
1415 return ret; 1428 return ret;
1416 1429
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
index 22b59e13ba83..0bdef3390902 100644
--- a/drivers/scsi/osd/osd_uld.c
+++ b/drivers/scsi/osd/osd_uld.c
@@ -49,6 +49,7 @@
49#include <linux/device.h> 49#include <linux/device.h>
50#include <linux/idr.h> 50#include <linux/idr.h>
51#include <linux/major.h> 51#include <linux/major.h>
52#include <linux/file.h>
52 53
53#include <scsi/scsi.h> 54#include <scsi/scsi.h>
54#include <scsi/scsi_driver.h> 55#include <scsi/scsi_driver.h>
@@ -175,10 +176,9 @@ static const struct file_operations osd_fops = {
175 176
176struct osd_dev *osduld_path_lookup(const char *name) 177struct osd_dev *osduld_path_lookup(const char *name)
177{ 178{
178 struct path path; 179 struct osd_uld_device *oud;
179 struct inode *inode; 180 struct osd_dev *od;
180 struct cdev *cdev; 181 struct file *file;
181 struct osd_uld_device *uninitialized_var(oud);
182 int error; 182 int error;
183 183
184 if (!name || !*name) { 184 if (!name || !*name) {
@@ -186,52 +186,46 @@ struct osd_dev *osduld_path_lookup(const char *name)
186 return ERR_PTR(-EINVAL); 186 return ERR_PTR(-EINVAL);
187 } 187 }
188 188
189 error = kern_path(name, LOOKUP_FOLLOW, &path); 189 od = kzalloc(sizeof(*od), GFP_KERNEL);
190 if (error) { 190 if (!od)
191 OSD_ERR("path_lookup of %s failed=>%d\n", name, error); 191 return ERR_PTR(-ENOMEM);
192 return ERR_PTR(error);
193 }
194 192
195 inode = path.dentry->d_inode; 193 file = filp_open(name, O_RDWR, 0);
196 error = -EINVAL; /* Not the right device e.g osd_uld_device */ 194 if (IS_ERR(file)) {
197 if (!S_ISCHR(inode->i_mode)) { 195 error = PTR_ERR(file);
198 OSD_DEBUG("!S_ISCHR()\n"); 196 goto free_od;
199 goto out;
200 } 197 }
201 198
202 cdev = inode->i_cdev; 199 if (file->f_op != &osd_fops){
203 if (!cdev) { 200 error = -EINVAL;
204 OSD_ERR("Before mounting an OSD Based filesystem\n"); 201 goto close_file;
205 OSD_ERR(" user-mode must open+close the %s device\n", name);
206 OSD_ERR(" Example: bash: echo < %s\n", name);
207 goto out;
208 } 202 }
209 203
210 /* The Magic wand. Is it our char-dev */ 204 oud = file->private_data;
211 /* TODO: Support sg devices */
212 if (cdev->owner != THIS_MODULE) {
213 OSD_ERR("Error mounting %s - is not an OSD device\n", name);
214 goto out;
215 }
216 205
217 oud = container_of(cdev, struct osd_uld_device, cdev); 206 *od = oud->od;
207 od->file = file;
218 208
219 __uld_get(oud); 209 return od;
220 error = 0;
221 210
222out: 211close_file:
223 path_put(&path); 212 fput(file);
224 return error ? ERR_PTR(error) : &oud->od; 213free_od:
214 kfree(od);
215 return ERR_PTR(error);
225} 216}
226EXPORT_SYMBOL(osduld_path_lookup); 217EXPORT_SYMBOL(osduld_path_lookup);
227 218
228void osduld_put_device(struct osd_dev *od) 219void osduld_put_device(struct osd_dev *od)
229{ 220{
230 if (od) {
231 struct osd_uld_device *oud = container_of(od,
232 struct osd_uld_device, od);
233 221
234 __uld_put(oud); 222 if (od && !IS_ERR(od)) {
223 struct osd_uld_device *oud = od->file->private_data;
224
225 BUG_ON(od->scsi_device != oud->od.scsi_device);
226
227 fput(od->file);
228 kfree(od);
235 } 229 }
236} 230}
237EXPORT_SYMBOL(osduld_put_device); 231EXPORT_SYMBOL(osduld_put_device);
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 11a61ea8d5d9..70b60ade049e 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -530,7 +530,7 @@ static int nsp_negate_signal(struct scsi_cmnd *SCpnt, unsigned char mask,
530 if (reg == 0xff) { 530 if (reg == 0xff) {
531 break; 531 break;
532 } 532 }
533 } while ((time_out-- != 0) && (reg & mask) != 0); 533 } while ((--time_out != 0) && (reg & mask) != 0);
534 534
535 if (time_out == 0) { 535 if (time_out == 0) {
536 nsp_msg(KERN_DEBUG, " %s signal off timeut", str); 536 nsp_msg(KERN_DEBUG, " %s signal off timeut", str);
@@ -801,7 +801,7 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
801 801
802 data->FifoCount = ocount; 802 data->FifoCount = ocount;
803 803
804 if (time_out == 0) { 804 if (time_out < 0) {
805 nsp_msg(KERN_DEBUG, "pio read timeout resid=%d this_residual=%d buffers_residual=%d", 805 nsp_msg(KERN_DEBUG, "pio read timeout resid=%d this_residual=%d buffers_residual=%d",
806 scsi_get_resid(SCpnt), SCpnt->SCp.this_residual, 806 scsi_get_resid(SCpnt), SCpnt->SCp.this_residual,
807 SCpnt->SCp.buffers_residual); 807 SCpnt->SCp.buffers_residual);
@@ -897,7 +897,7 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
897 897
898 data->FifoCount = ocount; 898 data->FifoCount = ocount;
899 899
900 if (time_out == 0) { 900 if (time_out < 0) {
901 nsp_msg(KERN_DEBUG, "pio write timeout resid=0x%x", 901 nsp_msg(KERN_DEBUG, "pio write timeout resid=0x%x",
902 scsi_get_resid(SCpnt)); 902 scsi_get_resid(SCpnt));
903 } 903 }
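For reference, a hedged illustration of the timeout fixes above (not driver code): with the old "while (time_out-- != 0)" form the counter is already -1 once the budget is exhausted, so a later "if (time_out == 0)" never reports the expiry; the patch either pre-decrements or tests for a negative value instead.

/* Hypothetical polling loop; example_read_status() is an assumed helper. */
static int example_wait_signal_off(void)
{
        int time_out = 100;                     /* assumed retry budget */
        unsigned char reg;

        do {
                reg = example_read_status();    /* assumed register read */
        } while ((--time_out != 0) && (reg & 0x80) != 0);

        if (time_out == 0) {
                pr_debug("example: signal-off timed out\n");
                return -ETIMEDOUT;
        }
        return 0;
}
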
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index ca0dd33497ec..db90caf43f42 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -299,7 +299,7 @@ static irqreturn_t ps3rom_interrupt(int irq, void *data)
299 return IRQ_HANDLED; 299 return IRQ_HANDLED;
300 } 300 }
301 301
302 host = dev->sbd.core.driver_data; 302 host = ps3_system_bus_get_drvdata(&dev->sbd);
303 priv = shost_priv(host); 303 priv = shost_priv(host);
304 cmd = priv->curr_cmd; 304 cmd = priv->curr_cmd;
305 305
@@ -387,7 +387,7 @@ static int __devinit ps3rom_probe(struct ps3_system_bus_device *_dev)
387 } 387 }
388 388
389 priv = shost_priv(host); 389 priv = shost_priv(host);
390 dev->sbd.core.driver_data = host; 390 ps3_system_bus_set_drvdata(&dev->sbd, host);
391 priv->dev = dev; 391 priv->dev = dev;
392 392
393 /* One device/LUN per SCSI bus */ 393 /* One device/LUN per SCSI bus */
@@ -407,7 +407,7 @@ static int __devinit ps3rom_probe(struct ps3_system_bus_device *_dev)
407 407
408fail_host_put: 408fail_host_put:
409 scsi_host_put(host); 409 scsi_host_put(host);
410 dev->sbd.core.driver_data = NULL; 410 ps3_system_bus_set_drvdata(&dev->sbd, NULL);
411fail_teardown: 411fail_teardown:
412 ps3stor_teardown(dev); 412 ps3stor_teardown(dev);
413fail_free_bounce: 413fail_free_bounce:
@@ -418,12 +418,12 @@ fail_free_bounce:
418static int ps3rom_remove(struct ps3_system_bus_device *_dev) 418static int ps3rom_remove(struct ps3_system_bus_device *_dev)
419{ 419{
420 struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core); 420 struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
421 struct Scsi_Host *host = dev->sbd.core.driver_data; 421 struct Scsi_Host *host = ps3_system_bus_get_drvdata(&dev->sbd);
422 422
423 scsi_remove_host(host); 423 scsi_remove_host(host);
424 ps3stor_teardown(dev); 424 ps3stor_teardown(dev);
425 scsi_host_put(host); 425 scsi_host_put(host);
426 dev->sbd.core.driver_data = NULL; 426 ps3_system_bus_set_drvdata(&dev->sbd, NULL);
427 kfree(dev->bounce_buf); 427 kfree(dev->bounce_buf);
428 return 0; 428 return 0;
429} 429}
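A small, hypothetical sketch of the accessor style the ps3rom hunks switch to: bus code exposes set/get wrappers around dev_{set,get}_drvdata() so drivers stop reaching into the embedded struct device directly.

struct example_bus_device {
        struct device core;             /* embedded generic device */
};

static inline void example_bus_set_drvdata(struct example_bus_device *bdev,
                                           void *data)
{
        dev_set_drvdata(&bdev->core, data);
}

static inline void *example_bus_get_drvdata(struct example_bus_device *bdev)
{
        return dev_get_drvdata(&bdev->core);
}
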
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 5defe5ea5eda..8371d917a9a2 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -17,9 +17,12 @@
17* General Public License for more details. 17* General Public License for more details.
18* 18*
19******************************************************************************/ 19******************************************************************************/
20#define QLA1280_VERSION "3.26" 20#define QLA1280_VERSION "3.27"
21/***************************************************************************** 21/*****************************************************************************
22 Revision History: 22 Revision History:
23 Rev 3.27, February 10, 2009, Michael Reed
24 - General code cleanup.
25 - Improve error recovery.
23 Rev 3.26, January 16, 2006 Jes Sorensen 26 Rev 3.26, January 16, 2006 Jes Sorensen
24 - Ditch all < 2.6 support 27 - Ditch all < 2.6 support
25 Rev 3.25.1, February 10, 2005 Christoph Hellwig 28 Rev 3.25.1, February 10, 2005 Christoph Hellwig
@@ -435,7 +438,6 @@ static int qla1280_mailbox_command(struct scsi_qla_host *,
435 uint8_t, uint16_t *); 438 uint8_t, uint16_t *);
436static int qla1280_bus_reset(struct scsi_qla_host *, int); 439static int qla1280_bus_reset(struct scsi_qla_host *, int);
437static int qla1280_device_reset(struct scsi_qla_host *, int, int); 440static int qla1280_device_reset(struct scsi_qla_host *, int, int);
438static int qla1280_abort_device(struct scsi_qla_host *, int, int, int);
439static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int); 441static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
440static int qla1280_abort_isp(struct scsi_qla_host *); 442static int qla1280_abort_isp(struct scsi_qla_host *);
441#ifdef QLA_64BIT_PTR 443#ifdef QLA_64BIT_PTR
@@ -698,7 +700,7 @@ qla1280_info(struct Scsi_Host *host)
698} 700}
699 701
700/************************************************************************** 702/**************************************************************************
701 * qla1200_queuecommand 703 * qla1280_queuecommand
702 * Queue a command to the controller. 704 * Queue a command to the controller.
703 * 705 *
704 * Note: 706 * Note:
@@ -713,12 +715,14 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
713{ 715{
714 struct Scsi_Host *host = cmd->device->host; 716 struct Scsi_Host *host = cmd->device->host;
715 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata; 717 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
716 struct srb *sp = (struct srb *)&cmd->SCp; 718 struct srb *sp = (struct srb *)CMD_SP(cmd);
717 int status; 719 int status;
718 720
719 cmd->scsi_done = fn; 721 cmd->scsi_done = fn;
720 sp->cmd = cmd; 722 sp->cmd = cmd;
721 sp->flags = 0; 723 sp->flags = 0;
724 sp->wait = NULL;
725 CMD_HANDLE(cmd) = (unsigned char *)NULL;
722 726
723 qla1280_print_scsi_cmd(5, cmd); 727 qla1280_print_scsi_cmd(5, cmd);
724 728
@@ -738,21 +742,11 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
738 742
739enum action { 743enum action {
740 ABORT_COMMAND, 744 ABORT_COMMAND,
741 ABORT_DEVICE,
742 DEVICE_RESET, 745 DEVICE_RESET,
743 BUS_RESET, 746 BUS_RESET,
744 ADAPTER_RESET, 747 ADAPTER_RESET,
745 FAIL
746}; 748};
747 749
748/* timer action for error action processor */
749static void qla1280_error_wait_timeout(unsigned long __data)
750{
751 struct scsi_cmnd *cmd = (struct scsi_cmnd *)__data;
752 struct srb *sp = (struct srb *)CMD_SP(cmd);
753
754 complete(sp->wait);
755}
756 750
757static void qla1280_mailbox_timeout(unsigned long __data) 751static void qla1280_mailbox_timeout(unsigned long __data)
758{ 752{
@@ -767,8 +761,67 @@ static void qla1280_mailbox_timeout(unsigned long __data)
767 complete(ha->mailbox_wait); 761 complete(ha->mailbox_wait);
768} 762}
769 763
764static int
765_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
766 struct completion *wait)
767{
768 int status = FAILED;
769 struct scsi_cmnd *cmd = sp->cmd;
770
771 spin_unlock_irq(ha->host->host_lock);
772 wait_for_completion_timeout(wait, 4*HZ);
773 spin_lock_irq(ha->host->host_lock);
774 sp->wait = NULL;
775 if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
776 status = SUCCESS;
777 (*cmd->scsi_done)(cmd);
778 }
779 return status;
780}
781
782static int
783qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
784{
785 DECLARE_COMPLETION_ONSTACK(wait);
786
787 sp->wait = &wait;
788 return _qla1280_wait_for_single_command(ha, sp, &wait);
789}
790
791static int
792qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
793{
794 int cnt;
795 int status;
796 struct srb *sp;
797 struct scsi_cmnd *cmd;
798
799 status = SUCCESS;
800
801 /*
802 * Wait for all commands with the designated bus/target
803 * to be completed by the firmware
804 */
805 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
806 sp = ha->outstanding_cmds[cnt];
807 if (sp) {
808 cmd = sp->cmd;
809
810 if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
811 continue;
812 if (target >= 0 && SCSI_TCN_32(cmd) != target)
813 continue;
814
815 status = qla1280_wait_for_single_command(ha, sp);
816 if (status == FAILED)
817 break;
818 }
819 }
820 return status;
821}
822
770/************************************************************************** 823/**************************************************************************
771 * qla1200_error_action 824 * qla1280_error_action
772 * The function will attempt to perform a specified error action and 825 * The function will attempt to perform a specified error action and
773 * wait for the results (or time out). 826 * wait for the results (or time out).
774 * 827 *
@@ -780,11 +833,6 @@ static void qla1280_mailbox_timeout(unsigned long __data)
780 * Returns: 833 * Returns:
781 * SUCCESS or FAILED 834 * SUCCESS or FAILED
782 * 835 *
783 * Note:
784 * Resetting the bus always succeeds - is has to, otherwise the
785 * kernel will panic! Try a surgical technique - sending a BUS
786 * DEVICE RESET message - on the offending target before pulling
787 * the SCSI bus reset line.
788 **************************************************************************/ 836 **************************************************************************/
789static int 837static int
790qla1280_error_action(struct scsi_cmnd *cmd, enum action action) 838qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
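A hedged sketch of the lock-drop-and-wait shape introduced above; the real helper decides SUCCESS from CMD_HANDLE(cmd) == COMPLETED_HANDLE rather than from the wait's return value, and the completion itself comes from DECLARE_COMPLETION_ONSTACK() in the caller. Names below are illustrative.

static int example_wait_done(spinlock_t *host_lock, struct completion *done)
{
        int status = FAILED;            /* SCSI eh return codes */

        spin_unlock_irq(host_lock);
        if (wait_for_completion_timeout(done, 4 * HZ))
                status = SUCCESS;       /* signalled before the timeout */
        spin_lock_irq(host_lock);

        return status;
}
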
@@ -792,13 +840,19 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
792 struct scsi_qla_host *ha; 840 struct scsi_qla_host *ha;
793 int bus, target, lun; 841 int bus, target, lun;
794 struct srb *sp; 842 struct srb *sp;
795 uint16_t data; 843 int i, found;
796 unsigned char *handle; 844 int result=FAILED;
797 int result, i; 845 int wait_for_bus=-1;
846 int wait_for_target = -1;
798 DECLARE_COMPLETION_ONSTACK(wait); 847 DECLARE_COMPLETION_ONSTACK(wait);
799 struct timer_list timer; 848
849 ENTER("qla1280_error_action");
800 850
801 ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata); 851 ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
852 sp = (struct srb *)CMD_SP(cmd);
853 bus = SCSI_BUS_32(cmd);
854 target = SCSI_TCN_32(cmd);
855 lun = SCSI_LUN_32(cmd);
802 856
803 dprintk(4, "error_action %i, istatus 0x%04x\n", action, 857 dprintk(4, "error_action %i, istatus 0x%04x\n", action,
804 RD_REG_WORD(&ha->iobase->istatus)); 858 RD_REG_WORD(&ha->iobase->istatus));
@@ -807,99 +861,47 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
807 RD_REG_WORD(&ha->iobase->host_cmd), 861 RD_REG_WORD(&ha->iobase->host_cmd),
808 RD_REG_WORD(&ha->iobase->ictrl), jiffies); 862 RD_REG_WORD(&ha->iobase->ictrl), jiffies);
809 863
810 ENTER("qla1280_error_action");
811 if (qla1280_verbose) 864 if (qla1280_verbose)
812 printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, " 865 printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
813 "Handle=0x%p, action=0x%x\n", 866 "Handle=0x%p, action=0x%x\n",
814 ha->host_no, cmd, CMD_HANDLE(cmd), action); 867 ha->host_no, cmd, CMD_HANDLE(cmd), action);
815 868
816 if (cmd == NULL) {
817 printk(KERN_WARNING "(scsi?:?:?:?) Reset called with NULL "
818 "si_Cmnd pointer, failing.\n");
819 LEAVE("qla1280_error_action");
820 return FAILED;
821 }
822
823 ha = (struct scsi_qla_host *)cmd->device->host->hostdata;
824 sp = (struct srb *)CMD_SP(cmd);
825 handle = CMD_HANDLE(cmd);
826
827 /* Check for pending interrupts. */
828 data = qla1280_debounce_register(&ha->iobase->istatus);
829 /*
830 * The io_request_lock is held when the reset handler is called, hence
831 * the interrupt handler cannot be running in parallel as it also
832 * grabs the lock. /Jes
833 */
834 if (data & RISC_INT)
835 qla1280_isr(ha, &ha->done_q);
836
837 /* 869 /*
838 * Determine the suggested action that the mid-level driver wants 870 * Check to see if we have the command in the outstanding_cmds[]
839 * us to perform. 871 * array. If not then it must have completed before this error
872 * action was initiated. If the error_action isn't ABORT_COMMAND
873 * then the driver must proceed with the requested action.
840 */ 874 */
841 if (handle == (unsigned char *)INVALID_HANDLE || handle == NULL) { 875 found = -1;
842 if(action == ABORT_COMMAND) { 876 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
843 /* we never got this command */ 877 if (sp == ha->outstanding_cmds[i]) {
844 printk(KERN_INFO "qla1280: Aborting a NULL handle\n"); 878 found = i;
845 return SUCCESS; /* no action - we don't have command */ 879 sp->wait = &wait; /* we'll wait for it to complete */
880 break;
846 } 881 }
847 } else {
848 sp->wait = &wait;
849 } 882 }
850 883
851 bus = SCSI_BUS_32(cmd); 884 if (found < 0) { /* driver doesn't have command */
852 target = SCSI_TCN_32(cmd); 885 result = SUCCESS;
853 lun = SCSI_LUN_32(cmd); 886 if (qla1280_verbose) {
887 printk(KERN_INFO
888 "scsi(%ld:%d:%d:%d): specified command has "
889 "already completed.\n", ha->host_no, bus,
890 target, lun);
891 }
892 }
854 893
855 /* Overloading result. Here it means the success or fail of the
856 * *issue* of the action. When we return from the routine, it must
857 * mean the actual success or fail of the action */
858 result = FAILED;
859 switch (action) { 894 switch (action) {
860 case FAIL:
861 break;
862 895
863 case ABORT_COMMAND: 896 case ABORT_COMMAND:
864 if ((sp->flags & SRB_ABORT_PENDING)) { 897 dprintk(1, "qla1280: RISC aborting command\n");
865 printk(KERN_WARNING 898 /*
866 "scsi(): Command has a pending abort " 899 * The abort might fail due to race when the host_lock
867 "message - ABORT_PENDING.\n"); 900 * is released to issue the abort. As such, we
868 /* This should technically be impossible since we 901 * don't bother to check the return status.
869 * now wait for abort completion */ 902 */
870 break; 903 if (found >= 0)
871 } 904 qla1280_abort_command(ha, sp, found);
872
873 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
874 if (sp == ha->outstanding_cmds[i]) {
875 dprintk(1, "qla1280: RISC aborting command\n");
876 if (qla1280_abort_command(ha, sp, i) == 0)
877 result = SUCCESS;
878 else {
879 /*
880 * Since we don't know what might
881 * have happend to the command, it
882 * is unsafe to remove it from the
883 * device's queue at this point.
884 * Wait and let the escalation
885 * process take care of it.
886 */
887 printk(KERN_WARNING
888 "scsi(%li:%i:%i:%i): Unable"
889 " to abort command!\n",
890 ha->host_no, bus, target, lun);
891 }
892 }
893 }
894 break;
895
896 case ABORT_DEVICE:
897 if (qla1280_verbose)
898 printk(KERN_INFO
899 "scsi(%ld:%d:%d:%d): Queueing abort device "
900 "command.\n", ha->host_no, bus, target, lun);
901 if (qla1280_abort_device(ha, bus, target, lun) == 0)
902 result = SUCCESS;
903 break; 905 break;
904 906
905 case DEVICE_RESET: 907 case DEVICE_RESET:
@@ -907,16 +909,21 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
907 printk(KERN_INFO 909 printk(KERN_INFO
908 "scsi(%ld:%d:%d:%d): Queueing device reset " 910 "scsi(%ld:%d:%d:%d): Queueing device reset "
909 "command.\n", ha->host_no, bus, target, lun); 911 "command.\n", ha->host_no, bus, target, lun);
910 if (qla1280_device_reset(ha, bus, target) == 0) 912 if (qla1280_device_reset(ha, bus, target) == 0) {
911 result = SUCCESS; 913 /* issued device reset, set wait conditions */
914 wait_for_bus = bus;
915 wait_for_target = target;
916 }
912 break; 917 break;
913 918
914 case BUS_RESET: 919 case BUS_RESET:
915 if (qla1280_verbose) 920 if (qla1280_verbose)
916 printk(KERN_INFO "qla1280(%ld:%d): Issued bus " 921 printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
917 "reset.\n", ha->host_no, bus); 922 "reset.\n", ha->host_no, bus);
918 if (qla1280_bus_reset(ha, bus) == 0) 923 if (qla1280_bus_reset(ha, bus) == 0) {
919 result = SUCCESS; 924 /* issued bus reset, set wait conditions */
925 wait_for_bus = bus;
926 }
920 break; 927 break;
921 928
922 case ADAPTER_RESET: 929 case ADAPTER_RESET:
@@ -929,55 +936,48 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
929 "continue automatically\n", ha->host_no); 936 "continue automatically\n", ha->host_no);
930 } 937 }
931 ha->flags.reset_active = 1; 938 ha->flags.reset_active = 1;
932 /* 939
933 * We restarted all of the commands automatically, so the 940 if (qla1280_abort_isp(ha) != 0) { /* it's dead */
934 * mid-level code can expect completions momentitarily. 941 result = FAILED;
935 */ 942 }
936 if (qla1280_abort_isp(ha) == 0)
937 result = SUCCESS;
938 943
939 ha->flags.reset_active = 0; 944 ha->flags.reset_active = 0;
940 } 945 }
941 946
942 if (!list_empty(&ha->done_q)) 947 /*
943 qla1280_done(ha); 948 * At this point, the host_lock has been released and retaken
944 949 * by the issuance of the mailbox command.
945 /* If we didn't manage to issue the action, or we have no 950 * Wait for the command passed in by the mid-layer if it
946 * command to wait for, exit here */ 951 * was found by the driver. It might have been returned
947 if (result == FAILED || handle == NULL || 952 * between eh recovery steps, hence the check of the "found"
948 handle == (unsigned char *)INVALID_HANDLE) { 953 * variable.
949 /* 954 */
950 * Clear completion queue to avoid qla1280_done() trying
951 * to complete the command at a later stage after we
952 * have exited the current context
953 */
954 sp->wait = NULL;
955 goto leave;
956 }
957 955
958 /* set up a timer just in case we're really jammed */ 956 if (found >= 0)
959 init_timer(&timer); 957 result = _qla1280_wait_for_single_command(ha, sp, &wait);
960 timer.expires = jiffies + 4*HZ;
961 timer.data = (unsigned long)cmd;
962 timer.function = qla1280_error_wait_timeout;
963 add_timer(&timer);
964 958
965 /* wait for the action to complete (or the timer to expire) */ 959 if (action == ABORT_COMMAND && result != SUCCESS) {
966 spin_unlock_irq(ha->host->host_lock); 960 printk(KERN_WARNING
967 wait_for_completion(&wait); 961 "scsi(%li:%i:%i:%i): "
968 del_timer_sync(&timer); 962 "Unable to abort command!\n",
969 spin_lock_irq(ha->host->host_lock); 963 ha->host_no, bus, target, lun);
970 sp->wait = NULL; 964 }
971 965
972 /* the only action we might get a fail for is abort */ 966 /*
973 if (action == ABORT_COMMAND) { 967 * If the command passed in by the mid-layer has been
974 if(sp->flags & SRB_ABORTED) 968 * returned by the board, then wait for any additional
975 result = SUCCESS; 969 * commands which are supposed to complete based upon
976 else 970 * the error action.
977 result = FAILED; 971 *
972 * All commands are unconditionally returned during a
973 * call to qla1280_abort_isp(), ADAPTER_RESET. No need
974 * to wait for them.
975 */
976 if (result == SUCCESS && wait_for_bus >= 0) {
977 result = qla1280_wait_for_pending_commands(ha,
978 wait_for_bus, wait_for_target);
978 } 979 }
979 980
980 leave:
981 dprintk(1, "RESET returning %d\n", result); 981 dprintk(1, "RESET returning %d\n", result);
982 982
983 LEAVE("qla1280_error_action"); 983 LEAVE("qla1280_error_action");
@@ -1280,13 +1280,12 @@ qla1280_done(struct scsi_qla_host *ha)
1280 switch ((CMD_RESULT(cmd) >> 16)) { 1280 switch ((CMD_RESULT(cmd) >> 16)) {
1281 case DID_RESET: 1281 case DID_RESET:
1282 /* Issue marker command. */ 1282 /* Issue marker command. */
1283 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID); 1283 if (!ha->flags.abort_isp_active)
1284 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
1284 break; 1285 break;
1285 case DID_ABORT: 1286 case DID_ABORT:
1286 sp->flags &= ~SRB_ABORT_PENDING; 1287 sp->flags &= ~SRB_ABORT_PENDING;
1287 sp->flags |= SRB_ABORTED; 1288 sp->flags |= SRB_ABORTED;
1288 if (sp->flags & SRB_TIMEOUT)
1289 CMD_RESULT(sp->cmd) = DID_TIME_OUT << 16;
1290 break; 1289 break;
1291 default: 1290 default:
1292 break; 1291 break;
@@ -1296,12 +1295,11 @@ qla1280_done(struct scsi_qla_host *ha)
1296 scsi_dma_unmap(cmd); 1295 scsi_dma_unmap(cmd);
1297 1296
1298 /* Call the mid-level driver interrupt handler */ 1297 /* Call the mid-level driver interrupt handler */
1299 CMD_HANDLE(sp->cmd) = (unsigned char *)INVALID_HANDLE;
1300 ha->actthreads--; 1298 ha->actthreads--;
1301 1299
1302 (*(cmd)->scsi_done)(cmd); 1300 if (sp->wait == NULL)
1303 1301 (*(cmd)->scsi_done)(cmd);
1304 if(sp->wait != NULL) 1302 else
1305 complete(sp->wait); 1303 complete(sp->wait);
1306 } 1304 }
1307 LEAVE("qla1280_done"); 1305 LEAVE("qla1280_done");
@@ -2417,9 +2415,6 @@ static int
2417qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb) 2415qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2418{ 2416{
2419 struct device_reg __iomem *reg = ha->iobase; 2417 struct device_reg __iomem *reg = ha->iobase;
2420#if 0
2421 LIST_HEAD(done_q);
2422#endif
2423 int status = 0; 2418 int status = 0;
2424 int cnt; 2419 int cnt;
2425 uint16_t *optr, *iptr; 2420 uint16_t *optr, *iptr;
@@ -2493,19 +2488,9 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2493 mr = MAILBOX_REGISTER_COUNT; 2488 mr = MAILBOX_REGISTER_COUNT;
2494 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t)); 2489 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
2495 2490
2496#if 0
2497 /* Go check for any response interrupts pending. */
2498 qla1280_isr(ha, &done_q);
2499#endif
2500
2501 if (ha->flags.reset_marker) 2491 if (ha->flags.reset_marker)
2502 qla1280_rst_aen(ha); 2492 qla1280_rst_aen(ha);
2503 2493
2504#if 0
2505 if (!list_empty(&done_q))
2506 qla1280_done(ha, &done_q);
2507#endif
2508
2509 if (status) 2494 if (status)
2510 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = " 2495 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
2511 "0x%x ****\n", mb[0]); 2496 "0x%x ****\n", mb[0]);
@@ -2641,41 +2626,6 @@ qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
2641} 2626}
2642 2627
2643/* 2628/*
2644 * qla1280_abort_device
2645 * Issue an abort message to the device
2646 *
2647 * Input:
2648 * ha = adapter block pointer.
2649 * bus = SCSI BUS.
2650 * target = SCSI ID.
2651 * lun = SCSI LUN.
2652 *
2653 * Returns:
2654 * 0 = success
2655 */
2656static int
2657qla1280_abort_device(struct scsi_qla_host *ha, int bus, int target, int lun)
2658{
2659 uint16_t mb[MAILBOX_REGISTER_COUNT];
2660 int status;
2661
2662 ENTER("qla1280_abort_device");
2663
2664 mb[0] = MBC_ABORT_DEVICE;
2665 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
2666 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2667
2668 /* Issue marker command. */
2669 qla1280_marker(ha, bus, target, lun, MK_SYNC_ID_LUN);
2670
2671 if (status)
2672 dprintk(2, "qla1280_abort_device: **** FAILED ****\n");
2673
2674 LEAVE("qla1280_abort_device");
2675 return status;
2676}
2677
2678/*
2679 * qla1280_abort_command 2629 * qla1280_abort_command
2680 * Abort command aborts a specified IOCB. 2630 * Abort command aborts a specified IOCB.
2681 * 2631 *
@@ -2833,7 +2783,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2833 2783
2834 /* If room for request in request ring. */ 2784 /* If room for request in request ring. */
2835 if ((req_cnt + 2) >= ha->req_q_cnt) { 2785 if ((req_cnt + 2) >= ha->req_q_cnt) {
2836 status = 1; 2786 status = SCSI_MLQUEUE_HOST_BUSY;
2837 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt=" 2787 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
2838 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt, 2788 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
2839 req_cnt); 2789 req_cnt);
@@ -2845,7 +2795,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2845 ha->outstanding_cmds[cnt] != NULL; cnt++); 2795 ha->outstanding_cmds[cnt] != NULL; cnt++);
2846 2796
2847 if (cnt >= MAX_OUTSTANDING_COMMANDS) { 2797 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
2848 status = 1; 2798 status = SCSI_MLQUEUE_HOST_BUSY;
2849 dprintk(2, "qla1280_start_scsi: NO ROOM IN " 2799 dprintk(2, "qla1280_start_scsi: NO ROOM IN "
2850 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt); 2800 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
2851 goto out; 2801 goto out;
@@ -3108,7 +3058,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3108 ha->req_q_cnt, seg_cnt); 3058 ha->req_q_cnt, seg_cnt);
3109 /* If room for request in request ring. */ 3059 /* If room for request in request ring. */
3110 if ((req_cnt + 2) >= ha->req_q_cnt) { 3060 if ((req_cnt + 2) >= ha->req_q_cnt) {
3111 status = 1; 3061 status = SCSI_MLQUEUE_HOST_BUSY;
3112 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, " 3062 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
3113 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index, 3063 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
3114 ha->req_q_cnt, req_cnt); 3064 ha->req_q_cnt, req_cnt);
@@ -3120,7 +3070,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3120 (ha->outstanding_cmds[cnt] != 0); cnt++) ; 3070 (ha->outstanding_cmds[cnt] != 0); cnt++) ;
3121 3071
3122 if (cnt >= MAX_OUTSTANDING_COMMANDS) { 3072 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
3123 status = 1; 3073 status = SCSI_MLQUEUE_HOST_BUSY;
3124 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING " 3074 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
3125 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt); 3075 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
3126 goto out; 3076 goto out;
@@ -3487,6 +3437,7 @@ qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3487 3437
3488 /* Save ISP completion status */ 3438 /* Save ISP completion status */
3489 CMD_RESULT(sp->cmd) = 0; 3439 CMD_RESULT(sp->cmd) = 0;
3440 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3490 3441
3491 /* Place block on done queue */ 3442 /* Place block on done queue */
3492 list_add_tail(&sp->list, done_q); 3443 list_add_tail(&sp->list, done_q);
@@ -3495,7 +3446,7 @@ qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3495 * If we get here we have a real problem! 3446 * If we get here we have a real problem!
3496 */ 3447 */
3497 printk(KERN_WARNING 3448 printk(KERN_WARNING
3498 "qla1280: ISP invalid handle"); 3449 "qla1280: ISP invalid handle\n");
3499 } 3450 }
3500 } 3451 }
3501 break; 3452 break;
@@ -3753,6 +3704,8 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
3753 } 3704 }
3754 } 3705 }
3755 3706
3707 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3708
3756 /* Place command on done queue. */ 3709 /* Place command on done queue. */
3757 list_add_tail(&sp->list, done_q); 3710 list_add_tail(&sp->list, done_q);
3758 out: 3711 out:
@@ -3808,6 +3761,8 @@ qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
3808 CMD_RESULT(sp->cmd) = DID_ERROR << 16; 3761 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3809 } 3762 }
3810 3763
3764 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3765
3811 /* Place command on done queue. */ 3766 /* Place command on done queue. */
3812 list_add_tail(&sp->list, done_q); 3767 list_add_tail(&sp->list, done_q);
3813 } 3768 }
@@ -3858,19 +3813,16 @@ qla1280_abort_isp(struct scsi_qla_host *ha)
3858 struct scsi_cmnd *cmd; 3813 struct scsi_cmnd *cmd;
3859 sp = ha->outstanding_cmds[cnt]; 3814 sp = ha->outstanding_cmds[cnt];
3860 if (sp) { 3815 if (sp) {
3861
3862 cmd = sp->cmd; 3816 cmd = sp->cmd;
3863 CMD_RESULT(cmd) = DID_RESET << 16; 3817 CMD_RESULT(cmd) = DID_RESET << 16;
3864 3818 CMD_HANDLE(cmd) = COMPLETED_HANDLE;
3865 sp->cmd = NULL;
3866 ha->outstanding_cmds[cnt] = NULL; 3819 ha->outstanding_cmds[cnt] = NULL;
3867 3820 list_add_tail(&sp->list, &ha->done_q);
3868 (*cmd->scsi_done)(cmd);
3869
3870 sp->flags = 0;
3871 } 3821 }
3872 } 3822 }
3873 3823
3824 qla1280_done(ha);
3825
3874 status = qla1280_load_firmware(ha); 3826 status = qla1280_load_firmware(ha);
3875 if (status) 3827 if (status)
3876 goto out; 3828 goto out;
@@ -3955,13 +3907,6 @@ qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
3955 3907
3956 if (scsi_control == SCSI_PHASE_INVALID) { 3908 if (scsi_control == SCSI_PHASE_INVALID) {
3957 ha->bus_settings[bus].scsi_bus_dead = 1; 3909 ha->bus_settings[bus].scsi_bus_dead = 1;
3958#if 0
3959 CMD_RESULT(cp) = DID_NO_CONNECT << 16;
3960 CMD_HANDLE(cp) = INVALID_HANDLE;
3961 /* ha->actthreads--; */
3962
3963 (*(cp)->scsi_done)(cp);
3964#endif
3965 return 1; /* bus is dead */ 3910 return 1; /* bus is dead */
3966 } else { 3911 } else {
3967 ha->bus_settings[bus].scsi_bus_dead = 0; 3912 ha->bus_settings[bus].scsi_bus_dead = 0;
diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h
index d7c44b8d2b4f..834884b9eed5 100644
--- a/drivers/scsi/qla1280.h
+++ b/drivers/scsi/qla1280.h
@@ -88,7 +88,8 @@
88 88
89/* Maximum outstanding commands in ISP queues */ 89/* Maximum outstanding commands in ISP queues */
90#define MAX_OUTSTANDING_COMMANDS 512 90#define MAX_OUTSTANDING_COMMANDS 512
91#define INVALID_HANDLE (MAX_OUTSTANDING_COMMANDS + 2) 91#define COMPLETED_HANDLE ((unsigned char *) \
92 (MAX_OUTSTANDING_COMMANDS + 2))
92 93
93/* ISP request and response entry counts (37-65535) */ 94/* ISP request and response entry counts (37-65535) */
94#define REQUEST_ENTRY_CNT 255 /* Number of request entries. */ 95#define REQUEST_ENTRY_CNT 255 /* Number of request entries. */
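Illustrative only: the COMPLETED_HANDLE value above is an out-of-range index cast to a pointer, so it can never collide with NULL or with any valid outstanding-command slot. A hypothetical equivalent:

#define EXAMPLE_MAX_CMDS        512
#define EXAMPLE_DONE_HANDLE     ((unsigned char *)(EXAMPLE_MAX_CMDS + 2))

static bool example_cmd_returned(unsigned char *handle)
{
        return handle == EXAMPLE_DONE_HANDLE;   /* firmware already returned it */
}
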
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index b09993a06576..0f8796201504 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -97,7 +97,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
97 return 0; 97 return 0;
98 98
99 if (IS_NOCACHE_VPD_TYPE(ha)) 99 if (IS_NOCACHE_VPD_TYPE(ha))
100 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_nvram << 2, 100 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
101 ha->nvram_size); 101 ha->nvram_size);
102 return memory_read_from_buffer(buf, count, &off, ha->nvram, 102 return memory_read_from_buffer(buf, count, &off, ha->nvram,
103 ha->nvram_size); 103 ha->nvram_size);
@@ -692,6 +692,109 @@ static struct bin_attribute sysfs_edc_status_attr = {
692 .read = qla2x00_sysfs_read_edc_status, 692 .read = qla2x00_sysfs_read_edc_status,
693}; 693};
694 694
695static ssize_t
696qla2x00_sysfs_read_xgmac_stats(struct kobject *kobj,
697 struct bin_attribute *bin_attr,
698 char *buf, loff_t off, size_t count)
699{
700 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
701 struct device, kobj)));
702 struct qla_hw_data *ha = vha->hw;
703 int rval;
704 uint16_t actual_size;
705
706 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
707 return 0;
708
709 if (ha->xgmac_data)
710 goto do_read;
711
712 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
713 &ha->xgmac_data_dma, GFP_KERNEL);
714 if (!ha->xgmac_data) {
715 qla_printk(KERN_WARNING, ha,
716 "Unable to allocate memory for XGMAC read-data.\n");
717 return 0;
718 }
719
720do_read:
721 actual_size = 0;
722 memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
723
724 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
725 XGMAC_DATA_SIZE, &actual_size);
726 if (rval != QLA_SUCCESS) {
727 qla_printk(KERN_WARNING, ha,
728 "Unable to read XGMAC data (%x).\n", rval);
729 count = 0;
730 }
731
732 count = actual_size > count ? count: actual_size;
733 memcpy(buf, ha->xgmac_data, count);
734
735 return count;
736}
737
738static struct bin_attribute sysfs_xgmac_stats_attr = {
739 .attr = {
740 .name = "xgmac_stats",
741 .mode = S_IRUSR,
742 },
743 .size = 0,
744 .read = qla2x00_sysfs_read_xgmac_stats,
745};
746
747static ssize_t
748qla2x00_sysfs_read_dcbx_tlv(struct kobject *kobj,
749 struct bin_attribute *bin_attr,
750 char *buf, loff_t off, size_t count)
751{
752 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
753 struct device, kobj)));
754 struct qla_hw_data *ha = vha->hw;
755 int rval;
756 uint16_t actual_size;
757
758 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
759 return 0;
760
761 if (ha->dcbx_tlv)
762 goto do_read;
763
764 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
765 &ha->dcbx_tlv_dma, GFP_KERNEL);
766 if (!ha->dcbx_tlv) {
767 qla_printk(KERN_WARNING, ha,
768 "Unable to allocate memory for DCBX TLV read-data.\n");
769 return 0;
770 }
771
772do_read:
773 actual_size = 0;
774 memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
775
776 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
777 DCBX_TLV_DATA_SIZE);
778 if (rval != QLA_SUCCESS) {
779 qla_printk(KERN_WARNING, ha,
780 "Unable to read DCBX TLV data (%x).\n", rval);
781 count = 0;
782 }
783
784 memcpy(buf, ha->dcbx_tlv, count);
785
786 return count;
787}
788
789static struct bin_attribute sysfs_dcbx_tlv_attr = {
790 .attr = {
791 .name = "dcbx_tlv",
792 .mode = S_IRUSR,
793 },
794 .size = 0,
795 .read = qla2x00_sysfs_read_dcbx_tlv,
796};
797
695static struct sysfs_entry { 798static struct sysfs_entry {
696 char *name; 799 char *name;
697 struct bin_attribute *attr; 800 struct bin_attribute *attr;
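A rough sketch, with assumed names and PAGE_SIZE-sized buffers, of the lazy DMA-coherent buffer pattern the new xgmac_stats/dcbx_tlv readers follow: allocate on first read, reuse afterwards, and bound the copy to what was actually produced.

static ssize_t example_bin_read(struct device *dev, void **buf_p,
                                dma_addr_t *dma_p, char *out, size_t count)
{
        if (!*buf_p) {
                *buf_p = dma_alloc_coherent(dev, PAGE_SIZE, dma_p, GFP_KERNEL);
                if (!*buf_p)
                        return 0;       /* sysfs read: report no data */
        }

        memset(*buf_p, 0, PAGE_SIZE);
        /* ... mailbox call to have the firmware fill *buf_p goes here ... */

        if (count > PAGE_SIZE)
                count = PAGE_SIZE;
        memcpy(out, *buf_p, count);
        return count;
}
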
@@ -706,6 +809,8 @@ static struct sysfs_entry {
706 { "reset", &sysfs_reset_attr, }, 809 { "reset", &sysfs_reset_attr, },
707 { "edc", &sysfs_edc_attr, 2 }, 810 { "edc", &sysfs_edc_attr, 2 },
708 { "edc_status", &sysfs_edc_status_attr, 2 }, 811 { "edc_status", &sysfs_edc_status_attr, 2 },
812 { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
813 { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
709 { NULL }, 814 { NULL },
710}; 815};
711 816
@@ -721,6 +826,8 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
721 continue; 826 continue;
722 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw)) 827 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
723 continue; 828 continue;
829 if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw))
830 continue;
724 831
725 ret = sysfs_create_bin_file(&host->shost_gendev.kobj, 832 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
726 iter->attr); 833 iter->attr);
@@ -743,6 +850,8 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
743 continue; 850 continue;
744 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha)) 851 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
745 continue; 852 continue;
853 if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha))
854 continue;
746 855
747 sysfs_remove_bin_file(&host->shost_gendev.kobj, 856 sysfs_remove_bin_file(&host->shost_gendev.kobj,
748 iter->attr); 857 iter->attr);
@@ -1088,6 +1197,58 @@ qla2x00_flash_block_size_show(struct device *dev,
1088 return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size); 1197 return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1089} 1198}
1090 1199
1200static ssize_t
1201qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1202 char *buf)
1203{
1204 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1205
1206 if (!IS_QLA81XX(vha->hw))
1207 return snprintf(buf, PAGE_SIZE, "\n");
1208
1209 return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1210}
1211
1212static ssize_t
1213qla2x00_vn_port_mac_address_show(struct device *dev,
1214 struct device_attribute *attr, char *buf)
1215{
1216 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1217
1218 if (!IS_QLA81XX(vha->hw))
1219 return snprintf(buf, PAGE_SIZE, "\n");
1220
1221 return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
1222 vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
1223 vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
1224 vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
1225}
1226
1227static ssize_t
1228qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1229 char *buf)
1230{
1231 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1232
1233 return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1234}
1235
1236static ssize_t
1237qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1238 char *buf)
1239{
1240 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1241 int rval;
1242 uint16_t state[5];
1243
1244 rval = qla2x00_get_firmware_state(vha, state);
1245 if (rval != QLA_SUCCESS)
1246 memset(state, -1, sizeof(state));
1247
1248 return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
1249 state[1], state[2], state[3], state[4]);
1250}
1251
1091static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); 1252static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
1092static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); 1253static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
1093static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); 1254static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
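For context, a hedged template of the show-handler shape used by the new host attributes; example_host and its field are assumed types, not the driver's.

struct example_host {
        int vlan_id;                    /* assumed per-host private field */
};

static ssize_t example_vlan_id_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct example_host *h = shost_priv(shost);

        return snprintf(buf, PAGE_SIZE, "%d\n", h->vlan_id);
}
static DEVICE_ATTR(example_vlan_id, S_IRUGO, example_vlan_id_show, NULL);
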
@@ -1116,6 +1277,11 @@ static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
1116static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL); 1277static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
1117static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show, 1278static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
1118 NULL); 1279 NULL);
1280static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
1281static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
1282 qla2x00_vn_port_mac_address_show, NULL);
1283static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
1284static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
1119 1285
1120struct device_attribute *qla2x00_host_attrs[] = { 1286struct device_attribute *qla2x00_host_attrs[] = {
1121 &dev_attr_driver_version, 1287 &dev_attr_driver_version,
@@ -1138,6 +1304,10 @@ struct device_attribute *qla2x00_host_attrs[] = {
1138 &dev_attr_mpi_version, 1304 &dev_attr_mpi_version,
1139 &dev_attr_phy_version, 1305 &dev_attr_phy_version,
1140 &dev_attr_flash_block_size, 1306 &dev_attr_flash_block_size,
1307 &dev_attr_vlan_id,
1308 &dev_attr_vn_port_mac_address,
1309 &dev_attr_fabric_param,
1310 &dev_attr_fw_state,
1141 NULL, 1311 NULL,
1142}; 1312};
1143 1313
@@ -1313,7 +1483,8 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
1313 * At this point all fcport's software-states are cleared. Perform any 1483 * At this point all fcport's software-states are cleared. Perform any
1314 * final cleanup of firmware resources (PCBs and XCBs). 1484 * final cleanup of firmware resources (PCBs and XCBs).
1315 */ 1485 */
1316 if (fcport->loop_id != FC_NO_LOOP_ID) 1486 if (fcport->loop_id != FC_NO_LOOP_ID &&
1487 !test_bit(UNLOADING, &fcport->vha->dpc_flags))
1317 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha, 1488 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1318 fcport->loop_id, fcport->d_id.b.domain, 1489 fcport->loop_id, fcport->d_id.b.domain,
1319 fcport->d_id.b.area, fcport->d_id.b.al_pa); 1490 fcport->d_id.b.area, fcport->d_id.b.al_pa);
@@ -1437,11 +1608,13 @@ static int
1437qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) 1608qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1438{ 1609{
1439 int ret = 0; 1610 int ret = 0;
1440 int cnt = 0; 1611 uint8_t qos = 0;
1441 uint8_t qos = QLA_DEFAULT_QUE_QOS;
1442 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); 1612 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
1443 scsi_qla_host_t *vha = NULL; 1613 scsi_qla_host_t *vha = NULL;
1444 struct qla_hw_data *ha = base_vha->hw; 1614 struct qla_hw_data *ha = base_vha->hw;
1615 uint16_t options = 0;
1616 int cnt;
1617 struct req_que *req = ha->req_q_map[0];
1445 1618
1446 ret = qla24xx_vport_create_req_sanity_check(fc_vport); 1619 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1447 if (ret) { 1620 if (ret) {
@@ -1497,23 +1670,39 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1497 1670
1498 qla24xx_vport_disable(fc_vport, disable); 1671 qla24xx_vport_disable(fc_vport, disable);
1499 1672
1500 /* Create a queue pair for the vport */ 1673 if (ql2xmultique_tag) {
1501 if (ha->mqenable) { 1674 req = ha->req_q_map[1];
1502 if (ha->npiv_info) { 1675 goto vport_queue;
1503 for (; cnt < ha->nvram_npiv_size; cnt++) { 1676 } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
1504 if (ha->npiv_info[cnt].port_name == 1677 goto vport_queue;
1505 vha->port_name && 1678 /* Create a request queue in QoS mode for the vport */
1506 ha->npiv_info[cnt].node_name == 1679 for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
1507 vha->node_name) { 1680 if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
1508 qos = ha->npiv_info[cnt].q_qos; 1681 && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
1509 break; 1682 8) == 0) {
1510 } 1683 qos = ha->npiv_info[cnt].q_qos;
1511 } 1684 break;
1685 }
1686 }
1687 if (qos) {
1688 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
1689 qos);
1690 if (!ret)
1691 qla_printk(KERN_WARNING, ha,
1692 "Can't create request queue for vp_idx:%d\n",
1693 vha->vp_idx);
1694 else {
1695 DEBUG2(qla_printk(KERN_INFO, ha,
1696 "Request Que:%d (QoS: %d) created for vp_idx:%d\n",
1697 ret, qos, vha->vp_idx));
1698 req = ha->req_q_map[ret];
1512 } 1699 }
1513 qla25xx_create_queues(vha, qos);
1514 } 1700 }
1515 1701
1702vport_queue:
1703 vha->req = req;
1516 return 0; 1704 return 0;
1705
1517vport_create_failed_2: 1706vport_create_failed_2:
1518 qla24xx_disable_vp(vha); 1707 qla24xx_disable_vp(vha);
1519 qla24xx_deallocate_vp_id(vha); 1708 qla24xx_deallocate_vp_id(vha);
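A brief, illustrative note on the npiv match above: port and node names are 8-byte WWN arrays, so equality must be memcmp() over the full width rather than a pointer or integer compare.

static bool example_wwn_equal(const uint8_t *a, const uint8_t *b)
{
        return memcmp(a, b, 8) == 0;    /* 8 == WWN_SIZE */
}
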
@@ -1554,8 +1743,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1554 vha->host_no, vha->vp_idx, vha)); 1743 vha->host_no, vha->vp_idx, vha));
1555 } 1744 }
1556 1745
1557 if (ha->mqenable) { 1746 if (vha->req->id && !ql2xmultique_tag) {
1558 if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS) 1747 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
1559 qla_printk(KERN_WARNING, ha, 1748 qla_printk(KERN_WARNING, ha,
1560 "Queue delete failed.\n"); 1749 "Queue delete failed.\n");
1561 } 1750 }
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 34760f8d4f17..cca8e4ab0372 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -149,11 +149,9 @@ qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
149 int rval = QLA_SUCCESS; 149 int rval = QLA_SUCCESS;
150 uint32_t cnt; 150 uint32_t cnt;
151 151
152 if (RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE)
153 return rval;
154
155 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE); 152 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
156 for (cnt = 30000; (RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE) == 0 && 153 for (cnt = 30000;
154 ((RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED) == 0) &&
157 rval == QLA_SUCCESS; cnt--) { 155 rval == QLA_SUCCESS; cnt--) {
158 if (cnt) 156 if (cnt)
159 udelay(100); 157 udelay(100);
@@ -218,7 +216,7 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
218 216
219static int 217static int
220qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram, 218qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
221 uint16_t ram_words, void **nxt) 219 uint32_t ram_words, void **nxt)
222{ 220{
223 int rval; 221 int rval;
224 uint32_t cnt, stat, timer, words, idx; 222 uint32_t cnt, stat, timer, words, idx;
@@ -351,7 +349,7 @@ static inline void *
351qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) 349qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
352{ 350{
353 uint32_t cnt, que_idx; 351 uint32_t cnt, que_idx;
354 uint8_t req_cnt, rsp_cnt, que_cnt; 352 uint8_t que_cnt;
355 struct qla2xxx_mq_chain *mq = ptr; 353 struct qla2xxx_mq_chain *mq = ptr;
356 struct device_reg_25xxmq __iomem *reg; 354 struct device_reg_25xxmq __iomem *reg;
357 355
@@ -363,9 +361,8 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
363 mq->type = __constant_htonl(DUMP_CHAIN_MQ); 361 mq->type = __constant_htonl(DUMP_CHAIN_MQ);
364 mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain)); 362 mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));
365 363
366 req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues); 364 que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
367 rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues); 365 ha->max_req_queues : ha->max_rsp_queues;
368 que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;
369 mq->count = htonl(que_cnt); 366 mq->count = htonl(que_cnt);
370 for (cnt = 0; cnt < que_cnt; cnt++) { 367 for (cnt = 0; cnt < que_cnt; cnt++) {
371 reg = (struct device_reg_25xxmq *) ((void *) 368 reg = (struct device_reg_25xxmq *) ((void *)
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 714ee67567e1..00aa48d975a6 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -93,6 +93,7 @@
93#define LSD(x) ((uint32_t)((uint64_t)(x))) 93#define LSD(x) ((uint32_t)((uint64_t)(x)))
94#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16)) 94#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
95 95
96#define MAKE_HANDLE(x, y) ((uint32_t)((((uint32_t)(x)) << 16) | (uint32_t)(y)))
96 97
97/* 98/*
98 * I/O register 99 * I/O register
@@ -179,6 +180,7 @@
179#define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */ 180#define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */
180#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/ 181#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
181#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ 182#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
183#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
182 184
183struct req_que; 185struct req_que;
184 186
@@ -186,7 +188,6 @@ struct req_que;
186 * SCSI Request Block 188 * SCSI Request Block
187 */ 189 */
188typedef struct srb { 190typedef struct srb {
189 struct req_que *que;
190 struct fc_port *fcport; 191 struct fc_port *fcport;
191 192
192 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */ 193 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
@@ -2008,7 +2009,7 @@ typedef struct vport_params {
2008#define VP_RET_CODE_NOT_FOUND 6 2009#define VP_RET_CODE_NOT_FOUND 6
2009 2010
2010struct qla_hw_data; 2011struct qla_hw_data;
2011 2012struct rsp_que;
2012/* 2013/*
2013 * ISP operations 2014 * ISP operations
2014 */ 2015 */
@@ -2030,10 +2031,9 @@ struct isp_operations {
2030 void (*enable_intrs) (struct qla_hw_data *); 2031 void (*enable_intrs) (struct qla_hw_data *);
2031 void (*disable_intrs) (struct qla_hw_data *); 2032 void (*disable_intrs) (struct qla_hw_data *);
2032 2033
2033 int (*abort_command) (struct scsi_qla_host *, srb_t *, 2034 int (*abort_command) (srb_t *);
2034 struct req_que *); 2035 int (*target_reset) (struct fc_port *, unsigned int, int);
2035 int (*target_reset) (struct fc_port *, unsigned int); 2036 int (*lun_reset) (struct fc_port *, unsigned int, int);
2036 int (*lun_reset) (struct fc_port *, unsigned int);
2037 int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t, 2037 int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t,
2038 uint8_t, uint8_t, uint16_t *, uint8_t); 2038 uint8_t, uint8_t, uint16_t *, uint8_t);
2039 int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t, 2039 int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t,
@@ -2079,7 +2079,6 @@ struct isp_operations {
2079#define QLA_PCI_MSIX_CONTROL 0xa2 2079#define QLA_PCI_MSIX_CONTROL 0xa2
2080 2080
2081struct scsi_qla_host; 2081struct scsi_qla_host;
2082struct rsp_que;
2083 2082
2084struct qla_msix_entry { 2083struct qla_msix_entry {
2085 int have_irq; 2084 int have_irq;
@@ -2140,7 +2139,6 @@ struct qla_statistics {
2140#define MBC_INITIALIZE_MULTIQ 0x1f 2139#define MBC_INITIALIZE_MULTIQ 0x1f
2141#define QLA_QUE_PAGE 0X1000 2140#define QLA_QUE_PAGE 0X1000
2142#define QLA_MQ_SIZE 32 2141#define QLA_MQ_SIZE 32
2143#define QLA_MAX_HOST_QUES 16
2144#define QLA_MAX_QUEUES 256 2142#define QLA_MAX_QUEUES 256
2145#define ISP_QUE_REG(ha, id) \ 2143#define ISP_QUE_REG(ha, id) \
2146 ((ha->mqenable) ? \ 2144 ((ha->mqenable) ? \
@@ -2170,6 +2168,8 @@ struct rsp_que {
2170 struct qla_hw_data *hw; 2168 struct qla_hw_data *hw;
2171 struct qla_msix_entry *msix; 2169 struct qla_msix_entry *msix;
2172 struct req_que *req; 2170 struct req_que *req;
2171 srb_t *status_srb; /* status continuation entry */
2172 struct work_struct q_work;
2173}; 2173};
2174 2174
2175/* Request queue data structure */ 2175/* Request queue data structure */
@@ -2222,6 +2222,8 @@ struct qla_hw_data {
2222 uint32_t fce_enabled :1; 2222 uint32_t fce_enabled :1;
2223 uint32_t fac_supported :1; 2223 uint32_t fac_supported :1;
2224 uint32_t chip_reset_done :1; 2224 uint32_t chip_reset_done :1;
2225 uint32_t port0 :1;
2226 uint32_t running_gold_fw :1;
2225 } flags; 2227 } flags;
2226 2228
2227 /* This spinlock is used to protect "io transactions", you must 2229 /* This spinlock is used to protect "io transactions", you must
@@ -2246,7 +2248,8 @@ struct qla_hw_data {
2246 struct rsp_que **rsp_q_map; 2248 struct rsp_que **rsp_q_map;
2247 unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; 2249 unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
2248 unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; 2250 unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
2249 uint16_t max_queues; 2251 uint8_t max_req_queues;
2252 uint8_t max_rsp_queues;
2250 struct qla_npiv_entry *npiv_info; 2253 struct qla_npiv_entry *npiv_info;
2251 uint16_t nvram_npiv_size; 2254 uint16_t nvram_npiv_size;
2252 2255
@@ -2255,6 +2258,9 @@ struct qla_hw_data {
2255#define FLOGI_MID_SUPPORT BIT_10 2258#define FLOGI_MID_SUPPORT BIT_10
2256#define FLOGI_VSAN_SUPPORT BIT_12 2259#define FLOGI_VSAN_SUPPORT BIT_12
2257#define FLOGI_SP_SUPPORT BIT_13 2260#define FLOGI_SP_SUPPORT BIT_13
2261
2262 uint8_t port_no; /* Physical port of adapter */
2263
2258 /* Timeout timers. */ 2264 /* Timeout timers. */
2259 uint8_t loop_down_abort_time; /* port down timer */ 2265 uint8_t loop_down_abort_time; /* port down timer */
2260 atomic_t loop_down_timer; /* loop down timer */ 2266 atomic_t loop_down_timer; /* loop down timer */
@@ -2392,6 +2398,14 @@ struct qla_hw_data {
2392 dma_addr_t edc_data_dma; 2398 dma_addr_t edc_data_dma;
2393 uint16_t edc_data_len; 2399 uint16_t edc_data_len;
2394 2400
2401#define XGMAC_DATA_SIZE PAGE_SIZE
2402 void *xgmac_data;
2403 dma_addr_t xgmac_data_dma;
2404
2405#define DCBX_TLV_DATA_SIZE PAGE_SIZE
2406 void *dcbx_tlv;
2407 dma_addr_t dcbx_tlv_dma;
2408
2395 struct task_struct *dpc_thread; 2409 struct task_struct *dpc_thread;
2396 uint8_t dpc_active; /* DPC routine is active */ 2410 uint8_t dpc_active; /* DPC routine is active */
2397 2411
@@ -2510,6 +2524,7 @@ struct qla_hw_data {
2510 uint32_t flt_region_vpd; 2524 uint32_t flt_region_vpd;
2511 uint32_t flt_region_nvram; 2525 uint32_t flt_region_nvram;
2512 uint32_t flt_region_npiv_conf; 2526 uint32_t flt_region_npiv_conf;
2527 uint32_t flt_region_gold_fw;
2513 2528
2514 /* Needed for BEACON */ 2529 /* Needed for BEACON */
2515 uint16_t beacon_blink_led; 2530 uint16_t beacon_blink_led;
@@ -2536,6 +2551,7 @@ struct qla_hw_data {
2536 struct qla_chip_state_84xx *cs84xx; 2551 struct qla_chip_state_84xx *cs84xx;
2537 struct qla_statistics qla_stats; 2552 struct qla_statistics qla_stats;
2538 struct isp_operations *isp_ops; 2553 struct isp_operations *isp_ops;
2554 struct workqueue_struct *wq;
2539}; 2555};
2540 2556
2541/* 2557/*
@@ -2545,6 +2561,8 @@ typedef struct scsi_qla_host {
2545 struct list_head list; 2561 struct list_head list;
2546 struct list_head vp_fcports; /* list of fcports */ 2562 struct list_head vp_fcports; /* list of fcports */
2547 struct list_head work_list; 2563 struct list_head work_list;
2564 spinlock_t work_lock;
2565
2548 /* Commonly used flags and state information. */ 2566 /* Commonly used flags and state information. */
2549 struct Scsi_Host *host; 2567 struct Scsi_Host *host;
2550 unsigned long host_no; 2568 unsigned long host_no;
@@ -2591,8 +2609,6 @@ typedef struct scsi_qla_host {
2591#define SWITCH_FOUND BIT_0 2609#define SWITCH_FOUND BIT_0
2592#define DFLG_NO_CABLE BIT_1 2610#define DFLG_NO_CABLE BIT_1
2593 2611
2594 srb_t *status_srb; /* Status continuation entry. */
2595
2596 /* ISP configuration data. */ 2612 /* ISP configuration data. */
2597 uint16_t loop_id; /* Host adapter loop id */ 2613 uint16_t loop_id; /* Host adapter loop id */
2598 2614
@@ -2618,6 +2634,11 @@ typedef struct scsi_qla_host {
2618 uint8_t node_name[WWN_SIZE]; 2634 uint8_t node_name[WWN_SIZE];
2619 uint8_t port_name[WWN_SIZE]; 2635 uint8_t port_name[WWN_SIZE];
2620 uint8_t fabric_node_name[WWN_SIZE]; 2636 uint8_t fabric_node_name[WWN_SIZE];
2637
2638 uint16_t fcoe_vlan_id;
2639 uint16_t fcoe_fcf_idx;
2640 uint8_t fcoe_vn_port_mac[6];
2641
2621 uint32_t vp_abort_cnt; 2642 uint32_t vp_abort_cnt;
2622 2643
2623 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ 2644 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
@@ -2643,7 +2664,7 @@ typedef struct scsi_qla_host {
2643#define VP_ERR_FAB_LOGOUT 4 2664#define VP_ERR_FAB_LOGOUT 4
2644#define VP_ERR_ADAP_NORESOURCES 5 2665#define VP_ERR_ADAP_NORESOURCES 5
2645 struct qla_hw_data *hw; 2666 struct qla_hw_data *hw;
2646 int req_ques[QLA_MAX_HOST_QUES]; 2667 struct req_que *req;
2647} scsi_qla_host_t; 2668} scsi_qla_host_t;
2648 2669
2649/* 2670/*
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 96ccb9642ba0..dfde2dd865cb 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -878,7 +878,6 @@ struct device_reg_24xx {
878 /* HCCR statuses. */ 878 /* HCCR statuses. */
879#define HCCRX_HOST_INT BIT_6 /* Host to RISC interrupt bit. */ 879#define HCCRX_HOST_INT BIT_6 /* Host to RISC interrupt bit. */
880#define HCCRX_RISC_RESET BIT_5 /* RISC Reset mode bit. */ 880#define HCCRX_RISC_RESET BIT_5 /* RISC Reset mode bit. */
881#define HCCRX_RISC_PAUSE BIT_4 /* RISC Pause mode bit. */
882 /* HCCR commands. */ 881 /* HCCR commands. */
883 /* NOOP. */ 882 /* NOOP. */
884#define HCCRX_NOOP 0x00000000 883#define HCCRX_NOOP 0x00000000
@@ -1241,6 +1240,7 @@ struct qla_flt_header {
1241#define FLT_REG_HW_EVENT_1 0x1f 1240#define FLT_REG_HW_EVENT_1 0x1f
1242#define FLT_REG_NPIV_CONF_0 0x29 1241#define FLT_REG_NPIV_CONF_0 0x29
1243#define FLT_REG_NPIV_CONF_1 0x2a 1242#define FLT_REG_NPIV_CONF_1 0x2a
1243#define FLT_REG_GOLD_FW 0x2f
1244 1244
1245struct qla_flt_region { 1245struct qla_flt_region {
1246 uint32_t code; 1246 uint32_t code;
@@ -1405,6 +1405,8 @@ struct access_chip_rsp_84xx {
1405#define MBC_IDC_ACK 0x101 1405#define MBC_IDC_ACK 0x101
1406#define MBC_RESTART_MPI_FW 0x3d 1406#define MBC_RESTART_MPI_FW 0x3d
1407#define MBC_FLASH_ACCESS_CTRL 0x3e /* Control flash access. */ 1407#define MBC_FLASH_ACCESS_CTRL 0x3e /* Control flash access. */
1408#define MBC_GET_XGMAC_STATS 0x7a
1409#define MBC_GET_DCBX_PARAMS 0x51
1408 1410
1409/* Flash access control option field bit definitions */ 1411/* Flash access control option field bit definitions */
1410#define FAC_OPT_FORCE_SEMAPHORE BIT_15 1412#define FAC_OPT_FORCE_SEMAPHORE BIT_15
@@ -1711,7 +1713,7 @@ struct ex_init_cb_81xx {
1711#define FA_VPD0_ADDR_81 0xD0000 1713#define FA_VPD0_ADDR_81 0xD0000
1712#define FA_VPD1_ADDR_81 0xD0400 1714#define FA_VPD1_ADDR_81 0xD0400
1713#define FA_NVRAM0_ADDR_81 0xD0080 1715#define FA_NVRAM0_ADDR_81 0xD0080
1714#define FA_NVRAM1_ADDR_81 0xD0480 1716#define FA_NVRAM1_ADDR_81 0xD0180
1715#define FA_FEATURE_ADDR_81 0xD4000 1717#define FA_FEATURE_ADDR_81 0xD4000
1716#define FA_FLASH_DESCR_ADDR_81 0xD8000 1718#define FA_FLASH_DESCR_ADDR_81 0xD8000
1717#define FA_FLASH_LAYOUT_ADDR_81 0xD8400 1719#define FA_FLASH_LAYOUT_ADDR_81 0xD8400
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 528913f6bed9..65b12d82867c 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -65,8 +65,11 @@ extern int ql2xfdmienable;
65extern int ql2xallocfwdump; 65extern int ql2xallocfwdump;
66extern int ql2xextended_error_logging; 66extern int ql2xextended_error_logging;
67extern int ql2xqfullrampup; 67extern int ql2xqfullrampup;
68extern int ql2xqfulltracking;
68extern int ql2xiidmaenable; 69extern int ql2xiidmaenable;
69extern int ql2xmaxqueues; 70extern int ql2xmaxqueues;
71extern int ql2xmultique_tag;
72extern int ql2xfwloadbin;
70 73
71extern int qla2x00_loop_reset(scsi_qla_host_t *); 74extern int qla2x00_loop_reset(scsi_qla_host_t *);
72extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); 75extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -145,7 +148,7 @@ qla2x00_dump_ram(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
145extern int 148extern int
146qla2x00_execute_fw(scsi_qla_host_t *, uint32_t); 149qla2x00_execute_fw(scsi_qla_host_t *, uint32_t);
147 150
148extern void 151extern int
149qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *, 152qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *,
150 uint16_t *, uint32_t *, uint8_t *, uint32_t *, uint8_t *); 153 uint16_t *, uint32_t *, uint8_t *, uint32_t *, uint8_t *);
151 154
@@ -165,13 +168,13 @@ extern int
165qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t); 168qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t);
166 169
167extern int 170extern int
168qla2x00_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *); 171qla2x00_abort_command(srb_t *);
169 172
170extern int 173extern int
171qla2x00_abort_target(struct fc_port *, unsigned int); 174qla2x00_abort_target(struct fc_port *, unsigned int, int);
172 175
173extern int 176extern int
174qla2x00_lun_reset(struct fc_port *, unsigned int); 177qla2x00_lun_reset(struct fc_port *, unsigned int, int);
175 178
176extern int 179extern int
177qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *, 180qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *,
@@ -236,9 +239,11 @@ extern int
236qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, 239qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
237 dma_addr_t); 240 dma_addr_t);
238 241
239extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *); 242extern int qla24xx_abort_command(srb_t *);
240extern int qla24xx_abort_target(struct fc_port *, unsigned int); 243extern int
241extern int qla24xx_lun_reset(struct fc_port *, unsigned int); 244qla24xx_abort_target(struct fc_port *, unsigned int, int);
245extern int
246qla24xx_lun_reset(struct fc_port *, unsigned int, int);
242 247
243extern int 248extern int
244qla2x00_system_error(scsi_qla_host_t *); 249qla2x00_system_error(scsi_qla_host_t *);
@@ -288,6 +293,18 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *, int);
288extern int 293extern int
289qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t); 294qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t);
290 295
296extern int
297qla2x00_get_xgmac_stats(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t *);
298
299extern int
300qla2x00_get_dcbx_params(scsi_qla_host_t *, dma_addr_t, uint16_t);
301
302extern int
303qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *);
304
305extern int
306qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t);
307
291/* 308/*
292 * Global Function Prototypes in qla_isr.c source file. 309 * Global Function Prototypes in qla_isr.c source file.
293 */ 310 */
@@ -295,8 +312,8 @@ extern irqreturn_t qla2100_intr_handler(int, void *);
295extern irqreturn_t qla2300_intr_handler(int, void *); 312extern irqreturn_t qla2300_intr_handler(int, void *);
296extern irqreturn_t qla24xx_intr_handler(int, void *); 313extern irqreturn_t qla24xx_intr_handler(int, void *);
297extern void qla2x00_process_response_queue(struct rsp_que *); 314extern void qla2x00_process_response_queue(struct rsp_que *);
298extern void qla24xx_process_response_queue(struct rsp_que *); 315extern void
299 316qla24xx_process_response_queue(struct scsi_qla_host *, struct rsp_que *);
300extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *); 317extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
301extern void qla2x00_free_irqs(scsi_qla_host_t *); 318extern void qla2x00_free_irqs(scsi_qla_host_t *);
302 319
@@ -401,19 +418,21 @@ extern int qla25xx_request_irq(struct rsp_que *);
401extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *); 418extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
402extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *); 419extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
403extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t, 420extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
404 uint16_t, uint8_t, uint8_t); 421 uint16_t, int, uint8_t);
405extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t, 422extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
406 uint16_t); 423 uint16_t, int);
407extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t); 424extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t);
408extern void qla2x00_init_response_q_entries(struct rsp_que *); 425extern void qla2x00_init_response_q_entries(struct rsp_que *);
409extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *); 426extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
410extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *); 427extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
411extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t); 428extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t);
412extern int qla25xx_delete_queues(struct scsi_qla_host *, uint8_t); 429extern int qla25xx_delete_queues(struct scsi_qla_host *);
413extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t); 430extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
414extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t); 431extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
415extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); 432extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
416extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); 433extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
417extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 434extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
418extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 435extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
436extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *);
437
419#endif /* _QLA_GBL_H */ 438#endif /* _QLA_GBL_H */
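The three new externs (ql2xqfulltracking, ql2xmultique_tag, ql2xfwloadbin) are module parameters consumed in the hunks below: judging from the qla_init.c changes, ql2xfwloadbin steers firmware-load order (1 prefers flash, 2 prefers the request_firmware() image), ql2xmultique_tag selects the CPU-affinity multiqueue routing, and ql2xqfulltracking gates the queue-full tracking paths in qla_isr.c. Their definitions live outside this excerpt; a minimal sketch of how such a knob is conventionally declared in a kernel module (parameter name from the patch, description text assumed):

#include <linux/module.h>
#include <linux/moduleparam.h>

int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO);
MODULE_PARM_DESC(ql2xfwloadbin,
	"Firmware load preference: "
	"1 -- load from flash first, "
	"2 -- load the request_firmware() image first, "
	"0 -- default ordering (assumed wording, not from this patch).");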
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 557f58d5bf88..917534b9f221 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1107,7 +1107,7 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1107 return ret; 1107 return ret;
1108 1108
1109 ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa, 1109 ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
1110 mb, BIT_1); 1110 mb, BIT_1|BIT_0);
1111 if (mb[0] != MBS_COMMAND_COMPLETE) { 1111 if (mb[0] != MBS_COMMAND_COMPLETE) {
1112 DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: " 1112 DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
1113 "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n", 1113 "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n",
@@ -1879,6 +1879,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1879 case BIT_13: 1879 case BIT_13:
1880 list[i].fp_speed = PORT_SPEED_4GB; 1880 list[i].fp_speed = PORT_SPEED_4GB;
1881 break; 1881 break;
1882 case BIT_12:
1883 list[i].fp_speed = PORT_SPEED_10GB;
1884 break;
1882 case BIT_11: 1885 case BIT_11:
1883 list[i].fp_speed = PORT_SPEED_8GB; 1886 list[i].fp_speed = PORT_SPEED_8GB;
1884 break; 1887 break;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index bd7dd84c0648..f2ce8e3cc91b 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -634,7 +634,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
634 goto chip_diag_failed; 634 goto chip_diag_failed;
635 635
636 DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n", 636 DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n",
637 ha->host_no)); 637 vha->host_no));
638 638
639 /* Reset RISC processor. */ 639 /* Reset RISC processor. */
640 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 640 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
@@ -655,7 +655,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
655 goto chip_diag_failed; 655 goto chip_diag_failed;
656 656
657 /* Check product ID of chip */ 657 /* Check product ID of chip */
658 DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", ha->host_no)); 658 DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no));
659 659
660 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 660 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
661 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 661 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
@@ -730,9 +730,6 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
730 struct qla_hw_data *ha = vha->hw; 730 struct qla_hw_data *ha = vha->hw;
731 struct req_que *req = ha->req_q_map[0]; 731 struct req_que *req = ha->req_q_map[0];
732 732
733 /* Perform RISC reset. */
734 qla24xx_reset_risc(vha);
735
736 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; 733 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
737 734
738 rval = qla2x00_mbx_reg_test(vha); 735 rval = qla2x00_mbx_reg_test(vha);
@@ -786,7 +783,6 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
786 sizeof(uint32_t); 783 sizeof(uint32_t);
787 if (ha->mqenable) 784 if (ha->mqenable)
788 mq_size = sizeof(struct qla2xxx_mq_chain); 785 mq_size = sizeof(struct qla2xxx_mq_chain);
789
790 /* Allocate memory for Fibre Channel Event Buffer. */ 786 /* Allocate memory for Fibre Channel Event Buffer. */
791 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)) 787 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
792 goto try_eft; 788 goto try_eft;
@@ -850,8 +846,7 @@ cont_alloc:
850 rsp_q_size = rsp->length * sizeof(response_t); 846 rsp_q_size = rsp->length * sizeof(response_t);
851 847
852 dump_size = offsetof(struct qla2xxx_fw_dump, isp); 848 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
853 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + 849 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
854 eft_size;
855 ha->chain_offset = dump_size; 850 ha->chain_offset = dump_size;
856 dump_size += mq_size + fce_size; 851 dump_size += mq_size + fce_size;
857 852
@@ -891,6 +886,56 @@ cont_alloc:
891 htonl(offsetof(struct qla2xxx_fw_dump, isp)); 886 htonl(offsetof(struct qla2xxx_fw_dump, isp));
892} 887}
893 888
889static int
890qla81xx_mpi_sync(scsi_qla_host_t *vha)
891{
892#define MPS_MASK 0xe0
893 int rval;
894 uint16_t dc;
895 uint32_t dw;
896 struct qla_hw_data *ha = vha->hw;
897
898 if (!IS_QLA81XX(vha->hw))
899 return QLA_SUCCESS;
900
901 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
902 if (rval != QLA_SUCCESS) {
903 DEBUG2(qla_printk(KERN_WARNING, ha,
904 "Sync-MPI: Unable to acquire semaphore.\n"));
905 goto done;
906 }
907
908 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
909 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
910 if (rval != QLA_SUCCESS) {
911 DEBUG2(qla_printk(KERN_WARNING, ha,
912 "Sync-MPI: Unable to read sync.\n"));
913 goto done_release;
914 }
915
916 dc &= MPS_MASK;
917 if (dc == (dw & MPS_MASK))
918 goto done_release;
919
920 dw &= ~MPS_MASK;
921 dw |= dc;
922 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
923 if (rval != QLA_SUCCESS) {
924 DEBUG2(qla_printk(KERN_WARNING, ha,
925 "Sync-MPI: Unable to gain sync.\n"));
926 }
927
928done_release:
929 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
930 if (rval != QLA_SUCCESS) {
931 DEBUG2(qla_printk(KERN_WARNING, ha,
932 "Sync-MPI: Unable to release semaphore.\n"));
933 }
934
935done:
936 return rval;
937}
938
894/** 939/**
895 * qla2x00_setup_chip() - Load and start RISC firmware. 940 * qla2x00_setup_chip() - Load and start RISC firmware.
896 * @ha: HA context 941 * @ha: HA context
@@ -915,6 +960,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
915 spin_unlock_irqrestore(&ha->hardware_lock, flags); 960 spin_unlock_irqrestore(&ha->hardware_lock, flags);
916 } 961 }
917 962
963 qla81xx_mpi_sync(vha);
964
918 /* Load firmware sequences */ 965 /* Load firmware sequences */
919 rval = ha->isp_ops->load_risc(vha, &srisc_address); 966 rval = ha->isp_ops->load_risc(vha, &srisc_address);
920 if (rval == QLA_SUCCESS) { 967 if (rval == QLA_SUCCESS) {
@@ -931,13 +978,16 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
931 /* Retrieve firmware information. */ 978 /* Retrieve firmware information. */
932 if (rval == QLA_SUCCESS) { 979 if (rval == QLA_SUCCESS) {
933 fw_major_version = ha->fw_major_version; 980 fw_major_version = ha->fw_major_version;
934 qla2x00_get_fw_version(vha, 981 rval = qla2x00_get_fw_version(vha,
935 &ha->fw_major_version, 982 &ha->fw_major_version,
936 &ha->fw_minor_version, 983 &ha->fw_minor_version,
937 &ha->fw_subminor_version, 984 &ha->fw_subminor_version,
938 &ha->fw_attributes, &ha->fw_memory_size, 985 &ha->fw_attributes, &ha->fw_memory_size,
939 ha->mpi_version, &ha->mpi_capabilities, 986 ha->mpi_version, &ha->mpi_capabilities,
940 ha->phy_version); 987 ha->phy_version);
988 if (rval != QLA_SUCCESS)
989 goto failed;
990
941 ha->flags.npiv_supported = 0; 991 ha->flags.npiv_supported = 0;
942 if (IS_QLA2XXX_MIDTYPE(ha) && 992 if (IS_QLA2XXX_MIDTYPE(ha) &&
943 (ha->fw_attributes & BIT_2)) { 993 (ha->fw_attributes & BIT_2)) {
@@ -989,7 +1039,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
989 ha->fw_subminor_version); 1039 ha->fw_subminor_version);
990 } 1040 }
991 } 1041 }
992 1042failed:
993 if (rval) { 1043 if (rval) {
994 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n", 1044 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
995 vha->host_no)); 1045 vha->host_no));
@@ -1013,12 +1063,14 @@ qla2x00_init_response_q_entries(struct rsp_que *rsp)
1013 uint16_t cnt; 1063 uint16_t cnt;
1014 response_t *pkt; 1064 response_t *pkt;
1015 1065
1066 rsp->ring_ptr = rsp->ring;
1067 rsp->ring_index = 0;
1068 rsp->status_srb = NULL;
1016 pkt = rsp->ring_ptr; 1069 pkt = rsp->ring_ptr;
1017 for (cnt = 0; cnt < rsp->length; cnt++) { 1070 for (cnt = 0; cnt < rsp->length; cnt++) {
1018 pkt->signature = RESPONSE_PROCESSED; 1071 pkt->signature = RESPONSE_PROCESSED;
1019 pkt++; 1072 pkt++;
1020 } 1073 }
1021
1022} 1074}
1023 1075
1024/** 1076/**
@@ -1176,7 +1228,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1176 if (ha->flags.msix_enabled) { 1228 if (ha->flags.msix_enabled) {
1177 msix = &ha->msix_entries[1]; 1229 msix = &ha->msix_entries[1];
1178 DEBUG2_17(printk(KERN_INFO 1230 DEBUG2_17(printk(KERN_INFO
1179 "Reistering vector 0x%x for base que\n", msix->entry)); 1231 "Registering vector 0x%x for base que\n", msix->entry));
1180 icb->msix = cpu_to_le16(msix->entry); 1232 icb->msix = cpu_to_le16(msix->entry);
1181 } 1233 }
1182 /* Use alternate PCI bus number */ 1234 /* Use alternate PCI bus number */
@@ -1230,14 +1282,14 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1230 spin_lock_irqsave(&ha->hardware_lock, flags); 1282 spin_lock_irqsave(&ha->hardware_lock, flags);
1231 1283
1232 /* Clear outstanding commands array. */ 1284 /* Clear outstanding commands array. */
1233 for (que = 0; que < ha->max_queues; que++) { 1285 for (que = 0; que < ha->max_req_queues; que++) {
1234 req = ha->req_q_map[que]; 1286 req = ha->req_q_map[que];
1235 if (!req) 1287 if (!req)
1236 continue; 1288 continue;
1237 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) 1289 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
1238 req->outstanding_cmds[cnt] = NULL; 1290 req->outstanding_cmds[cnt] = NULL;
1239 1291
1240 req->current_outstanding_cmd = 0; 1292 req->current_outstanding_cmd = 1;
1241 1293
1242 /* Initialize firmware. */ 1294 /* Initialize firmware. */
1243 req->ring_ptr = req->ring; 1295 req->ring_ptr = req->ring;
@@ -1245,13 +1297,10 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1245 req->cnt = req->length; 1297 req->cnt = req->length;
1246 } 1298 }
1247 1299
1248 for (que = 0; que < ha->max_queues; que++) { 1300 for (que = 0; que < ha->max_rsp_queues; que++) {
1249 rsp = ha->rsp_q_map[que]; 1301 rsp = ha->rsp_q_map[que];
1250 if (!rsp) 1302 if (!rsp)
1251 continue; 1303 continue;
1252 rsp->ring_ptr = rsp->ring;
1253 rsp->ring_index = 0;
1254
1255 /* Initialize response queue entries */ 1304 /* Initialize response queue entries */
1256 qla2x00_init_response_q_entries(rsp); 1305 qla2x00_init_response_q_entries(rsp);
1257 } 1306 }
@@ -1307,7 +1356,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1307 unsigned long wtime, mtime, cs84xx_time; 1356 unsigned long wtime, mtime, cs84xx_time;
1308 uint16_t min_wait; /* Minimum wait time if loop is down */ 1357 uint16_t min_wait; /* Minimum wait time if loop is down */
1309 uint16_t wait_time; /* Wait time if loop is coming ready */ 1358 uint16_t wait_time; /* Wait time if loop is coming ready */
1310 uint16_t state[3]; 1359 uint16_t state[5];
1311 struct qla_hw_data *ha = vha->hw; 1360 struct qla_hw_data *ha = vha->hw;
1312 1361
1313 rval = QLA_SUCCESS; 1362 rval = QLA_SUCCESS;
@@ -1406,8 +1455,9 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1406 vha->host_no, state[0], jiffies)); 1455 vha->host_no, state[0], jiffies));
1407 } while (1); 1456 } while (1);
1408 1457
1409 DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", 1458 DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n",
1410 vha->host_no, state[0], jiffies)); 1459 vha->host_no, state[0], state[1], state[2], state[3], state[4],
1460 jiffies));
1411 1461
1412 if (rval) { 1462 if (rval) {
1413 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n", 1463 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
@@ -1541,6 +1591,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
1541 char *st, *en; 1591 char *st, *en;
1542 uint16_t index; 1592 uint16_t index;
1543 struct qla_hw_data *ha = vha->hw; 1593 struct qla_hw_data *ha = vha->hw;
1594 int use_tbl = !IS_QLA25XX(ha) && !IS_QLA81XX(ha);
1544 1595
1545 if (memcmp(model, BINZERO, len) != 0) { 1596 if (memcmp(model, BINZERO, len) != 0) {
1546 strncpy(ha->model_number, model, len); 1597 strncpy(ha->model_number, model, len);
@@ -1553,14 +1604,16 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
1553 } 1604 }
1554 1605
1555 index = (ha->pdev->subsystem_device & 0xff); 1606 index = (ha->pdev->subsystem_device & 0xff);
1556 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 1607 if (use_tbl &&
1608 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
1557 index < QLA_MODEL_NAMES) 1609 index < QLA_MODEL_NAMES)
1558 strncpy(ha->model_desc, 1610 strncpy(ha->model_desc,
1559 qla2x00_model_name[index * 2 + 1], 1611 qla2x00_model_name[index * 2 + 1],
1560 sizeof(ha->model_desc) - 1); 1612 sizeof(ha->model_desc) - 1);
1561 } else { 1613 } else {
1562 index = (ha->pdev->subsystem_device & 0xff); 1614 index = (ha->pdev->subsystem_device & 0xff);
1563 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 1615 if (use_tbl &&
1616 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
1564 index < QLA_MODEL_NAMES) { 1617 index < QLA_MODEL_NAMES) {
1565 strcpy(ha->model_number, 1618 strcpy(ha->model_number,
1566 qla2x00_model_name[index * 2]); 1619 qla2x00_model_name[index * 2]);
@@ -2061,8 +2114,10 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2061 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 2114 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2062 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) 2115 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
2063 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 2116 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2064 if (test_bit(RSCN_UPDATE, &save_flags)) 2117 if (test_bit(RSCN_UPDATE, &save_flags)) {
2065 set_bit(RSCN_UPDATE, &vha->dpc_flags); 2118 set_bit(RSCN_UPDATE, &vha->dpc_flags);
2119 vha->flags.rscn_queue_overflow = 1;
2120 }
2066 } 2121 }
2067 2122
2068 return (rval); 2123 return (rval);
@@ -2110,7 +2165,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2110 goto cleanup_allocation; 2165 goto cleanup_allocation;
2111 2166
2112 DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n", 2167 DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n",
2113 ha->host_no, entries)); 2168 vha->host_no, entries));
2114 DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list, 2169 DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list,
2115 entries * sizeof(struct gid_list_info))); 2170 entries * sizeof(struct gid_list_info)));
2116 2171
@@ -2243,9 +2298,10 @@ static void
2243qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 2298qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2244{ 2299{
2245#define LS_UNKNOWN 2 2300#define LS_UNKNOWN 2
2246 static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; 2301 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
2302 char *link_speed;
2247 int rval; 2303 int rval;
2248 uint16_t mb[6]; 2304 uint16_t mb[4];
2249 struct qla_hw_data *ha = vha->hw; 2305 struct qla_hw_data *ha = vha->hw;
2250 2306
2251 if (!IS_IIDMA_CAPABLE(ha)) 2307 if (!IS_IIDMA_CAPABLE(ha))
@@ -2266,10 +2322,15 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2266 fcport->port_name[6], fcport->port_name[7], rval, 2322 fcport->port_name[6], fcport->port_name[7], rval,
2267 fcport->fp_speed, mb[0], mb[1])); 2323 fcport->fp_speed, mb[0], mb[1]));
2268 } else { 2324 } else {
2325 link_speed = link_speeds[LS_UNKNOWN];
2326 if (fcport->fp_speed < 5)
2327 link_speed = link_speeds[fcport->fp_speed];
2328 else if (fcport->fp_speed == 0x13)
2329 link_speed = link_speeds[5];
2269 DEBUG2(qla_printk(KERN_INFO, ha, 2330 DEBUG2(qla_printk(KERN_INFO, ha,
2270 "iIDMA adjusted to %s GB/s on " 2331 "iIDMA adjusted to %s GB/s on "
2271 "%02x%02x%02x%02x%02x%02x%02x%02x.\n", 2332 "%02x%02x%02x%02x%02x%02x%02x%02x.\n",
2272 link_speeds[fcport->fp_speed], fcport->port_name[0], 2333 link_speed, fcport->port_name[0],
2273 fcport->port_name[1], fcport->port_name[2], 2334 fcport->port_name[1], fcport->port_name[2],
2274 fcport->port_name[3], fcport->port_name[4], 2335 fcport->port_name[3], fcport->port_name[4],
2275 fcport->port_name[5], fcport->port_name[6], 2336 fcport->port_name[5], fcport->port_name[6],
@@ -3180,9 +3241,14 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
3180{ 3241{
3181 int rval = QLA_SUCCESS; 3242 int rval = QLA_SUCCESS;
3182 uint32_t wait_time; 3243 uint32_t wait_time;
3183 struct qla_hw_data *ha = vha->hw; 3244 struct req_que *req;
3184 struct req_que *req = ha->req_q_map[vha->req_ques[0]]; 3245 struct rsp_que *rsp;
3185 struct rsp_que *rsp = req->rsp; 3246
3247 if (ql2xmultique_tag)
3248 req = vha->hw->req_q_map[0];
3249 else
3250 req = vha->req;
3251 rsp = req->rsp;
3186 3252
3187 atomic_set(&vha->loop_state, LOOP_UPDATE); 3253 atomic_set(&vha->loop_state, LOOP_UPDATE);
3188 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3254 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
@@ -3448,7 +3514,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
3448 int ret = -1; 3514 int ret = -1;
3449 int i; 3515 int i;
3450 3516
3451 for (i = 1; i < ha->max_queues; i++) { 3517 for (i = 1; i < ha->max_rsp_queues; i++) {
3452 rsp = ha->rsp_q_map[i]; 3518 rsp = ha->rsp_q_map[i];
3453 if (rsp) { 3519 if (rsp) {
3454 rsp->options &= ~BIT_0; 3520 rsp->options &= ~BIT_0;
@@ -3462,6 +3528,8 @@ qla25xx_init_queues(struct qla_hw_data *ha)
3462 "%s Rsp que:%d inited\n", __func__, 3528 "%s Rsp que:%d inited\n", __func__,
3463 rsp->id)); 3529 rsp->id));
3464 } 3530 }
3531 }
3532 for (i = 1; i < ha->max_req_queues; i++) {
3465 req = ha->req_q_map[i]; 3533 req = ha->req_q_map[i];
3466 if (req) { 3534 if (req) {
3467 /* Clear outstanding commands array. */ 3535 /* Clear outstanding commands array. */
@@ -3566,14 +3634,15 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
3566 nv = ha->nvram; 3634 nv = ha->nvram;
3567 3635
3568 /* Determine NVRAM starting address. */ 3636 /* Determine NVRAM starting address. */
3569 ha->nvram_size = sizeof(struct nvram_24xx); 3637 if (ha->flags.port0) {
3570 ha->nvram_base = FA_NVRAM_FUNC0_ADDR; 3638 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
3571 ha->vpd_size = FA_NVRAM_VPD_SIZE; 3639 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
3572 ha->vpd_base = FA_NVRAM_VPD0_ADDR; 3640 } else {
3573 if (PCI_FUNC(ha->pdev->devfn)) {
3574 ha->nvram_base = FA_NVRAM_FUNC1_ADDR; 3641 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
3575 ha->vpd_base = FA_NVRAM_VPD1_ADDR; 3642 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
3576 } 3643 }
3644 ha->nvram_size = sizeof(struct nvram_24xx);
3645 ha->vpd_size = FA_NVRAM_VPD_SIZE;
3577 3646
3578 /* Get VPD data into cache */ 3647 /* Get VPD data into cache */
3579 ha->vpd = ha->nvram + VPD_OFFSET; 3648 ha->vpd = ha->nvram + VPD_OFFSET;
@@ -3587,7 +3656,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
3587 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 3656 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
3588 chksum += le32_to_cpu(*dptr++); 3657 chksum += le32_to_cpu(*dptr++);
3589 3658
3590 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no)); 3659 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
3591 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 3660 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
3592 3661
3593 /* Bad NVRAM data, set defaults parameters. */ 3662 /* Bad NVRAM data, set defaults parameters. */
@@ -3612,7 +3681,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
3612 nv->exchange_count = __constant_cpu_to_le16(0); 3681 nv->exchange_count = __constant_cpu_to_le16(0);
3613 nv->hard_address = __constant_cpu_to_le16(124); 3682 nv->hard_address = __constant_cpu_to_le16(124);
3614 nv->port_name[0] = 0x21; 3683 nv->port_name[0] = 0x21;
3615 nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn); 3684 nv->port_name[1] = 0x00 + ha->port_no;
3616 nv->port_name[2] = 0x00; 3685 nv->port_name[2] = 0x00;
3617 nv->port_name[3] = 0xe0; 3686 nv->port_name[3] = 0xe0;
3618 nv->port_name[4] = 0x8b; 3687 nv->port_name[4] = 0x8b;
@@ -3798,11 +3867,11 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
3798} 3867}
3799 3868
3800static int 3869static int
3801qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr) 3870qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
3871 uint32_t faddr)
3802{ 3872{
3803 int rval = QLA_SUCCESS; 3873 int rval = QLA_SUCCESS;
3804 int segments, fragment; 3874 int segments, fragment;
3805 uint32_t faddr;
3806 uint32_t *dcode, dlen; 3875 uint32_t *dcode, dlen;
3807 uint32_t risc_addr; 3876 uint32_t risc_addr;
3808 uint32_t risc_size; 3877 uint32_t risc_size;
@@ -3811,12 +3880,11 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3811 struct req_que *req = ha->req_q_map[0]; 3880 struct req_que *req = ha->req_q_map[0];
3812 3881
3813 qla_printk(KERN_INFO, ha, 3882 qla_printk(KERN_INFO, ha,
3814 "FW: Loading from flash (%x)...\n", ha->flt_region_fw); 3883 "FW: Loading from flash (%x)...\n", faddr);
3815 3884
3816 rval = QLA_SUCCESS; 3885 rval = QLA_SUCCESS;
3817 3886
3818 segments = FA_RISC_CODE_SEGMENTS; 3887 segments = FA_RISC_CODE_SEGMENTS;
3819 faddr = ha->flt_region_fw;
3820 dcode = (uint32_t *)req->ring; 3888 dcode = (uint32_t *)req->ring;
3821 *srisc_addr = 0; 3889 *srisc_addr = 0;
3822 3890
@@ -4104,6 +4172,9 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4104{ 4172{
4105 int rval; 4173 int rval;
4106 4174
4175 if (ql2xfwloadbin == 1)
4176 return qla81xx_load_risc(vha, srisc_addr);
4177
4107 /* 4178 /*
4108 * FW Load priority: 4179 * FW Load priority:
4109 * 1) Firmware via request-firmware interface (.bin file). 4180 * 1) Firmware via request-firmware interface (.bin file).
@@ -4113,24 +4184,45 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4113 if (rval == QLA_SUCCESS) 4184 if (rval == QLA_SUCCESS)
4114 return rval; 4185 return rval;
4115 4186
4116 return qla24xx_load_risc_flash(vha, srisc_addr); 4187 return qla24xx_load_risc_flash(vha, srisc_addr,
4188 vha->hw->flt_region_fw);
4117} 4189}
4118 4190
4119int 4191int
4120qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 4192qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4121{ 4193{
4122 int rval; 4194 int rval;
4195 struct qla_hw_data *ha = vha->hw;
4196
4197 if (ql2xfwloadbin == 2)
4198 goto try_blob_fw;
4123 4199
4124 /* 4200 /*
4125 * FW Load priority: 4201 * FW Load priority:
4126 * 1) Firmware residing in flash. 4202 * 1) Firmware residing in flash.
4127 * 2) Firmware via request-firmware interface (.bin file). 4203 * 2) Firmware via request-firmware interface (.bin file).
4204 * 3) Golden-Firmware residing in flash -- limited operation.
4128 */ 4205 */
4129 rval = qla24xx_load_risc_flash(vha, srisc_addr); 4206 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
4130 if (rval == QLA_SUCCESS) 4207 if (rval == QLA_SUCCESS)
4131 return rval; 4208 return rval;
4132 4209
4133 return qla24xx_load_risc_blob(vha, srisc_addr); 4210try_blob_fw:
4211 rval = qla24xx_load_risc_blob(vha, srisc_addr);
4212 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
4213 return rval;
4214
4215 qla_printk(KERN_ERR, ha,
4216 "FW: Attempting to fallback to golden firmware...\n");
4217 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
4218 if (rval != QLA_SUCCESS)
4219 return rval;
4220
4221 qla_printk(KERN_ERR, ha,
4222 "FW: Please update operational firmware...\n");
4223 ha->flags.running_gold_fw = 1;
4224
4225 return rval;
4134} 4226}
4135 4227
4136void 4228void
@@ -4146,7 +4238,7 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4146 4238
4147 ret = qla2x00_stop_firmware(vha); 4239 ret = qla2x00_stop_firmware(vha);
4148 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && 4240 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
4149 retries ; retries--) { 4241 ret != QLA_INVALID_COMMAND && retries ; retries--) {
4150 ha->isp_ops->reset_chip(vha); 4242 ha->isp_ops->reset_chip(vha);
4151 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS) 4243 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
4152 continue; 4244 continue;
@@ -4165,13 +4257,19 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
4165 uint16_t mb[MAILBOX_REGISTER_COUNT]; 4257 uint16_t mb[MAILBOX_REGISTER_COUNT];
4166 struct qla_hw_data *ha = vha->hw; 4258 struct qla_hw_data *ha = vha->hw;
4167 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 4259 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4168 struct req_que *req = ha->req_q_map[vha->req_ques[0]]; 4260 struct req_que *req;
4169 struct rsp_que *rsp = req->rsp; 4261 struct rsp_que *rsp;
4170 4262
4171 if (!vha->vp_idx) 4263 if (!vha->vp_idx)
4172 return -EINVAL; 4264 return -EINVAL;
4173 4265
4174 rval = qla2x00_fw_ready(base_vha); 4266 rval = qla2x00_fw_ready(base_vha);
4267 if (ql2xmultique_tag)
4268 req = ha->req_q_map[0];
4269 else
4270 req = vha->req;
4271 rsp = req->rsp;
4272
4175 if (rval == QLA_SUCCESS) { 4273 if (rval == QLA_SUCCESS) {
4176 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 4274 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4177 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 4275 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
@@ -4305,7 +4403,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
4305 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 4403 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4306 chksum += le32_to_cpu(*dptr++); 4404 chksum += le32_to_cpu(*dptr++);
4307 4405
4308 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no)); 4406 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
4309 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 4407 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
4310 4408
4311 /* Bad NVRAM data, set defaults parameters. */ 4409 /* Bad NVRAM data, set defaults parameters. */
@@ -4329,7 +4427,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
4329 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); 4427 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4330 nv->exchange_count = __constant_cpu_to_le16(0); 4428 nv->exchange_count = __constant_cpu_to_le16(0);
4331 nv->port_name[0] = 0x21; 4429 nv->port_name[0] = 0x21;
4332 nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn); 4430 nv->port_name[1] = 0x00 + ha->port_no;
4333 nv->port_name[2] = 0x00; 4431 nv->port_name[2] = 0x00;
4334 nv->port_name[3] = 0xe0; 4432 nv->port_name[3] = 0xe0;
4335 nv->port_name[4] = 0x8b; 4433 nv->port_name[4] = 0x8b;
@@ -4358,12 +4456,12 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
4358 nv->max_luns_per_target = __constant_cpu_to_le16(128); 4456 nv->max_luns_per_target = __constant_cpu_to_le16(128);
4359 nv->port_down_retry_count = __constant_cpu_to_le16(30); 4457 nv->port_down_retry_count = __constant_cpu_to_le16(30);
4360 nv->link_down_timeout = __constant_cpu_to_le16(30); 4458 nv->link_down_timeout = __constant_cpu_to_le16(30);
4361 nv->enode_mac[0] = 0x01; 4459 nv->enode_mac[0] = 0x00;
4362 nv->enode_mac[1] = 0x02; 4460 nv->enode_mac[1] = 0x02;
4363 nv->enode_mac[2] = 0x03; 4461 nv->enode_mac[2] = 0x03;
4364 nv->enode_mac[3] = 0x04; 4462 nv->enode_mac[3] = 0x04;
4365 nv->enode_mac[4] = 0x05; 4463 nv->enode_mac[4] = 0x05;
4366 nv->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn); 4464 nv->enode_mac[5] = 0x06 + ha->port_no;
4367 4465
4368 rval = 1; 4466 rval = 1;
4369 } 4467 }
@@ -4396,7 +4494,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
4396 icb->enode_mac[2] = 0x03; 4494 icb->enode_mac[2] = 0x03;
4397 icb->enode_mac[3] = 0x04; 4495 icb->enode_mac[3] = 0x04;
4398 icb->enode_mac[4] = 0x05; 4496 icb->enode_mac[4] = 0x05;
4399 icb->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn); 4497 icb->enode_mac[5] = 0x06 + ha->port_no;
4400 } 4498 }
4401 4499
4402 /* Use extended-initialization control block. */ 4500 /* Use extended-initialization control block. */
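Among the qla_init.c changes, qla2x00_iidma_fcport() stops indexing link_speeds[] directly with the firmware-reported fp_speed: the table now has six entries, and 10 Gb/s ports report code 0x13, which would have read far past the end of the old five-entry array. A standalone illustration of the new bounds-checked lookup (example values only, not driver code):

#include <stdio.h>

#define LS_UNKNOWN 2

static const char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };

static const char *speed_to_string(unsigned int fp_speed)
{
	/* Codes 0-4 index the table directly; 0x13 is the 10 Gb/s code. */
	if (fp_speed < 5)
		return link_speeds[fp_speed];
	if (fp_speed == 0x13)
		return link_speeds[5];
	return link_speeds[LS_UNKNOWN];
}

int main(void)
{
	printf("fp_speed 0x03 -> %s Gb/s\n", speed_to_string(0x03));
	printf("fp_speed 0x13 -> %s Gb/s\n", speed_to_string(0x13));
	return 0;
}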
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index a8abbb95730d..13396beae2ce 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -15,6 +15,7 @@ static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
15 struct rsp_que *rsp); 15 struct rsp_que *rsp);
16static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *); 16static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
17 17
18static void qla25xx_set_que(srb_t *, struct rsp_que **);
18/** 19/**
19 * qla2x00_get_cmd_direction() - Determine control_flag data direction. 20 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
20 * @cmd: SCSI command 21 * @cmd: SCSI command
@@ -92,9 +93,10 @@ qla2x00_calc_iocbs_64(uint16_t dsds)
92 * Returns a pointer to the Continuation Type 0 IOCB packet. 93 * Returns a pointer to the Continuation Type 0 IOCB packet.
93 */ 94 */
94static inline cont_entry_t * 95static inline cont_entry_t *
95qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha) 96qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
96{ 97{
97 cont_entry_t *cont_pkt; 98 cont_entry_t *cont_pkt;
99 struct req_que *req = vha->req;
98 /* Adjust ring index. */ 100 /* Adjust ring index. */
99 req->ring_index++; 101 req->ring_index++;
100 if (req->ring_index == req->length) { 102 if (req->ring_index == req->length) {
@@ -120,10 +122,11 @@ qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha)
120 * Returns a pointer to the continuation type 1 IOCB packet. 122 * Returns a pointer to the continuation type 1 IOCB packet.
121 */ 123 */
122static inline cont_a64_entry_t * 124static inline cont_a64_entry_t *
123qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha) 125qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
124{ 126{
125 cont_a64_entry_t *cont_pkt; 127 cont_a64_entry_t *cont_pkt;
126 128
129 struct req_que *req = vha->req;
127 /* Adjust ring index. */ 130 /* Adjust ring index. */
128 req->ring_index++; 131 req->ring_index++;
129 if (req->ring_index == req->length) { 132 if (req->ring_index == req->length) {
@@ -159,7 +162,6 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
159 struct scsi_cmnd *cmd; 162 struct scsi_cmnd *cmd;
160 struct scatterlist *sg; 163 struct scatterlist *sg;
161 int i; 164 int i;
162 struct req_que *req;
163 165
164 cmd = sp->cmd; 166 cmd = sp->cmd;
165 167
@@ -174,8 +176,6 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
174 } 176 }
175 177
176 vha = sp->fcport->vha; 178 vha = sp->fcport->vha;
177 req = sp->que;
178
179 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 179 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
180 180
181 /* Three DSDs are available in the Command Type 2 IOCB */ 181 /* Three DSDs are available in the Command Type 2 IOCB */
@@ -192,7 +192,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
192 * Seven DSDs are available in the Continuation 192 * Seven DSDs are available in the Continuation
193 * Type 0 IOCB. 193 * Type 0 IOCB.
194 */ 194 */
195 cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha); 195 cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
196 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address; 196 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
197 avail_dsds = 7; 197 avail_dsds = 7;
198 } 198 }
@@ -220,7 +220,6 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
220 struct scsi_cmnd *cmd; 220 struct scsi_cmnd *cmd;
221 struct scatterlist *sg; 221 struct scatterlist *sg;
222 int i; 222 int i;
223 struct req_que *req;
224 223
225 cmd = sp->cmd; 224 cmd = sp->cmd;
226 225
@@ -235,8 +234,6 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
235 } 234 }
236 235
237 vha = sp->fcport->vha; 236 vha = sp->fcport->vha;
238 req = sp->que;
239
240 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 237 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
241 238
242 /* Two DSDs are available in the Command Type 3 IOCB */ 239 /* Two DSDs are available in the Command Type 3 IOCB */
@@ -254,7 +251,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
254 * Five DSDs are available in the Continuation 251 * Five DSDs are available in the Continuation
255 * Type 1 IOCB. 252 * Type 1 IOCB.
256 */ 253 */
257 cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha); 254 cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
258 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 255 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
259 avail_dsds = 5; 256 avail_dsds = 5;
260 } 257 }
@@ -353,7 +350,6 @@ qla2x00_start_scsi(srb_t *sp)
353 /* Build command packet */ 350 /* Build command packet */
354 req->current_outstanding_cmd = handle; 351 req->current_outstanding_cmd = handle;
355 req->outstanding_cmds[handle] = sp; 352 req->outstanding_cmds[handle] = sp;
356 sp->que = req;
357 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 353 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
358 req->cnt -= req_cnt; 354 req->cnt -= req_cnt;
359 355
@@ -453,6 +449,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
453 mrk24->lun[2] = MSB(lun); 449 mrk24->lun[2] = MSB(lun);
454 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); 450 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
455 mrk24->vp_index = vha->vp_idx; 451 mrk24->vp_index = vha->vp_idx;
452 mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
456 } else { 453 } else {
457 SET_TARGET_ID(ha, mrk->target, loop_id); 454 SET_TARGET_ID(ha, mrk->target, loop_id);
458 mrk->lun = cpu_to_le16(lun); 455 mrk->lun = cpu_to_le16(lun);
@@ -531,9 +528,6 @@ qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
531 for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++) 528 for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
532 *dword_ptr++ = 0; 529 *dword_ptr++ = 0;
533 530
534 /* Set system defined field. */
535 pkt->sys_define = (uint8_t)req->ring_index;
536
537 /* Set entry count. */ 531 /* Set entry count. */
538 pkt->entry_count = 1; 532 pkt->entry_count = 1;
539 533
@@ -656,7 +650,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
656 } 650 }
657 651
658 vha = sp->fcport->vha; 652 vha = sp->fcport->vha;
659 req = sp->que; 653 req = vha->req;
660 654
661 /* Set transfer direction */ 655 /* Set transfer direction */
662 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 656 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
@@ -687,7 +681,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
687 * Five DSDs are available in the Continuation 681 * Five DSDs are available in the Continuation
688 * Type 1 IOCB. 682 * Type 1 IOCB.
689 */ 683 */
690 cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha); 684 cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
691 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 685 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
692 avail_dsds = 5; 686 avail_dsds = 5;
693 } 687 }
@@ -724,19 +718,13 @@ qla24xx_start_scsi(srb_t *sp)
724 struct scsi_cmnd *cmd = sp->cmd; 718 struct scsi_cmnd *cmd = sp->cmd;
725 struct scsi_qla_host *vha = sp->fcport->vha; 719 struct scsi_qla_host *vha = sp->fcport->vha;
726 struct qla_hw_data *ha = vha->hw; 720 struct qla_hw_data *ha = vha->hw;
727 uint16_t que_id;
728 721
729 /* Setup device pointers. */ 722 /* Setup device pointers. */
730 ret = 0; 723 ret = 0;
731 que_id = vha->req_ques[0];
732 724
733 req = ha->req_q_map[que_id]; 725 qla25xx_set_que(sp, &rsp);
734 sp->que = req; 726 req = vha->req;
735 727
736 if (req->rsp)
737 rsp = req->rsp;
738 else
739 rsp = ha->rsp_q_map[que_id];
740 /* So we know we haven't pci_map'ed anything yet */ 728 /* So we know we haven't pci_map'ed anything yet */
741 tot_dsds = 0; 729 tot_dsds = 0;
742 730
@@ -794,7 +782,7 @@ qla24xx_start_scsi(srb_t *sp)
794 req->cnt -= req_cnt; 782 req->cnt -= req_cnt;
795 783
796 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; 784 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
797 cmd_pkt->handle = handle; 785 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
798 786
799 /* Zero out remaining portion of packet. */ 787 /* Zero out remaining portion of packet. */
800 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ 788 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -823,6 +811,8 @@ qla24xx_start_scsi(srb_t *sp)
823 811
824 /* Set total data segment count. */ 812 /* Set total data segment count. */
825 cmd_pkt->entry_count = (uint8_t)req_cnt; 813 cmd_pkt->entry_count = (uint8_t)req_cnt;
814 /* Specify response queue number where completion should happen */
815 cmd_pkt->entry_status = (uint8_t) rsp->id;
826 wmb(); 816 wmb();
827 817
828 /* Adjust ring index. */ 818 /* Adjust ring index. */
@@ -842,7 +832,7 @@ qla24xx_start_scsi(srb_t *sp)
842 /* Manage unprocessed RIO/ZIO commands in response queue. */ 832 /* Manage unprocessed RIO/ZIO commands in response queue. */
843 if (vha->flags.process_response_queue && 833 if (vha->flags.process_response_queue &&
844 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 834 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
845 qla24xx_process_response_queue(rsp); 835 qla24xx_process_response_queue(vha, rsp);
846 836
847 spin_unlock_irqrestore(&ha->hardware_lock, flags); 837 spin_unlock_irqrestore(&ha->hardware_lock, flags);
848 return QLA_SUCCESS; 838 return QLA_SUCCESS;
@@ -855,3 +845,16 @@ queuing_error:
855 845
856 return QLA_FUNCTION_FAILED; 846 return QLA_FUNCTION_FAILED;
857} 847}
848
849static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
850{
851 struct scsi_cmnd *cmd = sp->cmd;
852 struct qla_hw_data *ha = sp->fcport->vha->hw;
853 int affinity = cmd->request->cpu;
854
855 if (ql2xmultique_tag && affinity >= 0 &&
856 affinity < ha->max_rsp_queues - 1)
857 *rsp = ha->rsp_q_map[affinity + 1];
858 else
859 *rsp = ha->rsp_q_map[0];
860}
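The multiqueue rework threads a queue id through the IOCB handle: qla24xx_start_scsi() above now builds it with MAKE_HANDLE(req->id, handle), and the qla_isr.c hunks below recover the pieces with que = MSW(sts->handle) and handle = LSW(sts->handle). MAKE_HANDLE itself is defined elsewhere in the driver; a standalone sketch consistent with that unpacking, placing a 16-bit queue id in the upper half and the command index in the lower half (illustrative macros, not the driver's definitions):

#include <stdio.h>
#include <stdint.h>

/* Illustrative equivalents of the driver's LSW/MSW/MAKE_HANDLE helpers. */
#define LSW(x)			((uint16_t)(x))
#define MSW(x)			((uint16_t)((uint32_t)(x) >> 16))
#define MAKE_HANDLE(q, h)	((uint32_t)(((uint32_t)(q) << 16) | (uint16_t)(h)))

int main(void)
{
	uint32_t handle = MAKE_HANDLE(3, 42);	/* request queue 3, command slot 42 */

	printf("packed=0x%08x queue=%u index=%u\n",
	    handle, MSW(handle), LSW(handle));
	return 0;
}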
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d04981848e56..245e7afb4c4d 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -13,10 +13,9 @@ static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13static void qla2x00_process_completed_request(struct scsi_qla_host *, 13static void qla2x00_process_completed_request(struct scsi_qla_host *,
14 struct req_que *, uint32_t); 14 struct req_que *, uint32_t);
15static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); 15static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
16static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *); 16static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
17static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, 17static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
18 sts_entry_t *); 18 sts_entry_t *);
19static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
20 19
21/** 20/**
22 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 21 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -38,6 +37,7 @@ qla2100_intr_handler(int irq, void *dev_id)
38 uint16_t hccr; 37 uint16_t hccr;
39 uint16_t mb[4]; 38 uint16_t mb[4];
40 struct rsp_que *rsp; 39 struct rsp_que *rsp;
40 unsigned long flags;
41 41
42 rsp = (struct rsp_que *) dev_id; 42 rsp = (struct rsp_que *) dev_id;
43 if (!rsp) { 43 if (!rsp) {
@@ -50,8 +50,8 @@ qla2100_intr_handler(int irq, void *dev_id)
50 reg = &ha->iobase->isp; 50 reg = &ha->iobase->isp;
51 status = 0; 51 status = 0;
52 52
53 spin_lock(&ha->hardware_lock); 53 spin_lock_irqsave(&ha->hardware_lock, flags);
54 vha = qla2x00_get_rsp_host(rsp); 54 vha = pci_get_drvdata(ha->pdev);
55 for (iter = 50; iter--; ) { 55 for (iter = 50; iter--; ) {
56 hccr = RD_REG_WORD(&reg->hccr); 56 hccr = RD_REG_WORD(&reg->hccr);
57 if (hccr & HCCR_RISC_PAUSE) { 57 if (hccr & HCCR_RISC_PAUSE) {
@@ -102,7 +102,7 @@ qla2100_intr_handler(int irq, void *dev_id)
102 RD_REG_WORD(&reg->hccr); 102 RD_REG_WORD(&reg->hccr);
103 } 103 }
104 } 104 }
105 spin_unlock(&ha->hardware_lock); 105 spin_unlock_irqrestore(&ha->hardware_lock, flags);
106 106
107 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 107 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
108 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 108 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
@@ -134,6 +134,7 @@ qla2300_intr_handler(int irq, void *dev_id)
134 uint16_t mb[4]; 134 uint16_t mb[4];
135 struct rsp_que *rsp; 135 struct rsp_que *rsp;
136 struct qla_hw_data *ha; 136 struct qla_hw_data *ha;
137 unsigned long flags;
137 138
138 rsp = (struct rsp_que *) dev_id; 139 rsp = (struct rsp_que *) dev_id;
139 if (!rsp) { 140 if (!rsp) {
@@ -146,8 +147,8 @@ qla2300_intr_handler(int irq, void *dev_id)
146 reg = &ha->iobase->isp; 147 reg = &ha->iobase->isp;
147 status = 0; 148 status = 0;
148 149
149 spin_lock(&ha->hardware_lock); 150 spin_lock_irqsave(&ha->hardware_lock, flags);
150 vha = qla2x00_get_rsp_host(rsp); 151 vha = pci_get_drvdata(ha->pdev);
151 for (iter = 50; iter--; ) { 152 for (iter = 50; iter--; ) {
152 stat = RD_REG_DWORD(&reg->u.isp2300.host_status); 153 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
153 if (stat & HSR_RISC_PAUSED) { 154 if (stat & HSR_RISC_PAUSED) {
@@ -217,7 +218,7 @@ qla2300_intr_handler(int irq, void *dev_id)
217 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); 218 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
218 RD_REG_WORD_RELAXED(&reg->hccr); 219 RD_REG_WORD_RELAXED(&reg->hccr);
219 } 220 }
220 spin_unlock(&ha->hardware_lock); 221 spin_unlock_irqrestore(&ha->hardware_lock, flags);
221 222
222 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 223 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
223 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 224 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
@@ -685,7 +686,7 @@ skip_rio:
685 vha->host_no)); 686 vha->host_no));
686 687
687 if (IS_FWI2_CAPABLE(ha)) 688 if (IS_FWI2_CAPABLE(ha))
688 qla24xx_process_response_queue(rsp); 689 qla24xx_process_response_queue(vha, rsp);
689 else 690 else
690 qla2x00_process_response_queue(rsp); 691 qla2x00_process_response_queue(rsp);
691 break; 692 break;
@@ -766,7 +767,10 @@ qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
766 struct qla_hw_data *ha = vha->hw; 767 struct qla_hw_data *ha = vha->hw;
767 struct req_que *req = NULL; 768 struct req_que *req = NULL;
768 769
769 req = ha->req_q_map[vha->req_ques[0]]; 770 if (!ql2xqfulltracking)
771 return;
772
773 req = vha->req;
770 if (!req) 774 if (!req)
771 return; 775 return;
772 if (req->max_q_depth <= sdev->queue_depth) 776 if (req->max_q_depth <= sdev->queue_depth)
@@ -808,6 +812,9 @@ qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
808 fc_port_t *fcport; 812 fc_port_t *fcport;
809 struct scsi_device *sdev; 813 struct scsi_device *sdev;
810 814
815 if (!ql2xqfulltracking)
816 return;
817
811 sdev = sp->cmd->device; 818 sdev = sp->cmd->device;
812 if (sdev->queue_depth >= req->max_q_depth) 819 if (sdev->queue_depth >= req->max_q_depth)
813 return; 820 return;
@@ -858,8 +865,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
858 qla2x00_ramp_up_queue_depth(vha, req, sp); 865 qla2x00_ramp_up_queue_depth(vha, req, sp);
859 qla2x00_sp_compl(ha, sp); 866 qla2x00_sp_compl(ha, sp);
860 } else { 867 } else {
861 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n", 868 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
862 vha->host_no)); 869 " handle(%d)\n", vha->host_no, req->id, index));
863 qla_printk(KERN_WARNING, ha, 870 qla_printk(KERN_WARNING, ha,
864 "Invalid ISP SCSI completion handle\n"); 871 "Invalid ISP SCSI completion handle\n");
865 872
@@ -881,7 +888,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
881 uint16_t handle_cnt; 888 uint16_t handle_cnt;
882 uint16_t cnt; 889 uint16_t cnt;
883 890
884 vha = qla2x00_get_rsp_host(rsp); 891 vha = pci_get_drvdata(ha->pdev);
885 892
886 if (!vha->flags.online) 893 if (!vha->flags.online)
887 return; 894 return;
@@ -926,7 +933,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
926 } 933 }
927 break; 934 break;
928 case STATUS_CONT_TYPE: 935 case STATUS_CONT_TYPE:
929 qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt); 936 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
930 break; 937 break;
931 default: 938 default:
932 /* Type Not Supported. */ 939 /* Type Not Supported. */
@@ -945,7 +952,8 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
945} 952}
946 953
947static inline void 954static inline void
948qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len) 955qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len,
956 struct rsp_que *rsp)
949{ 957{
950 struct scsi_cmnd *cp = sp->cmd; 958 struct scsi_cmnd *cp = sp->cmd;
951 959
@@ -962,7 +970,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
962 sp->request_sense_ptr += sense_len; 970 sp->request_sense_ptr += sense_len;
963 sp->request_sense_length -= sense_len; 971 sp->request_sense_length -= sense_len;
964 if (sp->request_sense_length != 0) 972 if (sp->request_sense_length != 0)
965 sp->fcport->vha->status_srb = sp; 973 rsp->status_srb = sp;
966 974
967 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " 975 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
968 "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no, 976 "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
@@ -992,7 +1000,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
992 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len; 1000 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
993 uint8_t *rsp_info, *sense_data; 1001 uint8_t *rsp_info, *sense_data;
994 struct qla_hw_data *ha = vha->hw; 1002 struct qla_hw_data *ha = vha->hw;
995 struct req_que *req = rsp->req; 1003 uint32_t handle;
1004 uint16_t que;
1005 struct req_que *req;
996 1006
997 sts = (sts_entry_t *) pkt; 1007 sts = (sts_entry_t *) pkt;
998 sts24 = (struct sts_entry_24xx *) pkt; 1008 sts24 = (struct sts_entry_24xx *) pkt;
@@ -1003,18 +1013,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1003 comp_status = le16_to_cpu(sts->comp_status); 1013 comp_status = le16_to_cpu(sts->comp_status);
1004 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 1014 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1005 } 1015 }
1006 1016 handle = (uint32_t) LSW(sts->handle);
1017 que = MSW(sts->handle);
1018 req = ha->req_q_map[que];
1007 /* Fast path completion. */ 1019 /* Fast path completion. */
1008 if (comp_status == CS_COMPLETE && scsi_status == 0) { 1020 if (comp_status == CS_COMPLETE && scsi_status == 0) {
1009 qla2x00_process_completed_request(vha, req, sts->handle); 1021 qla2x00_process_completed_request(vha, req, handle);
1010 1022
1011 return; 1023 return;
1012 } 1024 }
1013 1025
1014 /* Validate handle. */ 1026 /* Validate handle. */
1015 if (sts->handle < MAX_OUTSTANDING_COMMANDS) { 1027 if (handle < MAX_OUTSTANDING_COMMANDS) {
1016 sp = req->outstanding_cmds[sts->handle]; 1028 sp = req->outstanding_cmds[handle];
1017 req->outstanding_cmds[sts->handle] = NULL; 1029 req->outstanding_cmds[handle] = NULL;
1018 } else 1030 } else
1019 sp = NULL; 1031 sp = NULL;
1020 1032
@@ -1030,7 +1042,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1030 cp = sp->cmd; 1042 cp = sp->cmd;
1031 if (cp == NULL) { 1043 if (cp == NULL) {
1032 DEBUG2(printk("scsi(%ld): Command already returned back to OS " 1044 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
1033 "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp)); 1045 "pkt->handle=%d sp=%p.\n", vha->host_no, handle, sp));
1034 qla_printk(KERN_WARNING, ha, 1046 qla_printk(KERN_WARNING, ha,
1035 "Command is NULL: already returned to OS (sp=%p)\n", sp); 1047 "Command is NULL: already returned to OS (sp=%p)\n", sp);
1036 1048
@@ -1121,6 +1133,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1121 scsi_status)); 1133 scsi_status));
1122 1134
1123 /* Adjust queue depth for all luns on the port. */ 1135 /* Adjust queue depth for all luns on the port. */
1136 if (!ql2xqfulltracking)
1137 break;
1124 fcport->last_queue_full = jiffies; 1138 fcport->last_queue_full = jiffies;
1125 starget_for_each_device(cp->device->sdev_target, 1139 starget_for_each_device(cp->device->sdev_target,
1126 fcport, qla2x00_adjust_sdev_qdepth_down); 1140 fcport, qla2x00_adjust_sdev_qdepth_down);
@@ -1133,7 +1147,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1133 if (!(scsi_status & SS_SENSE_LEN_VALID)) 1147 if (!(scsi_status & SS_SENSE_LEN_VALID))
1134 break; 1148 break;
1135 1149
1136 qla2x00_handle_sense(sp, sense_data, sense_len); 1150 qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
1137 break; 1151 break;
1138 1152
1139 case CS_DATA_UNDERRUN: 1153 case CS_DATA_UNDERRUN:
@@ -1179,6 +1193,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1179 * Adjust queue depth for all luns on the 1193 * Adjust queue depth for all luns on the
1180 * port. 1194 * port.
1181 */ 1195 */
1196 if (!ql2xqfulltracking)
1197 break;
1182 fcport->last_queue_full = jiffies; 1198 fcport->last_queue_full = jiffies;
1183 starget_for_each_device( 1199 starget_for_each_device(
1184 cp->device->sdev_target, fcport, 1200 cp->device->sdev_target, fcport,
@@ -1192,12 +1208,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1192 if (!(scsi_status & SS_SENSE_LEN_VALID)) 1208 if (!(scsi_status & SS_SENSE_LEN_VALID))
1193 break; 1209 break;
1194 1210
1195 qla2x00_handle_sense(sp, sense_data, sense_len); 1211 qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
1196 } else { 1212 } else {
1197 /* 1213 /*
1198 * If RISC reports underrun and target does not report 1214 * If RISC reports underrun and target does not report
1199 * it then we must have a lost frame, so tell upper 1215 * it then we must have a lost frame, so tell upper
1200 * layer to retry it by reporting a bus busy. 1216 * layer to retry it by reporting an error.
1201 */ 1217 */
1202 if (!(scsi_status & SS_RESIDUAL_UNDER)) { 1218 if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1203 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped " 1219 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
@@ -1207,7 +1223,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1207 cp->device->id, cp->device->lun, resid, 1223 cp->device->id, cp->device->lun, resid,
1208 scsi_bufflen(cp))); 1224 scsi_bufflen(cp)));
1209 1225
1210 cp->result = DID_BUS_BUSY << 16; 1226 cp->result = DID_ERROR << 16;
1211 break; 1227 break;
1212 } 1228 }
1213 1229
@@ -1334,7 +1350,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1334 } 1350 }
1335 1351
1336 /* Place command on done queue. */ 1352 /* Place command on done queue. */
1337 if (vha->status_srb == NULL) 1353 if (rsp->status_srb == NULL)
1338 qla2x00_sp_compl(ha, sp); 1354 qla2x00_sp_compl(ha, sp);
1339} 1355}
1340 1356
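Note: throughout this series the 32-bit IOCB handle is split into a queue tag and a command index, which is why the completion paths above run MSW()/LSW() over pkt->handle before touching outstanding_cmds[]. A minimal standalone sketch of that packing (the macro bodies below are assumptions mirroring the driver's MAKE_HANDLE/LSW/MSW helpers, not copied from it):

    #include <stdint.h>

    #define LSW(x)            ((uint16_t)(x))
    #define MSW(x)            ((uint16_t)((uint32_t)(x) >> 16))
    #define MAKE_HANDLE(q, h) ((uint32_t)(((uint32_t)(q) << 16) | LSW(h)))

    /* A handle issued on request queue 2, command slot 0x001f ...        */
    static const uint32_t handle = MAKE_HANDLE(2, 0x001f);
    /* ... is split back apart by the ISRs: MSW(handle) picks             */
    /* ha->req_q_map[2], LSW(handle) indexes outstanding_cmds[0x001f].    */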
@@ -1346,11 +1362,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1346 * Extended sense data. 1362 * Extended sense data.
1347 */ 1363 */
1348static void 1364static void
1349qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt) 1365qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1350{ 1366{
1351 uint8_t sense_sz = 0; 1367 uint8_t sense_sz = 0;
1352 struct qla_hw_data *ha = vha->hw; 1368 struct qla_hw_data *ha = rsp->hw;
1353 srb_t *sp = vha->status_srb; 1369 srb_t *sp = rsp->status_srb;
1354 struct scsi_cmnd *cp; 1370 struct scsi_cmnd *cp;
1355 1371
1356 if (sp != NULL && sp->request_sense_length != 0) { 1372 if (sp != NULL && sp->request_sense_length != 0) {
@@ -1362,7 +1378,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
1362 "cmd is NULL: already returned to OS (sp=%p)\n", 1378 "cmd is NULL: already returned to OS (sp=%p)\n",
1363 sp); 1379 sp);
1364 1380
1365 vha->status_srb = NULL; 1381 rsp->status_srb = NULL;
1366 return; 1382 return;
1367 } 1383 }
1368 1384
@@ -1383,7 +1399,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
1383 1399
1384 /* Place command on done queue. */ 1400 /* Place command on done queue. */
1385 if (sp->request_sense_length == 0) { 1401 if (sp->request_sense_length == 0) {
1386 vha->status_srb = NULL; 1402 rsp->status_srb = NULL;
1387 qla2x00_sp_compl(ha, sp); 1403 qla2x00_sp_compl(ha, sp);
1388 } 1404 }
1389 } 1405 }
@@ -1399,7 +1415,9 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1399{ 1415{
1400 srb_t *sp; 1416 srb_t *sp;
1401 struct qla_hw_data *ha = vha->hw; 1417 struct qla_hw_data *ha = vha->hw;
1402 struct req_que *req = rsp->req; 1418 uint32_t handle = LSW(pkt->handle);
1419 uint16_t que = MSW(pkt->handle);
1420 struct req_que *req = ha->req_q_map[que];
1403#if defined(QL_DEBUG_LEVEL_2) 1421#if defined(QL_DEBUG_LEVEL_2)
1404 if (pkt->entry_status & RF_INV_E_ORDER) 1422 if (pkt->entry_status & RF_INV_E_ORDER)
1405 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__); 1423 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
@@ -1417,14 +1435,14 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1417#endif 1435#endif
1418 1436
1419 /* Validate handle. */ 1437 /* Validate handle. */
1420 if (pkt->handle < MAX_OUTSTANDING_COMMANDS) 1438 if (handle < MAX_OUTSTANDING_COMMANDS)
1421 sp = req->outstanding_cmds[pkt->handle]; 1439 sp = req->outstanding_cmds[handle];
1422 else 1440 else
1423 sp = NULL; 1441 sp = NULL;
1424 1442
1425 if (sp) { 1443 if (sp) {
1426 /* Free outstanding command slot. */ 1444 /* Free outstanding command slot. */
1427 req->outstanding_cmds[pkt->handle] = NULL; 1445 req->outstanding_cmds[handle] = NULL;
1428 1446
1429 /* Bad payload or header */ 1447 /* Bad payload or header */
1430 if (pkt->entry_status & 1448 if (pkt->entry_status &
@@ -1486,13 +1504,10 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1486 * qla24xx_process_response_queue() - Process response queue entries. 1504 * qla24xx_process_response_queue() - Process response queue entries.
1487 * @ha: SCSI driver HA context 1505 * @ha: SCSI driver HA context
1488 */ 1506 */
1489void 1507void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1490qla24xx_process_response_queue(struct rsp_que *rsp) 1508 struct rsp_que *rsp)
1491{ 1509{
1492 struct sts_entry_24xx *pkt; 1510 struct sts_entry_24xx *pkt;
1493 struct scsi_qla_host *vha;
1494
1495 vha = qla2x00_get_rsp_host(rsp);
1496 1511
1497 if (!vha->flags.online) 1512 if (!vha->flags.online)
1498 return; 1513 return;
@@ -1523,7 +1538,7 @@ qla24xx_process_response_queue(struct rsp_que *rsp)
1523 qla2x00_status_entry(vha, rsp, pkt); 1538 qla2x00_status_entry(vha, rsp, pkt);
1524 break; 1539 break;
1525 case STATUS_CONT_TYPE: 1540 case STATUS_CONT_TYPE:
1526 qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt); 1541 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1527 break; 1542 break;
1528 case VP_RPT_ID_IOCB_TYPE: 1543 case VP_RPT_ID_IOCB_TYPE:
1529 qla24xx_report_id_acquisition(vha, 1544 qla24xx_report_id_acquisition(vha,
@@ -1613,6 +1628,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1613 uint32_t hccr; 1628 uint32_t hccr;
1614 uint16_t mb[4]; 1629 uint16_t mb[4];
1615 struct rsp_que *rsp; 1630 struct rsp_que *rsp;
1631 unsigned long flags;
1616 1632
1617 rsp = (struct rsp_que *) dev_id; 1633 rsp = (struct rsp_que *) dev_id;
1618 if (!rsp) { 1634 if (!rsp) {
@@ -1625,8 +1641,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
1625 reg = &ha->iobase->isp24; 1641 reg = &ha->iobase->isp24;
1626 status = 0; 1642 status = 0;
1627 1643
1628 spin_lock(&ha->hardware_lock); 1644 spin_lock_irqsave(&ha->hardware_lock, flags);
1629 vha = qla2x00_get_rsp_host(rsp); 1645 vha = pci_get_drvdata(ha->pdev);
1630 for (iter = 50; iter--; ) { 1646 for (iter = 50; iter--; ) {
1631 stat = RD_REG_DWORD(&reg->host_status); 1647 stat = RD_REG_DWORD(&reg->host_status);
1632 if (stat & HSRX_RISC_PAUSED) { 1648 if (stat & HSRX_RISC_PAUSED) {
@@ -1664,7 +1680,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1664 break; 1680 break;
1665 case 0x13: 1681 case 0x13:
1666 case 0x14: 1682 case 0x14:
1667 qla24xx_process_response_queue(rsp); 1683 qla24xx_process_response_queue(vha, rsp);
1668 break; 1684 break;
1669 default: 1685 default:
1670 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 1686 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
@@ -1675,7 +1691,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1675 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1691 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1676 RD_REG_DWORD_RELAXED(&reg->hccr); 1692 RD_REG_DWORD_RELAXED(&reg->hccr);
1677 } 1693 }
1678 spin_unlock(&ha->hardware_lock); 1694 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1679 1695
1680 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 1696 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1681 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 1697 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
@@ -1692,6 +1708,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
1692 struct qla_hw_data *ha; 1708 struct qla_hw_data *ha;
1693 struct rsp_que *rsp; 1709 struct rsp_que *rsp;
1694 struct device_reg_24xx __iomem *reg; 1710 struct device_reg_24xx __iomem *reg;
1711 struct scsi_qla_host *vha;
1695 1712
1696 rsp = (struct rsp_que *) dev_id; 1713 rsp = (struct rsp_que *) dev_id;
1697 if (!rsp) { 1714 if (!rsp) {
@@ -1704,7 +1721,8 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
1704 1721
1705 spin_lock_irq(&ha->hardware_lock); 1722 spin_lock_irq(&ha->hardware_lock);
1706 1723
1707 qla24xx_process_response_queue(rsp); 1724 vha = qla25xx_get_host(rsp);
1725 qla24xx_process_response_queue(vha, rsp);
1708 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1726 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1709 1727
1710 spin_unlock_irq(&ha->hardware_lock); 1728 spin_unlock_irq(&ha->hardware_lock);
@@ -1717,7 +1735,6 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
1717{ 1735{
1718 struct qla_hw_data *ha; 1736 struct qla_hw_data *ha;
1719 struct rsp_que *rsp; 1737 struct rsp_que *rsp;
1720 struct device_reg_24xx __iomem *reg;
1721 1738
1722 rsp = (struct rsp_que *) dev_id; 1739 rsp = (struct rsp_que *) dev_id;
1723 if (!rsp) { 1740 if (!rsp) {
@@ -1726,13 +1743,8 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
1726 return IRQ_NONE; 1743 return IRQ_NONE;
1727 } 1744 }
1728 ha = rsp->hw; 1745 ha = rsp->hw;
1729 reg = &ha->iobase->isp24;
1730 1746
1731 spin_lock_irq(&ha->hardware_lock); 1747 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
1732
1733 qla24xx_process_response_queue(rsp);
1734
1735 spin_unlock_irq(&ha->hardware_lock);
1736 1748
1737 return IRQ_HANDLED; 1749 return IRQ_HANDLED;
1738} 1750}
@@ -1760,7 +1772,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1760 status = 0; 1772 status = 0;
1761 1773
1762 spin_lock_irq(&ha->hardware_lock); 1774 spin_lock_irq(&ha->hardware_lock);
1763 vha = qla2x00_get_rsp_host(rsp); 1775 vha = pci_get_drvdata(ha->pdev);
1764 do { 1776 do {
1765 stat = RD_REG_DWORD(&reg->host_status); 1777 stat = RD_REG_DWORD(&reg->host_status);
1766 if (stat & HSRX_RISC_PAUSED) { 1778 if (stat & HSRX_RISC_PAUSED) {
@@ -1798,7 +1810,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1798 break; 1810 break;
1799 case 0x13: 1811 case 0x13:
1800 case 0x14: 1812 case 0x14:
1801 qla24xx_process_response_queue(rsp); 1813 qla24xx_process_response_queue(vha, rsp);
1802 break; 1814 break;
1803 default: 1815 default:
1804 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 1816 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
@@ -1822,31 +1834,14 @@ qla24xx_msix_default(int irq, void *dev_id)
1822/* Interrupt handling helpers. */ 1834/* Interrupt handling helpers. */
1823 1835
1824struct qla_init_msix_entry { 1836struct qla_init_msix_entry {
1825 uint16_t entry;
1826 uint16_t index;
1827 const char *name; 1837 const char *name;
1828 irq_handler_t handler; 1838 irq_handler_t handler;
1829}; 1839};
1830 1840
1831static struct qla_init_msix_entry base_queue = { 1841static struct qla_init_msix_entry msix_entries[3] = {
1832 .entry = 0, 1842 { "qla2xxx (default)", qla24xx_msix_default },
1833 .index = 0, 1843 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
1834 .name = "qla2xxx (default)", 1844 { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
1835 .handler = qla24xx_msix_default,
1836};
1837
1838static struct qla_init_msix_entry base_rsp_queue = {
1839 .entry = 1,
1840 .index = 1,
1841 .name = "qla2xxx (rsp_q)",
1842 .handler = qla24xx_msix_rsp_q,
1843};
1844
1845static struct qla_init_msix_entry multi_rsp_queue = {
1846 .entry = 1,
1847 .index = 1,
1848 .name = "qla2xxx (multi_q)",
1849 .handler = qla25xx_msix_rsp_q,
1850}; 1845};
1851 1846
1852static void 1847static void
@@ -1873,7 +1868,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
1873 int i, ret; 1868 int i, ret;
1874 struct msix_entry *entries; 1869 struct msix_entry *entries;
1875 struct qla_msix_entry *qentry; 1870 struct qla_msix_entry *qentry;
1876 struct qla_init_msix_entry *msix_queue;
1877 1871
1878 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, 1872 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
1879 GFP_KERNEL); 1873 GFP_KERNEL);
@@ -1900,7 +1894,7 @@ msix_failed:
1900 ha->msix_count, ret); 1894 ha->msix_count, ret);
1901 goto msix_out; 1895 goto msix_out;
1902 } 1896 }
1903 ha->max_queues = ha->msix_count - 1; 1897 ha->max_rsp_queues = ha->msix_count - 1;
1904 } 1898 }
1905 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * 1899 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
1906 ha->msix_count, GFP_KERNEL); 1900 ha->msix_count, GFP_KERNEL);
@@ -1918,45 +1912,27 @@ msix_failed:
1918 qentry->rsp = NULL; 1912 qentry->rsp = NULL;
1919 } 1913 }
1920 1914
1921 /* Enable MSI-X for AENs for queue 0 */ 1915 /* Enable MSI-X vectors for the base queue */
1922 qentry = &ha->msix_entries[0]; 1916 for (i = 0; i < 2; i++) {
1923 ret = request_irq(qentry->vector, base_queue.handler, 0, 1917 qentry = &ha->msix_entries[i];
1924 base_queue.name, rsp); 1918 ret = request_irq(qentry->vector, msix_entries[i].handler,
1925 if (ret) { 1919 0, msix_entries[i].name, rsp);
1926 qla_printk(KERN_WARNING, ha, 1920 if (ret) {
1921 qla_printk(KERN_WARNING, ha,
1927 "MSI-X: Unable to register handler -- %x/%d.\n", 1922 "MSI-X: Unable to register handler -- %x/%d.\n",
1928 qentry->vector, ret); 1923 qentry->vector, ret);
1929 qla24xx_disable_msix(ha); 1924 qla24xx_disable_msix(ha);
1930 goto msix_out; 1925 ha->mqenable = 0;
1926 goto msix_out;
1927 }
1928 qentry->have_irq = 1;
1929 qentry->rsp = rsp;
1930 rsp->msix = qentry;
1931 } 1931 }
1932 qentry->have_irq = 1;
1933 qentry->rsp = rsp;
1934 1932
1935 /* Enable MSI-X vector for response queue update for queue 0 */ 1933 /* Enable MSI-X vector for response queue update for queue 0 */
1936 if (ha->max_queues > 1 && ha->mqiobase) { 1934 if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
1937 ha->mqenable = 1; 1935 ha->mqenable = 1;
1938 msix_queue = &multi_rsp_queue;
1939 qla_printk(KERN_INFO, ha,
1940 "MQ enabled, Number of Queue Resources: %d \n",
1941 ha->max_queues);
1942 } else {
1943 ha->mqenable = 0;
1944 msix_queue = &base_rsp_queue;
1945 }
1946
1947 qentry = &ha->msix_entries[1];
1948 ret = request_irq(qentry->vector, msix_queue->handler, 0,
1949 msix_queue->name, rsp);
1950 if (ret) {
1951 qla_printk(KERN_WARNING, ha,
1952 "MSI-X: Unable to register handler -- %x/%d.\n",
1953 qentry->vector, ret);
1954 qla24xx_disable_msix(ha);
1955 ha->mqenable = 0;
1956 goto msix_out;
1957 }
1958 qentry->have_irq = 1;
1959 qentry->rsp = rsp;
1960 1936
1961msix_out: 1937msix_out:
1962 kfree(entries); 1938 kfree(entries);
@@ -2063,35 +2039,11 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
2063 } 2039 }
2064} 2040}
2065 2041
2066static struct scsi_qla_host *
2067qla2x00_get_rsp_host(struct rsp_que *rsp)
2068{
2069 srb_t *sp;
2070 struct qla_hw_data *ha = rsp->hw;
2071 struct scsi_qla_host *vha = NULL;
2072 struct sts_entry_24xx *pkt;
2073 struct req_que *req;
2074
2075 if (rsp->id) {
2076 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2077 req = rsp->req;
2078 if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
2079 sp = req->outstanding_cmds[pkt->handle];
2080 if (sp)
2081 vha = sp->fcport->vha;
2082 }
2083 }
2084 if (!vha)
2085 /* handle it in base queue */
2086 vha = pci_get_drvdata(ha->pdev);
2087
2088 return vha;
2089}
2090 2042
2091int qla25xx_request_irq(struct rsp_que *rsp) 2043int qla25xx_request_irq(struct rsp_que *rsp)
2092{ 2044{
2093 struct qla_hw_data *ha = rsp->hw; 2045 struct qla_hw_data *ha = rsp->hw;
2094 struct qla_init_msix_entry *intr = &multi_rsp_queue; 2046 struct qla_init_msix_entry *intr = &msix_entries[2];
2095 struct qla_msix_entry *msix = rsp->msix; 2047 struct qla_msix_entry *msix = rsp->msix;
2096 int ret; 2048 int ret;
2097 2049
@@ -2106,3 +2058,30 @@ int qla25xx_request_irq(struct rsp_que *rsp)
2106 msix->rsp = rsp; 2058 msix->rsp = rsp;
2107 return ret; 2059 return ret;
2108} 2060}
2061
2062struct scsi_qla_host *
2063qla25xx_get_host(struct rsp_que *rsp)
2064{
2065 srb_t *sp;
2066 struct qla_hw_data *ha = rsp->hw;
2067 struct scsi_qla_host *vha = NULL;
2068 struct sts_entry_24xx *pkt;
2069 struct req_que *req;
2070 uint16_t que;
2071 uint32_t handle;
2072
2073 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2074 que = MSW(pkt->handle);
2075 handle = (uint32_t) LSW(pkt->handle);
2076 req = ha->req_q_map[que];
2077 if (handle < MAX_OUTSTANDING_COMMANDS) {
2078 sp = req->outstanding_cmds[handle];
2079 if (sp)
2080 return sp->fcport->vha;
2081 else
2082 goto base_que;
2083 }
2084base_que:
2085 vha = pci_get_drvdata(ha->pdev);
2086 return vha;
2087}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index e67c1660bf46..fe69f3057671 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -408,7 +408,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
408 * Context: 408 * Context:
409 * Kernel context. 409 * Kernel context.
410 */ 410 */
411void 411int
412qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, 412qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
413 uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi, 413 uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi,
414 uint32_t *mpi_caps, uint8_t *phy) 414 uint32_t *mpi_caps, uint8_t *phy)
@@ -427,6 +427,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
427 mcp->flags = 0; 427 mcp->flags = 0;
428 mcp->tov = MBX_TOV_SECONDS; 428 mcp->tov = MBX_TOV_SECONDS;
429 rval = qla2x00_mailbox_command(vha, mcp); 429 rval = qla2x00_mailbox_command(vha, mcp);
430 if (rval != QLA_SUCCESS)
431 goto failed;
430 432
431 /* Return mailbox data. */ 433 /* Return mailbox data. */
432 *major = mcp->mb[1]; 434 *major = mcp->mb[1];
@@ -446,7 +448,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
446 phy[1] = mcp->mb[9] >> 8; 448 phy[1] = mcp->mb[9] >> 8;
447 phy[2] = mcp->mb[9] & 0xff; 449 phy[2] = mcp->mb[9] & 0xff;
448 } 450 }
449 451failed:
450 if (rval != QLA_SUCCESS) { 452 if (rval != QLA_SUCCESS) {
451 /*EMPTY*/ 453 /*EMPTY*/
452 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 454 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
@@ -455,6 +457,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
455 /*EMPTY*/ 457 /*EMPTY*/
456 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 458 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
457 } 459 }
460 return rval;
458} 461}
459 462
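Note: qla2x00_get_fw_version() now propagates the mailbox status instead of returning void, so callers can bail out rather than consume stale version data. A hypothetical caller (local variable names are illustrative only, the argument order follows the prototype above):

    if (qla2x00_get_fw_version(vha, &major, &minor, &subminor, &attrs,
        &memory, mpi, &mpi_caps, phy) != QLA_SUCCESS)
            return QLA_FUNCTION_FAILED;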
460/* 463/*
@@ -748,20 +751,20 @@ qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
748 * Kernel context. 751 * Kernel context.
749 */ 752 */
750int 753int
751qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) 754qla2x00_abort_command(srb_t *sp)
752{ 755{
753 unsigned long flags = 0; 756 unsigned long flags = 0;
754 fc_port_t *fcport;
755 int rval; 757 int rval;
756 uint32_t handle = 0; 758 uint32_t handle = 0;
757 mbx_cmd_t mc; 759 mbx_cmd_t mc;
758 mbx_cmd_t *mcp = &mc; 760 mbx_cmd_t *mcp = &mc;
761 fc_port_t *fcport = sp->fcport;
762 scsi_qla_host_t *vha = fcport->vha;
759 struct qla_hw_data *ha = vha->hw; 763 struct qla_hw_data *ha = vha->hw;
764 struct req_que *req = vha->req;
760 765
761 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no)); 766 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no));
762 767
763 fcport = sp->fcport;
764
765 spin_lock_irqsave(&ha->hardware_lock, flags); 768 spin_lock_irqsave(&ha->hardware_lock, flags);
766 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 769 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
767 if (req->outstanding_cmds[handle] == sp) 770 if (req->outstanding_cmds[handle] == sp)
@@ -800,7 +803,7 @@ qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
800} 803}
801 804
802int 805int
803qla2x00_abort_target(struct fc_port *fcport, unsigned int l) 806qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
804{ 807{
805 int rval, rval2; 808 int rval, rval2;
806 mbx_cmd_t mc; 809 mbx_cmd_t mc;
@@ -813,8 +816,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
813 816
814 l = l; 817 l = l;
815 vha = fcport->vha; 818 vha = fcport->vha;
816 req = vha->hw->req_q_map[0]; 819 req = vha->hw->req_q_map[tag];
817 rsp = vha->hw->rsp_q_map[0]; 820 rsp = vha->hw->rsp_q_map[tag];
818 mcp->mb[0] = MBC_ABORT_TARGET; 821 mcp->mb[0] = MBC_ABORT_TARGET;
819 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; 822 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
820 if (HAS_EXTENDED_IDS(vha->hw)) { 823 if (HAS_EXTENDED_IDS(vha->hw)) {
@@ -850,7 +853,7 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
850} 853}
851 854
852int 855int
853qla2x00_lun_reset(struct fc_port *fcport, unsigned int l) 856qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
854{ 857{
855 int rval, rval2; 858 int rval, rval2;
856 mbx_cmd_t mc; 859 mbx_cmd_t mc;
@@ -862,8 +865,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l)
862 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no)); 865 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
863 866
864 vha = fcport->vha; 867 vha = fcport->vha;
865 req = vha->hw->req_q_map[0]; 868 req = vha->hw->req_q_map[tag];
866 rsp = vha->hw->rsp_q_map[0]; 869 rsp = vha->hw->rsp_q_map[tag];
867 mcp->mb[0] = MBC_LUN_RESET; 870 mcp->mb[0] = MBC_LUN_RESET;
868 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 871 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
869 if (HAS_EXTENDED_IDS(vha->hw)) 872 if (HAS_EXTENDED_IDS(vha->hw))
@@ -931,6 +934,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
931 mcp->mb[9] = vha->vp_idx; 934 mcp->mb[9] = vha->vp_idx;
932 mcp->out_mb = MBX_9|MBX_0; 935 mcp->out_mb = MBX_9|MBX_0;
933 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 936 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
937 if (IS_QLA81XX(vha->hw))
938 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
934 mcp->tov = MBX_TOV_SECONDS; 939 mcp->tov = MBX_TOV_SECONDS;
935 mcp->flags = 0; 940 mcp->flags = 0;
936 rval = qla2x00_mailbox_command(vha, mcp); 941 rval = qla2x00_mailbox_command(vha, mcp);
@@ -952,9 +957,19 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
952 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n", 957 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n",
953 vha->host_no, rval)); 958 vha->host_no, rval));
954 } else { 959 } else {
955 /*EMPTY*/
956 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n", 960 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n",
957 vha->host_no)); 961 vha->host_no));
962
963 if (IS_QLA81XX(vha->hw)) {
964 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
965 vha->fcoe_fcf_idx = mcp->mb[10];
966 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
967 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
968 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
969 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
970 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
971 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
972 }
958 } 973 }
959 974
960 return rval; 975 return rval;
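Note: on QLA81xx parts the adapter-ID mailbox now also reports the FCoE VLAN, FCF index and VN-port MAC, with the 6-byte MAC packed into three 16-bit mailbox registers. A small self-contained illustration of the unpacking performed above:

    #include <stdint.h>

    /* mb[11..13] carry the 6-byte VN-port MAC, mirroring the assignments above. */
    static void unpack_vn_port_mac(uint8_t mac[6], const uint16_t mb[14])
    {
            mac[5] = mb[11] >> 8;
            mac[4] = mb[11] & 0xff;
            mac[3] = mb[12] >> 8;
            mac[2] = mb[12] & 0xff;
            mac[1] = mb[13] >> 8;
            mac[0] = mb[13] & 0xff;
    }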
@@ -1252,15 +1267,22 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1252 1267
1253 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 1268 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1254 mcp->out_mb = MBX_0; 1269 mcp->out_mb = MBX_0;
1255 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1270 if (IS_FWI2_CAPABLE(vha->hw))
1271 mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1272 else
1273 mcp->in_mb = MBX_1|MBX_0;
1256 mcp->tov = MBX_TOV_SECONDS; 1274 mcp->tov = MBX_TOV_SECONDS;
1257 mcp->flags = 0; 1275 mcp->flags = 0;
1258 rval = qla2x00_mailbox_command(vha, mcp); 1276 rval = qla2x00_mailbox_command(vha, mcp);
1259 1277
1260 /* Return firmware states. */ 1278 /* Return firmware states. */
1261 states[0] = mcp->mb[1]; 1279 states[0] = mcp->mb[1];
1262 states[1] = mcp->mb[2]; 1280 if (IS_FWI2_CAPABLE(vha->hw)) {
1263 states[2] = mcp->mb[3]; 1281 states[1] = mcp->mb[2];
1282 states[2] = mcp->mb[3];
1283 states[3] = mcp->mb[4];
1284 states[4] = mcp->mb[5];
1285 }
1264 1286
1265 if (rval != QLA_SUCCESS) { 1287 if (rval != QLA_SUCCESS) {
1266 /*EMPTY*/ 1288 /*EMPTY*/
@@ -1480,9 +1502,17 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1480 dma_addr_t lg_dma; 1502 dma_addr_t lg_dma;
1481 uint32_t iop[2]; 1503 uint32_t iop[2];
1482 struct qla_hw_data *ha = vha->hw; 1504 struct qla_hw_data *ha = vha->hw;
1505 struct req_que *req;
1506 struct rsp_que *rsp;
1483 1507
1484 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1508 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1485 1509
1510 if (ql2xmultique_tag)
1511 req = ha->req_q_map[0];
1512 else
1513 req = vha->req;
1514 rsp = req->rsp;
1515
1486 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1516 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1487 if (lg == NULL) { 1517 if (lg == NULL) {
1488 DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n", 1518 DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n",
@@ -1493,6 +1523,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1493 1523
1494 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1524 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1495 lg->entry_count = 1; 1525 lg->entry_count = 1;
1526 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1496 lg->nport_handle = cpu_to_le16(loop_id); 1527 lg->nport_handle = cpu_to_le16(loop_id);
1497 lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI); 1528 lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI);
1498 if (opt & BIT_0) 1529 if (opt & BIT_0)
@@ -1741,6 +1772,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1741 struct logio_entry_24xx *lg; 1772 struct logio_entry_24xx *lg;
1742 dma_addr_t lg_dma; 1773 dma_addr_t lg_dma;
1743 struct qla_hw_data *ha = vha->hw; 1774 struct qla_hw_data *ha = vha->hw;
1775 struct req_que *req;
1776 struct rsp_que *rsp;
1744 1777
1745 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1778 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1746 1779
@@ -1752,8 +1785,14 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1752 } 1785 }
1753 memset(lg, 0, sizeof(struct logio_entry_24xx)); 1786 memset(lg, 0, sizeof(struct logio_entry_24xx));
1754 1787
1788 if (ql2xmaxqueues > 1)
1789 req = ha->req_q_map[0];
1790 else
1791 req = vha->req;
1792 rsp = req->rsp;
1755 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1793 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1756 lg->entry_count = 1; 1794 lg->entry_count = 1;
1795 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1757 lg->nport_handle = cpu_to_le16(loop_id); 1796 lg->nport_handle = cpu_to_le16(loop_id);
1758 lg->control_flags = 1797 lg->control_flags =
1759 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); 1798 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
@@ -1864,9 +1903,6 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
1864 mbx_cmd_t mc; 1903 mbx_cmd_t mc;
1865 mbx_cmd_t *mcp = &mc; 1904 mbx_cmd_t *mcp = &mc;
1866 1905
1867 if (IS_QLA81XX(vha->hw))
1868 return QLA_SUCCESS;
1869
1870 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n", 1906 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n",
1871 vha->host_no)); 1907 vha->host_no));
1872 1908
@@ -2195,21 +2231,21 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2195} 2231}
2196 2232
2197int 2233int
2198qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) 2234qla24xx_abort_command(srb_t *sp)
2199{ 2235{
2200 int rval; 2236 int rval;
2201 fc_port_t *fcport;
2202 unsigned long flags = 0; 2237 unsigned long flags = 0;
2203 2238
2204 struct abort_entry_24xx *abt; 2239 struct abort_entry_24xx *abt;
2205 dma_addr_t abt_dma; 2240 dma_addr_t abt_dma;
2206 uint32_t handle; 2241 uint32_t handle;
2242 fc_port_t *fcport = sp->fcport;
2243 struct scsi_qla_host *vha = fcport->vha;
2207 struct qla_hw_data *ha = vha->hw; 2244 struct qla_hw_data *ha = vha->hw;
2245 struct req_que *req = vha->req;
2208 2246
2209 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2247 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2210 2248
2211 fcport = sp->fcport;
2212
2213 spin_lock_irqsave(&ha->hardware_lock, flags); 2249 spin_lock_irqsave(&ha->hardware_lock, flags);
2214 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 2250 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
2215 if (req->outstanding_cmds[handle] == sp) 2251 if (req->outstanding_cmds[handle] == sp)
@@ -2231,6 +2267,7 @@ qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
2231 2267
2232 abt->entry_type = ABORT_IOCB_TYPE; 2268 abt->entry_type = ABORT_IOCB_TYPE;
2233 abt->entry_count = 1; 2269 abt->entry_count = 1;
2270 abt->handle = MAKE_HANDLE(req->id, abt->handle);
2234 abt->nport_handle = cpu_to_le16(fcport->loop_id); 2271 abt->nport_handle = cpu_to_le16(fcport->loop_id);
2235 abt->handle_to_abort = handle; 2272 abt->handle_to_abort = handle;
2236 abt->port_id[0] = fcport->d_id.b.al_pa; 2273 abt->port_id[0] = fcport->d_id.b.al_pa;
@@ -2272,7 +2309,7 @@ struct tsk_mgmt_cmd {
2272 2309
2273static int 2310static int
2274__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, 2311__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2275 unsigned int l) 2312 unsigned int l, int tag)
2276{ 2313{
2277 int rval, rval2; 2314 int rval, rval2;
2278 struct tsk_mgmt_cmd *tsk; 2315 struct tsk_mgmt_cmd *tsk;
@@ -2286,8 +2323,11 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2286 2323
2287 vha = fcport->vha; 2324 vha = fcport->vha;
2288 ha = vha->hw; 2325 ha = vha->hw;
2289 req = ha->req_q_map[0]; 2326 req = vha->req;
2290 rsp = ha->rsp_q_map[0]; 2327 if (ql2xmultique_tag)
2328 rsp = ha->rsp_q_map[tag + 1];
2329 else
2330 rsp = req->rsp;
2291 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 2331 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
2292 if (tsk == NULL) { 2332 if (tsk == NULL) {
2293 DEBUG2_3(printk("%s(%ld): failed to allocate Task Management " 2333 DEBUG2_3(printk("%s(%ld): failed to allocate Task Management "
@@ -2298,6 +2338,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2298 2338
2299 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; 2339 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
2300 tsk->p.tsk.entry_count = 1; 2340 tsk->p.tsk.entry_count = 1;
2341 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
2301 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); 2342 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
2302 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 2343 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2303 tsk->p.tsk.control_flags = cpu_to_le32(type); 2344 tsk->p.tsk.control_flags = cpu_to_le32(type);
@@ -2344,15 +2385,15 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2344} 2385}
2345 2386
2346int 2387int
2347qla24xx_abort_target(struct fc_port *fcport, unsigned int l) 2388qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag)
2348{ 2389{
2349 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l); 2390 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
2350} 2391}
2351 2392
2352int 2393int
2353qla24xx_lun_reset(struct fc_port *fcport, unsigned int l) 2394qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
2354{ 2395{
2355 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l); 2396 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
2356} 2397}
2357 2398
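Note: the new tag argument on the abort-target and LUN-reset wrappers threads down to __qla24xx_issue_tmf(), where it only changes which response queue receives the task-management completion. Condensed from the hunk above (ql2xmultique_tag is the module option declared in qla_os.c further down):

    req = vha->req;
    rsp = ql2xmultique_tag ? ha->rsp_q_map[tag + 1] : req->rsp;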
2358int 2399int
@@ -2446,6 +2487,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
2446 if (rval != QLA_SUCCESS) { 2487 if (rval != QLA_SUCCESS) {
2447 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2488 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2448 vha->host_no, rval)); 2489 vha->host_no, rval));
2490 if (mcp->mb[0] == MBS_INVALID_COMMAND)
2491 rval = QLA_INVALID_COMMAND;
2449 } else { 2492 } else {
2450 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2493 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2451 } 2494 }
@@ -2659,10 +2702,13 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2659 mcp->mb[0] = MBC_PORT_PARAMS; 2702 mcp->mb[0] = MBC_PORT_PARAMS;
2660 mcp->mb[1] = loop_id; 2703 mcp->mb[1] = loop_id;
2661 mcp->mb[2] = BIT_0; 2704 mcp->mb[2] = BIT_0;
2662 mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0); 2705 if (IS_QLA81XX(vha->hw))
2663 mcp->mb[4] = mcp->mb[5] = 0; 2706 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
2664 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 2707 else
2665 mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; 2708 mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
2709 mcp->mb[9] = vha->vp_idx;
2710 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
2711 mcp->in_mb = MBX_3|MBX_1|MBX_0;
2666 mcp->tov = MBX_TOV_SECONDS; 2712 mcp->tov = MBX_TOV_SECONDS;
2667 mcp->flags = 0; 2713 mcp->flags = 0;
2668 rval = qla2x00_mailbox_command(vha, mcp); 2714 rval = qla2x00_mailbox_command(vha, mcp);
@@ -2672,8 +2718,6 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2672 mb[0] = mcp->mb[0]; 2718 mb[0] = mcp->mb[0];
2673 mb[1] = mcp->mb[1]; 2719 mb[1] = mcp->mb[1];
2674 mb[3] = mcp->mb[3]; 2720 mb[3] = mcp->mb[3];
2675 mb[4] = mcp->mb[4];
2676 mb[5] = mcp->mb[5];
2677 } 2721 }
2678 2722
2679 if (rval != QLA_SUCCESS) { 2723 if (rval != QLA_SUCCESS) {
@@ -2717,8 +2761,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2717 if (vp_idx == 0) 2761 if (vp_idx == 0)
2718 return; 2762 return;
2719 2763
2720 if (MSB(stat) == 1) 2764 if (MSB(stat) == 1) {
2765 DEBUG2(printk("scsi(%ld): Could not acquire ID for "
2766 "VP[%d].\n", vha->host_no, vp_idx));
2721 return; 2767 return;
2768 }
2722 2769
2723 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) 2770 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list)
2724 if (vp_idx == vp->vp_idx) 2771 if (vp_idx == vp->vp_idx)
@@ -3141,6 +3188,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3141 WRT_REG_DWORD(&reg->req_q_in, 0); 3188 WRT_REG_DWORD(&reg->req_q_in, 0);
3142 WRT_REG_DWORD(&reg->req_q_out, 0); 3189 WRT_REG_DWORD(&reg->req_q_out, 0);
3143 } 3190 }
3191 req->req_q_in = &reg->req_q_in;
3192 req->req_q_out = &reg->req_q_out;
3144 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3193 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3145 3194
3146 rval = qla2x00_mailbox_command(vha, mcp); 3195 rval = qla2x00_mailbox_command(vha, mcp);
@@ -3167,7 +3216,6 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3167 mcp->mb[6] = MSW(MSD(rsp->dma)); 3216 mcp->mb[6] = MSW(MSD(rsp->dma));
3168 mcp->mb[7] = LSW(MSD(rsp->dma)); 3217 mcp->mb[7] = LSW(MSD(rsp->dma));
3169 mcp->mb[5] = rsp->length; 3218 mcp->mb[5] = rsp->length;
3170 mcp->mb[11] = rsp->vp_idx;
3171 mcp->mb[14] = rsp->msix->entry; 3219 mcp->mb[14] = rsp->msix->entry;
3172 mcp->mb[13] = rsp->rid; 3220 mcp->mb[13] = rsp->rid;
3173 3221
@@ -3179,7 +3227,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3179 mcp->mb[8] = 0; 3227 mcp->mb[8] = 0;
3180 /* que out ptr index */ 3228 /* que out ptr index */
3181 mcp->mb[9] = 0; 3229 mcp->mb[9] = 0;
3182 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7 3230 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
3183 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3231 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3184 mcp->in_mb = MBX_0; 3232 mcp->in_mb = MBX_0;
3185 mcp->flags = MBX_DMA_OUT; 3233 mcp->flags = MBX_DMA_OUT;
@@ -3384,7 +3432,7 @@ qla2x00_read_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
3384 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 3432 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
3385 vha->host_no, rval, mcp->mb[0])); 3433 vha->host_no, rval, mcp->mb[0]));
3386 } else { 3434 } else {
3387 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 3435 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3388 } 3436 }
3389 3437
3390 return rval; 3438 return rval;
@@ -3428,3 +3476,141 @@ qla2x00_write_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
3428 3476
3429 return rval; 3477 return rval;
3430} 3478}
3479
3480int
3481qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3482 uint16_t size_in_bytes, uint16_t *actual_size)
3483{
3484 int rval;
3485 mbx_cmd_t mc;
3486 mbx_cmd_t *mcp = &mc;
3487
3488 if (!IS_QLA81XX(vha->hw))
3489 return QLA_FUNCTION_FAILED;
3490
3491 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3492
3493 mcp->mb[0] = MBC_GET_XGMAC_STATS;
3494 mcp->mb[2] = MSW(stats_dma);
3495 mcp->mb[3] = LSW(stats_dma);
3496 mcp->mb[6] = MSW(MSD(stats_dma));
3497 mcp->mb[7] = LSW(MSD(stats_dma));
3498 mcp->mb[8] = size_in_bytes >> 2;
3499 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3500 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3501 mcp->tov = MBX_TOV_SECONDS;
3502 mcp->flags = 0;
3503 rval = qla2x00_mailbox_command(vha, mcp);
3504
3505 if (rval != QLA_SUCCESS) {
3506 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x "
3507 "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval,
3508 mcp->mb[0], mcp->mb[1], mcp->mb[2]));
3509 } else {
3510 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3511
3512 *actual_size = mcp->mb[2] << 2;
3513 }
3514
3515 return rval;
3516}
3517
3518int
3519qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3520 uint16_t size)
3521{
3522 int rval;
3523 mbx_cmd_t mc;
3524 mbx_cmd_t *mcp = &mc;
3525
3526 if (!IS_QLA81XX(vha->hw))
3527 return QLA_FUNCTION_FAILED;
3528
3529 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3530
3531 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
3532 mcp->mb[1] = 0;
3533 mcp->mb[2] = MSW(tlv_dma);
3534 mcp->mb[3] = LSW(tlv_dma);
3535 mcp->mb[6] = MSW(MSD(tlv_dma));
3536 mcp->mb[7] = LSW(MSD(tlv_dma));
3537 mcp->mb[8] = size;
3538 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3539 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3540 mcp->tov = MBX_TOV_SECONDS;
3541 mcp->flags = 0;
3542 rval = qla2x00_mailbox_command(vha, mcp);
3543
3544 if (rval != QLA_SUCCESS) {
3545 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x "
3546 "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval,
3547 mcp->mb[0], mcp->mb[1], mcp->mb[2]));
3548 } else {
3549 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3550 }
3551
3552 return rval;
3553}
3554
3555int
3556qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3557{
3558 int rval;
3559 mbx_cmd_t mc;
3560 mbx_cmd_t *mcp = &mc;
3561
3562 if (!IS_FWI2_CAPABLE(vha->hw))
3563 return QLA_FUNCTION_FAILED;
3564
3565 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3566
3567 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
3568 mcp->mb[1] = LSW(risc_addr);
3569 mcp->mb[8] = MSW(risc_addr);
3570 mcp->out_mb = MBX_8|MBX_1|MBX_0;
3571 mcp->in_mb = MBX_3|MBX_2|MBX_0;
3572 mcp->tov = 30;
3573 mcp->flags = 0;
3574 rval = qla2x00_mailbox_command(vha, mcp);
3575 if (rval != QLA_SUCCESS) {
3576 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
3577 vha->host_no, rval, mcp->mb[0]));
3578 } else {
3579 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3580 *data = mcp->mb[3] << 16 | mcp->mb[2];
3581 }
3582
3583 return rval;
3584}
3585
3586int
3587qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3588{
3589 int rval;
3590 mbx_cmd_t mc;
3591 mbx_cmd_t *mcp = &mc;
3592
3593 if (!IS_FWI2_CAPABLE(vha->hw))
3594 return QLA_FUNCTION_FAILED;
3595
3596 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3597
3598 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
3599 mcp->mb[1] = LSW(risc_addr);
3600 mcp->mb[2] = LSW(data);
3601 mcp->mb[3] = MSW(data);
3602 mcp->mb[8] = MSW(risc_addr);
3603 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
3604 mcp->in_mb = MBX_0;
3605 mcp->tov = 30;
3606 mcp->flags = 0;
3607 rval = qla2x00_mailbox_command(vha, mcp);
3608 if (rval != QLA_SUCCESS) {
3609 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
3610 vha->host_no, rval, mcp->mb[0]));
3611 } else {
3612 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3613 }
3614
3615 return rval;
3616}
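Note: the two new QLA81xx mailbox verbs above (qla2x00_get_xgmac_stats() and qla2x00_get_dcbx_params()) both scatter a 64-bit DMA address across mailbox registers 2/3/6/7. A self-contained sketch of that layout (the LSW/MSW/MSD definitions are assumptions matching their conventional meaning, and dma_addr_t is stood in by uint64_t):

    #include <stdint.h>

    typedef uint64_t dma_addr_t;          /* stand-in for the kernel type */

    #define LSW(x) ((uint16_t)(x))
    #define MSW(x) ((uint16_t)((uint32_t)(x) >> 16))
    #define MSD(x) ((uint32_t)((uint64_t)(x) >> 32))

    static void pack_dma(uint16_t mb[8], dma_addr_t dma)
    {
            mb[2] = MSW(dma);             /* address bits 31..16 */
            mb[3] = LSW(dma);             /* address bits 15..0  */
            mb[6] = MSW(MSD(dma));        /* address bits 63..48 */
            mb[7] = LSW(MSD(dma));        /* address bits 47..32 */
    }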
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 51716c7e3008..650bcef08f2a 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -398,9 +398,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
398 398
399 qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); 399 qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
400 400
401 memset(vha->req_ques, 0, sizeof(vha->req_ques)); 401 vha->req = base_vha->req;
402 vha->req_ques[0] = ha->req_q_map[0]->id; 402 host->can_queue = base_vha->req->length + 128;
403 host->can_queue = ha->req_q_map[0]->length + 128;
404 host->this_id = 255; 403 host->this_id = 255;
405 host->cmd_per_lun = 3; 404 host->cmd_per_lun = 3;
406 host->max_cmd_len = MAX_CMDSZ; 405 host->max_cmd_len = MAX_CMDSZ;
@@ -515,76 +514,53 @@ int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos)
515 514
516/* Delete all queues for a given vhost */ 515/* Delete all queues for a given vhost */
517int 516int
518qla25xx_delete_queues(struct scsi_qla_host *vha, uint8_t que_no) 517qla25xx_delete_queues(struct scsi_qla_host *vha)
519{ 518{
520 int cnt, ret = 0; 519 int cnt, ret = 0;
521 struct req_que *req = NULL; 520 struct req_que *req = NULL;
522 struct rsp_que *rsp = NULL; 521 struct rsp_que *rsp = NULL;
523 struct qla_hw_data *ha = vha->hw; 522 struct qla_hw_data *ha = vha->hw;
524 523
525 if (que_no) { 524 /* Delete request queues */
526 /* Delete request queue */ 525 for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
527 req = ha->req_q_map[que_no]; 526 req = ha->req_q_map[cnt];
528 if (req) { 527 if (req) {
529 rsp = req->rsp;
530 ret = qla25xx_delete_req_que(vha, req); 528 ret = qla25xx_delete_req_que(vha, req);
531 if (ret != QLA_SUCCESS) { 529 if (ret != QLA_SUCCESS) {
532 qla_printk(KERN_WARNING, ha, 530 qla_printk(KERN_WARNING, ha,
533 "Couldn't delete req que %d\n", req->id); 531 "Couldn't delete req que %d\n",
532 req->id);
534 return ret; 533 return ret;
535 } 534 }
536 /* Delete associated response queue */
537 if (rsp) {
538 ret = qla25xx_delete_rsp_que(vha, rsp);
539 if (ret != QLA_SUCCESS) {
540 qla_printk(KERN_WARNING, ha,
541 "Couldn't delete rsp que %d\n",
542 rsp->id);
543 return ret;
544 }
545 }
546 } 535 }
547 } else { /* delete all queues of this host */ 536 }
548 for (cnt = 0; cnt < QLA_MAX_HOST_QUES; cnt++) { 537
549 /* Delete request queues */ 538 /* Delete response queues */
550 req = ha->req_q_map[vha->req_ques[cnt]]; 539 for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
551 if (req && req->id) { 540 rsp = ha->rsp_q_map[cnt];
552 rsp = req->rsp; 541 if (rsp) {
553 ret = qla25xx_delete_req_que(vha, req); 542 ret = qla25xx_delete_rsp_que(vha, rsp);
554 if (ret != QLA_SUCCESS) { 543 if (ret != QLA_SUCCESS) {
555 qla_printk(KERN_WARNING, ha, 544 qla_printk(KERN_WARNING, ha,
556 "Couldn't delete req que %d\n", 545 "Couldn't delete rsp que %d\n",
557 vha->req_ques[cnt]); 546 rsp->id);
558 return ret; 547 return ret;
559 }
560 vha->req_ques[cnt] = ha->req_q_map[0]->id;
561 /* Delete associated response queue */
562 if (rsp && rsp->id) {
563 ret = qla25xx_delete_rsp_que(vha, rsp);
564 if (ret != QLA_SUCCESS) {
565 qla_printk(KERN_WARNING, ha,
566 "Couldn't delete rsp que %d\n",
567 rsp->id);
568 return ret;
569 }
570 }
571 } 548 }
572 } 549 }
573 } 550 }
574 qla_printk(KERN_INFO, ha, "Queues deleted for vport:%d\n",
575 vha->vp_idx);
576 return ret; 551 return ret;
577} 552}
578 553
579int 554int
580qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, 555qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
581 uint8_t vp_idx, uint16_t rid, uint8_t rsp_que, uint8_t qos) 556 uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
582{ 557{
583 int ret = 0; 558 int ret = 0;
584 struct req_que *req = NULL; 559 struct req_que *req = NULL;
585 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 560 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
586 uint16_t que_id = 0; 561 uint16_t que_id = 0;
587 device_reg_t __iomem *reg; 562 device_reg_t __iomem *reg;
563 uint32_t cnt;
588 564
589 req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 565 req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
590 if (req == NULL) { 566 if (req == NULL) {
@@ -604,8 +580,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
604 } 580 }
605 581
606 mutex_lock(&ha->vport_lock); 582 mutex_lock(&ha->vport_lock);
607 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_queues); 583 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
608 if (que_id >= ha->max_queues) { 584 if (que_id >= ha->max_req_queues) {
609 mutex_unlock(&ha->vport_lock); 585 mutex_unlock(&ha->vport_lock);
610 qla_printk(KERN_INFO, ha, "No resources to create " 586 qla_printk(KERN_INFO, ha, "No resources to create "
611 "additional request queue\n"); 587 "additional request queue\n");
@@ -617,10 +593,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
617 req->vp_idx = vp_idx; 593 req->vp_idx = vp_idx;
618 req->qos = qos; 594 req->qos = qos;
619 595
620 if (ha->rsp_q_map[rsp_que]) { 596 if (rsp_que < 0)
597 req->rsp = NULL;
598 else
621 req->rsp = ha->rsp_q_map[rsp_que]; 599 req->rsp = ha->rsp_q_map[rsp_que];
622 req->rsp->req = req;
623 }
624 /* Use alternate PCI bus number */ 600 /* Use alternate PCI bus number */
625 if (MSB(req->rid)) 601 if (MSB(req->rid))
626 options |= BIT_4; 602 options |= BIT_4;
@@ -628,13 +604,16 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
628 if (LSB(req->rid)) 604 if (LSB(req->rid))
629 options |= BIT_5; 605 options |= BIT_5;
630 req->options = options; 606 req->options = options;
607
608 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
609 req->outstanding_cmds[cnt] = NULL;
610 req->current_outstanding_cmd = 1;
611
631 req->ring_ptr = req->ring; 612 req->ring_ptr = req->ring;
632 req->ring_index = 0; 613 req->ring_index = 0;
633 req->cnt = req->length; 614 req->cnt = req->length;
634 req->id = que_id; 615 req->id = que_id;
635 reg = ISP_QUE_REG(ha, que_id); 616 reg = ISP_QUE_REG(ha, que_id);
636 req->req_q_in = &reg->isp25mq.req_q_in;
637 req->req_q_out = &reg->isp25mq.req_q_out;
638 req->max_q_depth = ha->req_q_map[0]->max_q_depth; 617 req->max_q_depth = ha->req_q_map[0]->max_q_depth;
639 mutex_unlock(&ha->vport_lock); 618 mutex_unlock(&ha->vport_lock);
640 619
@@ -654,10 +633,19 @@ que_failed:
654 return 0; 633 return 0;
655} 634}
656 635
636static void qla_do_work(struct work_struct *work)
637{
638 struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
639 struct scsi_qla_host *vha;
640
641 vha = qla25xx_get_host(rsp);
642 qla24xx_process_response_queue(vha, rsp);
643}
644
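Note: qla_do_work() closes the loop on the new CPU-affinity path: qla25xx_msix_rsp_q() (qla_isr.c above) no longer drains the ring in hard-IRQ context but queues rsp->q_work on CPU (rsp->id - 1), and this handler then resolves the owning host and processes the queue. Condensed from the surrounding hunks, the wiring is:

    /* at response-queue creation (below)   */
    INIT_WORK(&rsp->q_work, qla_do_work);
    /* in the MSI-X handler (qla_isr.c)     */
    queue_work_on((int)(rsp->id - 1), ha->wq, &rsp->q_work);
    /* in the work handler                  */
    vha = qla25xx_get_host(rsp);
    qla24xx_process_response_queue(vha, rsp);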
657/* create response queue */ 645/* create response queue */
658int 646int
659qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, 647qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
660 uint8_t vp_idx, uint16_t rid) 648 uint8_t vp_idx, uint16_t rid, int req)
661{ 649{
662 int ret = 0; 650 int ret = 0;
663 struct rsp_que *rsp = NULL; 651 struct rsp_que *rsp = NULL;
@@ -672,7 +660,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
672 goto que_failed; 660 goto que_failed;
673 } 661 }
674 662
675 rsp->length = RESPONSE_ENTRY_CNT_2300; 663 rsp->length = RESPONSE_ENTRY_CNT_MQ;
676 rsp->ring = dma_alloc_coherent(&ha->pdev->dev, 664 rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
677 (rsp->length + 1) * sizeof(response_t), 665 (rsp->length + 1) * sizeof(response_t),
678 &rsp->dma, GFP_KERNEL); 666 &rsp->dma, GFP_KERNEL);
@@ -683,8 +671,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
683 } 671 }
684 672
685 mutex_lock(&ha->vport_lock); 673 mutex_lock(&ha->vport_lock);
686 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues); 674 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
687 if (que_id >= ha->max_queues) { 675 if (que_id >= ha->max_rsp_queues) {
688 mutex_unlock(&ha->vport_lock); 676 mutex_unlock(&ha->vport_lock);
689 qla_printk(KERN_INFO, ha, "No resources to create " 677 qla_printk(KERN_INFO, ha, "No resources to create "
690 "additional response queue\n"); 678 "additional response queue\n");
@@ -708,8 +696,6 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
708 if (LSB(rsp->rid)) 696 if (LSB(rsp->rid))
709 options |= BIT_5; 697 options |= BIT_5;
710 rsp->options = options; 698 rsp->options = options;
711 rsp->ring_ptr = rsp->ring;
712 rsp->ring_index = 0;
713 rsp->id = que_id; 699 rsp->id = que_id;
714 reg = ISP_QUE_REG(ha, que_id); 700 reg = ISP_QUE_REG(ha, que_id);
715 rsp->rsp_q_in = &reg->isp25mq.rsp_q_in; 701 rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
@@ -728,9 +714,14 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
728 mutex_unlock(&ha->vport_lock); 714 mutex_unlock(&ha->vport_lock);
729 goto que_failed; 715 goto que_failed;
730 } 716 }
717 if (req >= 0)
718 rsp->req = ha->req_q_map[req];
719 else
720 rsp->req = NULL;
731 721
732 qla2x00_init_response_q_entries(rsp); 722 qla2x00_init_response_q_entries(rsp);
733 723 if (rsp->hw->wq)
724 INIT_WORK(&rsp->q_work, qla_do_work);
734 return rsp->id; 725 return rsp->id;
735 726
736que_failed: 727que_failed:
@@ -744,14 +735,16 @@ qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
744 uint16_t options = 0; 735 uint16_t options = 0;
745 uint8_t ret = 0; 736 uint8_t ret = 0;
746 struct qla_hw_data *ha = vha->hw; 737 struct qla_hw_data *ha = vha->hw;
738 struct rsp_que *rsp;
747 739
748 options |= BIT_1; 740 options |= BIT_1;
749 ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0); 741 ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0, -1);
750 if (!ret) { 742 if (!ret) {
751 qla_printk(KERN_WARNING, ha, "Response Que create failed\n"); 743 qla_printk(KERN_WARNING, ha, "Response Que create failed\n");
752 return ret; 744 return ret;
753 } else 745 } else
754 qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret); 746 qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret);
747 rsp = ha->rsp_q_map[ret];
755 748
756 options = 0; 749 options = 0;
757 if (qos & BIT_7) 750 if (qos & BIT_7)
@@ -759,10 +752,11 @@ qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
759 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret, 752 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret,
760 qos & ~BIT_7); 753 qos & ~BIT_7);
761 if (ret) { 754 if (ret) {
762 vha->req_ques[0] = ret; 755 vha->req = ha->req_q_map[ret];
763 qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret); 756 qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret);
764 } else 757 } else
765 qla_printk(KERN_WARNING, ha, "Request Que create failed\n"); 758 qla_printk(KERN_WARNING, ha, "Request Que create failed\n");
759 rsp->req = ha->req_q_map[ret];
766 760
767 return ret; 761 return ret;
768} 762}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index e4fdcdad80d0..f0396e79b6fa 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -77,6 +77,14 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
77MODULE_PARM_DESC(ql2xmaxqdepth, 77MODULE_PARM_DESC(ql2xmaxqdepth,
78 "Maximum queue depth to report for target devices."); 78 "Maximum queue depth to report for target devices.");
79 79
80int ql2xqfulltracking = 1;
81module_param(ql2xqfulltracking, int, S_IRUGO|S_IWUSR);
82MODULE_PARM_DESC(ql2xqfulltracking,
83 "Controls whether the driver tracks queue full status "
84 "returns and dynamically adjusts a scsi device's queue "
85 "depth. Default is 1, perform tracking. Set to 0 to "
86 "disable dynamic tracking and adjustment of queue depth.");
87
80int ql2xqfullrampup = 120; 88int ql2xqfullrampup = 120;
81module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR); 89module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR);
82MODULE_PARM_DESC(ql2xqfullrampup, 90MODULE_PARM_DESC(ql2xqfullrampup,
@@ -96,6 +104,23 @@ MODULE_PARM_DESC(ql2xmaxqueues,
96 "Enables MQ settings " 104 "Enables MQ settings "
97 "Default is 1 for single queue. Set it to number \ 105 "Default is 1 for single queue. Set it to number \
98 of queues in MQ mode."); 106 of queues in MQ mode.");
107
108int ql2xmultique_tag;
109module_param(ql2xmultique_tag, int, S_IRUGO|S_IRUSR);
110MODULE_PARM_DESC(ql2xmultique_tag,
111 "Enables CPU affinity settings for the driver "
112 "Default is 0 for no affinity of request and response IO. "
113 "Set it to 1 to turn on the cpu affinity.");
114
115int ql2xfwloadbin;
116module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR);
117MODULE_PARM_DESC(ql2xfwloadbin,
118 "Option to specify location from which to load ISP firmware:\n"
119 " 2 -- load firmware via the request_firmware() (hotplug)\n"
120 " interface.\n"
121 " 1 -- load firmware from flash.\n"
122 " 0 -- use default semantics.\n");
123
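Note: with these additions the multiqueue behaviour is driven entirely by module options; an illustrative (hypothetical) invocation would be "modprobe qla2xxx ql2xmaxqueues=4 ql2xmultique_tag=1 ql2xfwloadbin=2", enabling MQ resources, CPU-affinity steering of response queues and request_firmware() loading in one go.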
99/* 124/*
100 * SCSI host template entry points 125 * SCSI host template entry points
101 */ 126 */
@@ -187,7 +212,7 @@ static void qla2x00_sp_free_dma(srb_t *);
187/* -------------------------------------------------------------------------- */ 212/* -------------------------------------------------------------------------- */
188static int qla2x00_alloc_queues(struct qla_hw_data *ha) 213static int qla2x00_alloc_queues(struct qla_hw_data *ha)
189{ 214{
190 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_queues, 215 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
191 GFP_KERNEL); 216 GFP_KERNEL);
192 if (!ha->req_q_map) { 217 if (!ha->req_q_map) {
193 qla_printk(KERN_WARNING, ha, 218 qla_printk(KERN_WARNING, ha,
@@ -195,7 +220,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha)
195 goto fail_req_map; 220 goto fail_req_map;
196 } 221 }
197 222
198 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_queues, 223 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
199 GFP_KERNEL); 224 GFP_KERNEL);
200 if (!ha->rsp_q_map) { 225 if (!ha->rsp_q_map) {
201 qla_printk(KERN_WARNING, ha, 226 qla_printk(KERN_WARNING, ha,
@@ -213,16 +238,8 @@ fail_req_map:
213 return -ENOMEM; 238 return -ENOMEM;
214} 239}
215 240
216static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req, 241static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
217 struct rsp_que *rsp)
218{ 242{
219 if (rsp && rsp->ring)
220 dma_free_coherent(&ha->pdev->dev,
221 (rsp->length + 1) * sizeof(response_t),
222 rsp->ring, rsp->dma);
223
224 kfree(rsp);
225 rsp = NULL;
226 if (req && req->ring) 243 if (req && req->ring)
227 dma_free_coherent(&ha->pdev->dev, 244 dma_free_coherent(&ha->pdev->dev,
228 (req->length + 1) * sizeof(request_t), 245 (req->length + 1) * sizeof(request_t),
@@ -232,22 +249,77 @@ static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req,
232 req = NULL; 249 req = NULL;
233} 250}
234 251
252static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
253{
254 if (rsp && rsp->ring)
255 dma_free_coherent(&ha->pdev->dev,
256 (rsp->length + 1) * sizeof(response_t),
257 rsp->ring, rsp->dma);
258
259 kfree(rsp);
260 rsp = NULL;
261}
262
235static void qla2x00_free_queues(struct qla_hw_data *ha) 263static void qla2x00_free_queues(struct qla_hw_data *ha)
236{ 264{
237 struct req_que *req; 265 struct req_que *req;
238 struct rsp_que *rsp; 266 struct rsp_que *rsp;
239 int cnt; 267 int cnt;
240 268
241 for (cnt = 0; cnt < ha->max_queues; cnt++) { 269 for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
242 rsp = ha->rsp_q_map[cnt];
243 req = ha->req_q_map[cnt]; 270 req = ha->req_q_map[cnt];
244 qla2x00_free_que(ha, req, rsp); 271 qla2x00_free_req_que(ha, req);
272 }
273 kfree(ha->req_q_map);
274 ha->req_q_map = NULL;
275
276 for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
277 rsp = ha->rsp_q_map[cnt];
278 qla2x00_free_rsp_que(ha, rsp);
245 } 279 }
246 kfree(ha->rsp_q_map); 280 kfree(ha->rsp_q_map);
247 ha->rsp_q_map = NULL; 281 ha->rsp_q_map = NULL;
282}
248 283
249 kfree(ha->req_q_map); 284static int qla25xx_setup_mode(struct scsi_qla_host *vha)
250 ha->req_q_map = NULL; 285{
286 uint16_t options = 0;
287 int ques, req, ret;
288 struct qla_hw_data *ha = vha->hw;
289
290 if (ql2xmultique_tag) {
291 /* CPU affinity mode */
292 ha->wq = create_workqueue("qla2xxx_wq");
293 /* create a request queue for IO */
294 options |= BIT_7;
295 req = qla25xx_create_req_que(ha, options, 0, 0, -1,
296 QLA_DEFAULT_QUE_QOS);
297 if (!req) {
298 qla_printk(KERN_WARNING, ha,
299 "Can't create request queue\n");
300 goto fail;
301 }
302 vha->req = ha->req_q_map[req];
303 options |= BIT_1;
304 for (ques = 1; ques < ha->max_rsp_queues; ques++) {
305 ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
306 if (!ret) {
307 qla_printk(KERN_WARNING, ha,
308 "Response Queue create failed\n");
309 goto fail2;
310 }
311 }
312 DEBUG2(qla_printk(KERN_INFO, ha,
313 "CPU affinity mode enabled, no. of response"
314 " queues:%d, no. of request queues:%d\n",
315 ha->max_rsp_queues, ha->max_req_queues));
316 }
317 return 0;
318fail2:
319 qla25xx_delete_queues(vha);
320fail:
321 ha->mqenable = 0;
322 return 1;
251} 323}
252 324
253static char * 325static char *
@@ -387,7 +459,6 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
387 459
388 sp->fcport = fcport; 460 sp->fcport = fcport;
389 sp->cmd = cmd; 461 sp->cmd = cmd;
390 sp->que = ha->req_q_map[0];
391 sp->flags = 0; 462 sp->flags = 0;
392 CMD_SP(cmd) = (void *)sp; 463 CMD_SP(cmd) = (void *)sp;
393 cmd->scsi_done = done; 464 cmd->scsi_done = done;
@@ -612,7 +683,7 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
612void 683void
613qla2x00_abort_fcport_cmds(fc_port_t *fcport) 684qla2x00_abort_fcport_cmds(fc_port_t *fcport)
614{ 685{
615 int cnt, que, id; 686 int cnt;
616 unsigned long flags; 687 unsigned long flags;
617 srb_t *sp; 688 srb_t *sp;
618 scsi_qla_host_t *vha = fcport->vha; 689 scsi_qla_host_t *vha = fcport->vha;
@@ -620,32 +691,27 @@ qla2x00_abort_fcport_cmds(fc_port_t *fcport)
620 struct req_que *req; 691 struct req_que *req;
621 692
622 spin_lock_irqsave(&ha->hardware_lock, flags); 693 spin_lock_irqsave(&ha->hardware_lock, flags);
623 for (que = 0; que < QLA_MAX_HOST_QUES; que++) { 694 req = vha->req;
624 id = vha->req_ques[que]; 695 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
625 req = ha->req_q_map[id]; 696 sp = req->outstanding_cmds[cnt];
626 if (!req) 697 if (!sp)
698 continue;
699 if (sp->fcport != fcport)
627 continue; 700 continue;
628 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
629 sp = req->outstanding_cmds[cnt];
630 if (!sp)
631 continue;
632 if (sp->fcport != fcport)
633 continue;
634 701
635 spin_unlock_irqrestore(&ha->hardware_lock, flags); 702 spin_unlock_irqrestore(&ha->hardware_lock, flags);
636 if (ha->isp_ops->abort_command(vha, sp, req)) { 703 if (ha->isp_ops->abort_command(sp)) {
704 DEBUG2(qla_printk(KERN_WARNING, ha,
705 "Abort failed -- %lx\n",
706 sp->cmd->serial_number));
707 } else {
708 if (qla2x00_eh_wait_on_command(sp->cmd) !=
709 QLA_SUCCESS)
637 DEBUG2(qla_printk(KERN_WARNING, ha, 710 DEBUG2(qla_printk(KERN_WARNING, ha,
638 "Abort failed -- %lx\n", 711 "Abort failed while waiting -- %lx\n",
639 sp->cmd->serial_number)); 712 sp->cmd->serial_number));
640 } else {
641 if (qla2x00_eh_wait_on_command(sp->cmd) !=
642 QLA_SUCCESS)
643 DEBUG2(qla_printk(KERN_WARNING, ha,
644 "Abort failed while waiting -- %lx\n",
645 sp->cmd->serial_number));
646 }
647 spin_lock_irqsave(&ha->hardware_lock, flags);
648 } 713 }
714 spin_lock_irqsave(&ha->hardware_lock, flags);
649 } 715 }
650 spin_unlock_irqrestore(&ha->hardware_lock, flags); 716 spin_unlock_irqrestore(&ha->hardware_lock, flags);
651} 717}
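
The simplified abort loop above keeps one pattern worth noting: the hardware lock is held only while inspecting an outstanding_cmds slot, is dropped before the abort mailbox command (which can sleep), and is re-taken before moving to the next index. A small pthread sketch of that lock-drop-around-a-blocking-call pattern; the names are illustrative, not the driver's:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_CMDS 8

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
static int outstanding[MAX_CMDS] = { 0, 3, 0, 7, 0, 0, 2, 0 };

/* Stand-in for a call that may block (e.g. a mailbox command). */
static void abort_cmd_blocking(int tag)
{
	usleep(1000);
	printf("aborted tag %d\n", tag);
}

static void abort_all(void)
{
	int i, tag;

	pthread_mutex_lock(&hw_lock);
	for (i = 0; i < MAX_CMDS; i++) {
		tag = outstanding[i];
		if (!tag)
			continue;

		/* Never sleep with the lock held. */
		pthread_mutex_unlock(&hw_lock);
		abort_cmd_blocking(tag);
		pthread_mutex_lock(&hw_lock);
	}
	pthread_mutex_unlock(&hw_lock);
}

int main(void)
{
	abort_all();
	return 0;
}
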
@@ -693,7 +759,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
693 unsigned long flags; 759 unsigned long flags;
694 int wait = 0; 760 int wait = 0;
695 struct qla_hw_data *ha = vha->hw; 761 struct qla_hw_data *ha = vha->hw;
696 struct req_que *req; 762 struct req_que *req = vha->req;
697 srb_t *spt; 763 srb_t *spt;
698 764
699 qla2x00_block_error_handler(cmd); 765 qla2x00_block_error_handler(cmd);
@@ -709,7 +775,6 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
709 spt = (srb_t *) CMD_SP(cmd); 775 spt = (srb_t *) CMD_SP(cmd);
710 if (!spt) 776 if (!spt)
711 return SUCCESS; 777 return SUCCESS;
712 req = spt->que;
713 778
714 /* Check active list for command command. */ 779 /* Check active list for command command. */
715 spin_lock_irqsave(&ha->hardware_lock, flags); 780 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -726,7 +791,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
726 " pid=%ld.\n", __func__, vha->host_no, sp, serial)); 791 " pid=%ld.\n", __func__, vha->host_no, sp, serial));
727 792
728 spin_unlock_irqrestore(&ha->hardware_lock, flags); 793 spin_unlock_irqrestore(&ha->hardware_lock, flags);
729 if (ha->isp_ops->abort_command(vha, sp, req)) { 794 if (ha->isp_ops->abort_command(sp)) {
730 DEBUG2(printk("%s(%ld): abort_command " 795 DEBUG2(printk("%s(%ld): abort_command "
731 "mbx failed.\n", __func__, vha->host_no)); 796 "mbx failed.\n", __func__, vha->host_no));
732 ret = FAILED; 797 ret = FAILED;
@@ -777,7 +842,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
777 return status; 842 return status;
778 843
779 spin_lock_irqsave(&ha->hardware_lock, flags); 844 spin_lock_irqsave(&ha->hardware_lock, flags);
780 req = sp->que; 845 req = vha->req;
781 for (cnt = 1; status == QLA_SUCCESS && 846 for (cnt = 1; status == QLA_SUCCESS &&
782 cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 847 cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
783 sp = req->outstanding_cmds[cnt]; 848 sp = req->outstanding_cmds[cnt];
@@ -820,7 +885,7 @@ static char *reset_errors[] = {
820 885
821static int 886static int
822__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, 887__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
823 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int)) 888 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int))
824{ 889{
825 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 890 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
826 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 891 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
@@ -841,7 +906,8 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
841 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) 906 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
842 goto eh_reset_failed; 907 goto eh_reset_failed;
843 err = 2; 908 err = 2;
844 if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS) 909 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
910 != QLA_SUCCESS)
845 goto eh_reset_failed; 911 goto eh_reset_failed;
846 err = 3; 912 err = 3;
847 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id, 913 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
@@ -996,6 +1062,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
996 if (qla2x00_vp_abort_isp(vha)) 1062 if (qla2x00_vp_abort_isp(vha))
997 goto eh_host_reset_lock; 1063 goto eh_host_reset_lock;
998 } else { 1064 } else {
1065 if (ha->wq)
1066 flush_workqueue(ha->wq);
1067
999 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1068 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1000 if (qla2x00_abort_isp(base_vha)) { 1069 if (qla2x00_abort_isp(base_vha)) {
1001 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1070 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
@@ -1037,7 +1106,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1037 struct fc_port *fcport; 1106 struct fc_port *fcport;
1038 struct qla_hw_data *ha = vha->hw; 1107 struct qla_hw_data *ha = vha->hw;
1039 1108
1040 if (ha->flags.enable_lip_full_login && !vha->vp_idx) { 1109 if (ha->flags.enable_lip_full_login && !vha->vp_idx &&
1110 !IS_QLA81XX(ha)) {
1041 ret = qla2x00_full_login_lip(vha); 1111 ret = qla2x00_full_login_lip(vha);
1042 if (ret != QLA_SUCCESS) { 1112 if (ret != QLA_SUCCESS) {
1043 DEBUG2_3(printk("%s(%ld): failed: " 1113 DEBUG2_3(printk("%s(%ld): failed: "
@@ -1064,7 +1134,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1064 if (fcport->port_type != FCT_TARGET) 1134 if (fcport->port_type != FCT_TARGET)
1065 continue; 1135 continue;
1066 1136
1067 ret = ha->isp_ops->target_reset(fcport, 0); 1137 ret = ha->isp_ops->target_reset(fcport, 0, 0);
1068 if (ret != QLA_SUCCESS) { 1138 if (ret != QLA_SUCCESS) {
1069 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1139 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1070 "target_reset=%d d_id=%x.\n", __func__, 1140 "target_reset=%d d_id=%x.\n", __func__,
@@ -1088,7 +1158,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1088 struct req_que *req; 1158 struct req_que *req;
1089 1159
1090 spin_lock_irqsave(&ha->hardware_lock, flags); 1160 spin_lock_irqsave(&ha->hardware_lock, flags);
1091 for (que = 0; que < ha->max_queues; que++) { 1161 for (que = 0; que < ha->max_req_queues; que++) {
1092 req = ha->req_q_map[que]; 1162 req = ha->req_q_map[que];
1093 if (!req) 1163 if (!req)
1094 continue; 1164 continue;
@@ -1123,7 +1193,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1123 scsi_qla_host_t *vha = shost_priv(sdev->host); 1193 scsi_qla_host_t *vha = shost_priv(sdev->host);
1124 struct qla_hw_data *ha = vha->hw; 1194 struct qla_hw_data *ha = vha->hw;
1125 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1195 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1126 struct req_que *req = ha->req_q_map[vha->req_ques[0]]; 1196 struct req_que *req = vha->req;
1127 1197
1128 if (sdev->tagged_supported) 1198 if (sdev->tagged_supported)
1129 scsi_activate_tcq(sdev, req->max_q_depth); 1199 scsi_activate_tcq(sdev, req->max_q_depth);
@@ -1511,6 +1581,13 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
1511 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1581 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1512 break; 1582 break;
1513 } 1583 }
1584
1585 /* Get adapter physical port no from interrupt pin register. */
1586 pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
1587 if (ha->port_no & 1)
1588 ha->flags.port0 = 1;
1589 else
1590 ha->flags.port0 = 0;
1514} 1591}
1515 1592
1516static int 1593static int
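
The hunk above derives the adapter's physical port from the standard PCI Interrupt Pin register (offset 0x3D in configuration space): an odd pin value is treated as port 0. The same byte can be inspected from user space through sysfs, which is a convenient way to sanity-check the flag; a small sketch, where the device path is a placeholder:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdint.h>

#define PCI_INTERRUPT_PIN_OFF 0x3d	/* standard config-space offset */

int main(void)
{
	/* Placeholder path -- substitute a real PCI function. */
	const char *cfg = "/sys/bus/pci/devices/0000:03:00.0/config";
	uint8_t pin;
	int fd = open(cfg, O_RDONLY);

	if (fd < 0 || pread(fd, &pin, 1, PCI_INTERRUPT_PIN_OFF) != 1) {
		perror("read config space");
		return 1;
	}
	close(fd);

	printf("interrupt pin = %u -> %s\n", pin,
	       (pin & 1) ? "port 0" : "port 1");
	return 0;
}
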
@@ -1518,6 +1595,7 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
1518{ 1595{
1519 resource_size_t pio; 1596 resource_size_t pio;
1520 uint16_t msix; 1597 uint16_t msix;
1598 int cpus;
1521 1599
1522 if (pci_request_selected_regions(ha->pdev, ha->bars, 1600 if (pci_request_selected_regions(ha->pdev, ha->bars,
1523 QLA2XXX_DRIVER_NAME)) { 1601 QLA2XXX_DRIVER_NAME)) {
@@ -1571,8 +1649,9 @@ skip_pio:
1571 } 1649 }
1572 1650
1573 /* Determine queue resources */ 1651 /* Determine queue resources */
1574 ha->max_queues = 1; 1652 ha->max_req_queues = ha->max_rsp_queues = 1;
1575 if (ql2xmaxqueues <= 1 || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) 1653 if ((ql2xmaxqueues <= 1 || ql2xmultique_tag < 1) &&
1654 (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
1576 goto mqiobase_exit; 1655 goto mqiobase_exit;
1577 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), 1656 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1578 pci_resource_len(ha->pdev, 3)); 1657 pci_resource_len(ha->pdev, 3));
@@ -1582,18 +1661,24 @@ skip_pio:
1582 ha->msix_count = msix; 1661 ha->msix_count = msix;
1583 /* Max queues are bounded by available msix vectors */ 1662 /* Max queues are bounded by available msix vectors */
1584 /* queue 0 uses two msix vectors */ 1663 /* queue 0 uses two msix vectors */
1585 if (ha->msix_count - 1 < ql2xmaxqueues) 1664 if (ql2xmultique_tag) {
1586 ha->max_queues = ha->msix_count - 1; 1665 cpus = num_online_cpus();
1587 else if (ql2xmaxqueues > QLA_MQ_SIZE) 1666 ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
1588 ha->max_queues = QLA_MQ_SIZE; 1667 (cpus + 1) : (ha->msix_count - 1);
1589 else 1668 ha->max_req_queues = 2;
1590 ha->max_queues = ql2xmaxqueues; 1669 } else if (ql2xmaxqueues > 1) {
1670 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
1671 QLA_MQ_SIZE : ql2xmaxqueues;
1672 DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no"
1673 " of request queues:%d\n", ha->max_req_queues));
1674 }
1591 qla_printk(KERN_INFO, ha, 1675 qla_printk(KERN_INFO, ha,
1592 "MSI-X vector count: %d\n", msix); 1676 "MSI-X vector count: %d\n", msix);
1593 } 1677 } else
1678 qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n");
1594 1679
1595mqiobase_exit: 1680mqiobase_exit:
1596 ha->msix_count = ha->max_queues + 1; 1681 ha->msix_count = ha->max_rsp_queues + 1;
1597 return (0); 1682 return (0);
1598 1683
1599iospace_error_exit: 1684iospace_error_exit:
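
In CPU-affinity mode the hunk above bounds the number of response queues by both the online CPU count and the available MSI-X vectors (queue 0 consumes two vectors, so only msix_count - 1 remain for additional queues). A tiny sketch of that sizing rule, using assumed plain-int inputs rather than the driver's structures:

#include <stdio.h>

/* Response queues are limited by the spare MSI-X vectors and by the
 * number of online CPUs plus the base queue. */
static int size_rsp_queues(int msix_count, int online_cpus)
{
	int spare_vectors = msix_count - 1;

	return (spare_vectors > online_cpus) ? online_cpus + 1
					     : spare_vectors;
}

int main(void)
{
	printf("32 vectors, 8 CPUs -> %d rsp queues\n", size_rsp_queues(32, 8));
	printf(" 4 vectors, 16 CPUs -> %d rsp queues\n", size_rsp_queues(4, 16));
	return 0;
}
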
@@ -1605,6 +1690,9 @@ qla2xxx_scan_start(struct Scsi_Host *shost)
1605{ 1690{
1606 scsi_qla_host_t *vha = shost_priv(shost); 1691 scsi_qla_host_t *vha = shost_priv(shost);
1607 1692
1693 if (vha->hw->flags.running_gold_fw)
1694 return;
1695
1608 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1696 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1609 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1697 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1610 set_bit(RSCN_UPDATE, &vha->dpc_flags); 1698 set_bit(RSCN_UPDATE, &vha->dpc_flags);
@@ -1768,6 +1856,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1768 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 1856 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
1769 ha->gid_list_info_size = 8; 1857 ha->gid_list_info_size = 8;
1770 ha->optrom_size = OPTROM_SIZE_81XX; 1858 ha->optrom_size = OPTROM_SIZE_81XX;
1859 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
1771 ha->isp_ops = &qla81xx_isp_ops; 1860 ha->isp_ops = &qla81xx_isp_ops;
1772 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 1861 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
1773 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; 1862 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
@@ -1803,14 +1892,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1803 1892
1804 ret = -ENOMEM; 1893 ret = -ENOMEM;
1805 qla2x00_mem_free(ha); 1894 qla2x00_mem_free(ha);
1806 qla2x00_free_que(ha, req, rsp); 1895 qla2x00_free_req_que(ha, req);
1896 qla2x00_free_rsp_que(ha, rsp);
1807 goto probe_hw_failed; 1897 goto probe_hw_failed;
1808 } 1898 }
1809 1899
1810 pci_set_drvdata(pdev, base_vha); 1900 pci_set_drvdata(pdev, base_vha);
1811 1901
1812 host = base_vha->host; 1902 host = base_vha->host;
1813 base_vha->req_ques[0] = req->id; 1903 base_vha->req = req;
1814 host->can_queue = req->length + 128; 1904 host->can_queue = req->length + 128;
1815 if (IS_QLA2XXX_MIDTYPE(ha)) 1905 if (IS_QLA2XXX_MIDTYPE(ha))
1816 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx; 1906 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
@@ -1841,7 +1931,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1841 } 1931 }
1842 ha->rsp_q_map[0] = rsp; 1932 ha->rsp_q_map[0] = rsp;
1843 ha->req_q_map[0] = req; 1933 ha->req_q_map[0] = req;
1844 1934 rsp->req = req;
1935 req->rsp = rsp;
1936 set_bit(0, ha->req_qid_map);
1937 set_bit(0, ha->rsp_qid_map);
1845 /* FWI2-capable only. */ 1938 /* FWI2-capable only. */
1846 req->req_q_in = &ha->iobase->isp24.req_q_in; 1939 req->req_q_in = &ha->iobase->isp24.req_q_in;
1847 req->req_q_out = &ha->iobase->isp24.req_q_out; 1940 req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -1866,6 +1959,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1866 goto probe_failed; 1959 goto probe_failed;
1867 } 1960 }
1868 1961
1962 if (ha->mqenable)
1963 if (qla25xx_setup_mode(base_vha))
1964 qla_printk(KERN_WARNING, ha,
1965 "Can't create queues, falling back to single"
1966 " queue mode\n");
1967
1968 if (ha->flags.running_gold_fw)
1969 goto skip_dpc;
1970
1869 /* 1971 /*
1870 * Startup the kernel thread for this host adapter 1972 * Startup the kernel thread for this host adapter
1871 */ 1973 */
@@ -1878,6 +1980,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1878 goto probe_failed; 1980 goto probe_failed;
1879 } 1981 }
1880 1982
1983skip_dpc:
1881 list_add_tail(&base_vha->list, &ha->vp_list); 1984 list_add_tail(&base_vha->list, &ha->vp_list);
1882 base_vha->host->irq = ha->pdev->irq; 1985 base_vha->host->irq = ha->pdev->irq;
1883 1986
@@ -1917,8 +2020,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1917 return 0; 2020 return 0;
1918 2021
1919probe_init_failed: 2022probe_init_failed:
1920 qla2x00_free_que(ha, req, rsp); 2023 qla2x00_free_req_que(ha, req);
1921 ha->max_queues = 0; 2024 qla2x00_free_rsp_que(ha, rsp);
2025 ha->max_req_queues = ha->max_rsp_queues = 0;
1922 2026
1923probe_failed: 2027probe_failed:
1924 if (base_vha->timer_active) 2028 if (base_vha->timer_active)
@@ -1976,6 +2080,13 @@ qla2x00_remove_one(struct pci_dev *pdev)
1976 2080
1977 base_vha->flags.online = 0; 2081 base_vha->flags.online = 0;
1978 2082
2083 /* Flush the work queue and remove it */
2084 if (ha->wq) {
2085 flush_workqueue(ha->wq);
2086 destroy_workqueue(ha->wq);
2087 ha->wq = NULL;
2088 }
2089
1979 /* Kill the kernel thread for this host */ 2090 /* Kill the kernel thread for this host */
1980 if (ha->dpc_thread) { 2091 if (ha->dpc_thread) {
1981 struct task_struct *t = ha->dpc_thread; 2092 struct task_struct *t = ha->dpc_thread;
@@ -2017,6 +2128,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2017{ 2128{
2018 struct qla_hw_data *ha = vha->hw; 2129 struct qla_hw_data *ha = vha->hw;
2019 2130
2131 qla25xx_delete_queues(vha);
2132
2020 if (ha->flags.fce_enabled) 2133 if (ha->flags.fce_enabled)
2021 qla2x00_disable_fce_trace(vha, NULL, NULL); 2134 qla2x00_disable_fce_trace(vha, NULL, NULL);
2022 2135
@@ -2329,6 +2442,14 @@ qla2x00_mem_free(struct qla_hw_data *ha)
2329 vfree(ha->fw_dump); 2442 vfree(ha->fw_dump);
2330 } 2443 }
2331 2444
2445 if (ha->dcbx_tlv)
2446 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
2447 ha->dcbx_tlv, ha->dcbx_tlv_dma);
2448
2449 if (ha->xgmac_data)
2450 dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
2451 ha->xgmac_data, ha->xgmac_data_dma);
2452
2332 if (ha->sns_cmd) 2453 if (ha->sns_cmd)
2333 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 2454 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
2334 ha->sns_cmd, ha->sns_cmd_dma); 2455 ha->sns_cmd, ha->sns_cmd_dma);
@@ -2412,6 +2533,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
2412 INIT_LIST_HEAD(&vha->work_list); 2533 INIT_LIST_HEAD(&vha->work_list);
2413 INIT_LIST_HEAD(&vha->list); 2534 INIT_LIST_HEAD(&vha->list);
2414 2535
2536 spin_lock_init(&vha->work_lock);
2537
2415 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); 2538 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
2416 return vha; 2539 return vha;
2417 2540
@@ -2420,13 +2543,11 @@ fail:
2420} 2543}
2421 2544
2422static struct qla_work_evt * 2545static struct qla_work_evt *
2423qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type, 2546qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
2424 int locked)
2425{ 2547{
2426 struct qla_work_evt *e; 2548 struct qla_work_evt *e;
2427 2549
2428 e = kzalloc(sizeof(struct qla_work_evt), locked ? GFP_ATOMIC: 2550 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
2429 GFP_KERNEL);
2430 if (!e) 2551 if (!e)
2431 return NULL; 2552 return NULL;
2432 2553
@@ -2437,17 +2558,15 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type,
2437} 2558}
2438 2559
2439static int 2560static int
2440qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e, int locked) 2561qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
2441{ 2562{
2442 unsigned long uninitialized_var(flags); 2563 unsigned long flags;
2443 struct qla_hw_data *ha = vha->hw;
2444 2564
2445 if (!locked) 2565 spin_lock_irqsave(&vha->work_lock, flags);
2446 spin_lock_irqsave(&ha->hardware_lock, flags);
2447 list_add_tail(&e->list, &vha->work_list); 2566 list_add_tail(&e->list, &vha->work_list);
2567 spin_unlock_irqrestore(&vha->work_lock, flags);
2448 qla2xxx_wake_dpc(vha); 2568 qla2xxx_wake_dpc(vha);
2449 if (!locked) 2569
2450 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2451 return QLA_SUCCESS; 2570 return QLA_SUCCESS;
2452} 2571}
2453 2572
@@ -2457,13 +2576,13 @@ qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
2457{ 2576{
2458 struct qla_work_evt *e; 2577 struct qla_work_evt *e;
2459 2578
2460 e = qla2x00_alloc_work(vha, QLA_EVT_AEN, 1); 2579 e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
2461 if (!e) 2580 if (!e)
2462 return QLA_FUNCTION_FAILED; 2581 return QLA_FUNCTION_FAILED;
2463 2582
2464 e->u.aen.code = code; 2583 e->u.aen.code = code;
2465 e->u.aen.data = data; 2584 e->u.aen.data = data;
2466 return qla2x00_post_work(vha, e, 1); 2585 return qla2x00_post_work(vha, e);
2467} 2586}
2468 2587
2469int 2588int
@@ -2471,25 +2590,27 @@ qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
2471{ 2590{
2472 struct qla_work_evt *e; 2591 struct qla_work_evt *e;
2473 2592
2474 e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK, 1); 2593 e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
2475 if (!e) 2594 if (!e)
2476 return QLA_FUNCTION_FAILED; 2595 return QLA_FUNCTION_FAILED;
2477 2596
2478 memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 2597 memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
2479 return qla2x00_post_work(vha, e, 1); 2598 return qla2x00_post_work(vha, e);
2480} 2599}
2481 2600
2482static void 2601static void
2483qla2x00_do_work(struct scsi_qla_host *vha) 2602qla2x00_do_work(struct scsi_qla_host *vha)
2484{ 2603{
2485 struct qla_work_evt *e; 2604 struct qla_work_evt *e, *tmp;
2486 struct qla_hw_data *ha = vha->hw; 2605 unsigned long flags;
2606 LIST_HEAD(work);
2487 2607
2488 spin_lock_irq(&ha->hardware_lock); 2608 spin_lock_irqsave(&vha->work_lock, flags);
2489 while (!list_empty(&vha->work_list)) { 2609 list_splice_init(&vha->work_list, &work);
2490 e = list_entry(vha->work_list.next, struct qla_work_evt, list); 2610 spin_unlock_irqrestore(&vha->work_lock, flags);
2611
2612 list_for_each_entry_safe(e, tmp, &work, list) {
2491 list_del_init(&e->list); 2613 list_del_init(&e->list);
2492 spin_unlock_irq(&ha->hardware_lock);
2493 2614
2494 switch (e->type) { 2615 switch (e->type) {
2495 case QLA_EVT_AEN: 2616 case QLA_EVT_AEN:
@@ -2502,10 +2623,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
2502 } 2623 }
2503 if (e->flags & QLA_EVT_FLAG_FREE) 2624 if (e->flags & QLA_EVT_FLAG_FREE)
2504 kfree(e); 2625 kfree(e);
2505 spin_lock_irq(&ha->hardware_lock);
2506 } 2626 }
2507 spin_unlock_irq(&ha->hardware_lock);
2508} 2627}
2628
2509/* Relogins all the fcports of a vport 2629/* Relogins all the fcports of a vport
2510 * Context: dpc thread 2630 * Context: dpc thread
2511 */ 2631 */
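
The reworked event path above protects vha->work_list with a dedicated work_lock and drains it by splicing the whole list onto a local head, so each event is then handled with no lock held at all. A user-space sketch of that drain-by-splice pattern, with a pthread mutex standing in for the spinlock and invented names (the list order is simplified to LIFO; the driver keeps FIFO order):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct event {
	int type;
	struct event *next;
};

static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static struct event *work_list;	/* pending events */

static void post_event(int type)
{
	struct event *e = malloc(sizeof(*e));

	e->type = type;
	pthread_mutex_lock(&work_lock);
	e->next = work_list;
	work_list = e;
	pthread_mutex_unlock(&work_lock);
}

static void do_work(void)
{
	struct event *batch, *e;

	/* Take the whole pending list in one short critical section. */
	pthread_mutex_lock(&work_lock);
	batch = work_list;
	work_list = NULL;
	pthread_mutex_unlock(&work_lock);

	/* Process the batch with no lock held. */
	while ((e = batch)) {
		batch = e->next;
		printf("handling event %d\n", e->type);
		free(e);
	}
}

int main(void)
{
	post_event(1);
	post_event(2);
	do_work();
	return 0;
}
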
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 152ecfc26cd2..010e69b29afe 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -219,8 +219,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
219 wait_cnt = NVR_WAIT_CNT; 219 wait_cnt = NVR_WAIT_CNT;
220 do { 220 do {
221 if (!--wait_cnt) { 221 if (!--wait_cnt) {
222 DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n", 222 DEBUG9_10(qla_printk(KERN_WARNING, ha,
223 __func__, vha->host_no)); 223 "NVRAM didn't go ready...\n"));
224 break; 224 break;
225 } 225 }
226 NVRAM_DELAY(); 226 NVRAM_DELAY();
@@ -349,7 +349,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
349 wait_cnt = NVR_WAIT_CNT; 349 wait_cnt = NVR_WAIT_CNT;
350 do { 350 do {
351 if (!--wait_cnt) { 351 if (!--wait_cnt) {
352 DEBUG9_10(qla_printk( 352 DEBUG9_10(qla_printk(KERN_WARNING, ha,
353 "NVRAM didn't go ready...\n")); 353 "NVRAM didn't go ready...\n"));
354 break; 354 break;
355 } 355 }
@@ -408,7 +408,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
408 wait_cnt = NVR_WAIT_CNT; 408 wait_cnt = NVR_WAIT_CNT;
409 do { 409 do {
410 if (!--wait_cnt) { 410 if (!--wait_cnt) {
411 DEBUG9_10(qla_printk("NVRAM didn't go ready...\n")); 411 DEBUG9_10(qla_printk(KERN_WARNING, ha,
412 "NVRAM didn't go ready...\n"));
412 break; 413 break;
413 } 414 }
414 NVRAM_DELAY(); 415 NVRAM_DELAY();
@@ -701,32 +702,35 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
701 break; 702 break;
702 case FLT_REG_VPD_0: 703 case FLT_REG_VPD_0:
703 ha->flt_region_vpd_nvram = start; 704 ha->flt_region_vpd_nvram = start;
704 if (!(PCI_FUNC(ha->pdev->devfn) & 1)) 705 if (ha->flags.port0)
705 ha->flt_region_vpd = start; 706 ha->flt_region_vpd = start;
706 break; 707 break;
707 case FLT_REG_VPD_1: 708 case FLT_REG_VPD_1:
708 if (PCI_FUNC(ha->pdev->devfn) & 1) 709 if (!ha->flags.port0)
709 ha->flt_region_vpd = start; 710 ha->flt_region_vpd = start;
710 break; 711 break;
711 case FLT_REG_NVRAM_0: 712 case FLT_REG_NVRAM_0:
712 if (!(PCI_FUNC(ha->pdev->devfn) & 1)) 713 if (ha->flags.port0)
713 ha->flt_region_nvram = start; 714 ha->flt_region_nvram = start;
714 break; 715 break;
715 case FLT_REG_NVRAM_1: 716 case FLT_REG_NVRAM_1:
716 if (PCI_FUNC(ha->pdev->devfn) & 1) 717 if (!ha->flags.port0)
717 ha->flt_region_nvram = start; 718 ha->flt_region_nvram = start;
718 break; 719 break;
719 case FLT_REG_FDT: 720 case FLT_REG_FDT:
720 ha->flt_region_fdt = start; 721 ha->flt_region_fdt = start;
721 break; 722 break;
722 case FLT_REG_NPIV_CONF_0: 723 case FLT_REG_NPIV_CONF_0:
723 if (!(PCI_FUNC(ha->pdev->devfn) & 1)) 724 if (ha->flags.port0)
724 ha->flt_region_npiv_conf = start; 725 ha->flt_region_npiv_conf = start;
725 break; 726 break;
726 case FLT_REG_NPIV_CONF_1: 727 case FLT_REG_NPIV_CONF_1:
727 if (PCI_FUNC(ha->pdev->devfn) & 1) 728 if (!ha->flags.port0)
728 ha->flt_region_npiv_conf = start; 729 ha->flt_region_npiv_conf = start;
729 break; 730 break;
731 case FLT_REG_GOLD_FW:
732 ha->flt_region_gold_fw = start;
733 break;
730 } 734 }
731 } 735 }
732 goto done; 736 goto done;
@@ -744,12 +748,12 @@ no_flash_data:
744 ha->flt_region_fw = def_fw[def]; 748 ha->flt_region_fw = def_fw[def];
745 ha->flt_region_boot = def_boot[def]; 749 ha->flt_region_boot = def_boot[def];
746 ha->flt_region_vpd_nvram = def_vpd_nvram[def]; 750 ha->flt_region_vpd_nvram = def_vpd_nvram[def];
747 ha->flt_region_vpd = !(PCI_FUNC(ha->pdev->devfn) & 1) ? 751 ha->flt_region_vpd = ha->flags.port0 ?
748 def_vpd0[def]: def_vpd1[def]; 752 def_vpd0[def]: def_vpd1[def];
749 ha->flt_region_nvram = !(PCI_FUNC(ha->pdev->devfn) & 1) ? 753 ha->flt_region_nvram = ha->flags.port0 ?
750 def_nvram0[def]: def_nvram1[def]; 754 def_nvram0[def]: def_nvram1[def];
751 ha->flt_region_fdt = def_fdt[def]; 755 ha->flt_region_fdt = def_fdt[def];
752 ha->flt_region_npiv_conf = !(PCI_FUNC(ha->pdev->devfn) & 1) ? 756 ha->flt_region_npiv_conf = ha->flags.port0 ?
753 def_npiv_conf0[def]: def_npiv_conf1[def]; 757 def_npiv_conf0[def]: def_npiv_conf1[def];
754done: 758done:
755 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x " 759 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
@@ -924,6 +928,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
924 struct fc_vport_identifiers vid; 928 struct fc_vport_identifiers vid;
925 struct fc_vport *vport; 929 struct fc_vport *vport;
926 930
931 memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry));
932
927 flags = le16_to_cpu(entry->flags); 933 flags = le16_to_cpu(entry->flags);
928 if (flags == 0xffff) 934 if (flags == 0xffff)
929 continue; 935 continue;
@@ -937,11 +943,11 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
937 vid.port_name = wwn_to_u64(entry->port_name); 943 vid.port_name = wwn_to_u64(entry->port_name);
938 vid.node_name = wwn_to_u64(entry->node_name); 944 vid.node_name = wwn_to_u64(entry->node_name);
939 945
940 memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry)); 946 DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx "
941
942 DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx "
943 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt, 947 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
944 vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id), 948 (unsigned long long)vid.port_name,
949 (unsigned long long)vid.node_name,
950 le16_to_cpu(entry->vf_id),
945 entry->q_qos, entry->f_qos)); 951 entry->q_qos, entry->f_qos));
946 952
947 if (i < QLA_PRECONFIG_VPORTS) { 953 if (i < QLA_PRECONFIG_VPORTS) {
@@ -950,12 +956,12 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
950 qla_printk(KERN_INFO, ha, 956 qla_printk(KERN_INFO, ha,
951 "NPIV-Config: Failed to create vport [%02x]: " 957 "NPIV-Config: Failed to create vport [%02x]: "
952 "wwpn=%llx wwnn=%llx.\n", cnt, 958 "wwpn=%llx wwnn=%llx.\n", cnt,
953 vid.port_name, vid.node_name); 959 (unsigned long long)vid.port_name,
960 (unsigned long long)vid.node_name);
954 } 961 }
955 } 962 }
956done: 963done:
957 kfree(data); 964 kfree(data);
958 ha->npiv_info = NULL;
959} 965}
960 966
961static int 967static int
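
Besides moving the memcpy ahead of the flags check, the hunks above add (unsigned long long) casts before every %llx: on some 64-bit architectures the kernel's u64 is unsigned long, so printing it with %llx without a cast triggers format warnings. The two portable ways to print a fixed-width 64-bit value in plain C:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	uint64_t wwpn = 0x21000024ff123456ULL;	/* example WWN value */

	/* Either cast to a type whose printf length modifier is fixed... */
	printf("wwpn=%llx\n", (unsigned long long)wwpn);

	/* ...or use the <inttypes.h> macro that matches uint64_t. */
	printf("wwpn=%" PRIx64 "\n", wwpn);
	return 0;
}
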
@@ -1079,8 +1085,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1079 0xff0000) | ((fdata >> 16) & 0xff)); 1085 0xff0000) | ((fdata >> 16) & 0xff));
1080 ret = qla24xx_erase_sector(vha, fdata); 1086 ret = qla24xx_erase_sector(vha, fdata);
1081 if (ret != QLA_SUCCESS) { 1087 if (ret != QLA_SUCCESS) {
1082 DEBUG9(qla_printk("Unable to erase sector: " 1088 DEBUG9(qla_printk(KERN_WARNING, ha,
1083 "address=%x.\n", faddr)); 1089 "Unable to erase sector: address=%x.\n",
1090 faddr));
1084 break; 1091 break;
1085 } 1092 }
1086 } 1093 }
@@ -1240,8 +1247,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1240 ret = qla24xx_write_flash_dword(ha, 1247 ret = qla24xx_write_flash_dword(ha,
1241 nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr)); 1248 nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
1242 if (ret != QLA_SUCCESS) { 1249 if (ret != QLA_SUCCESS) {
1243 DEBUG9(qla_printk("Unable to program nvram address=%x " 1250 DEBUG9(qla_printk(KERN_WARNING, ha,
1244 "data=%x.\n", naddr, *dwptr)); 1251 "Unable to program nvram address=%x data=%x.\n",
1252 naddr, *dwptr));
1245 break; 1253 break;
1246 } 1254 }
1247 } 1255 }
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 19d1afc3a343..84369705a9ad 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.01-k1" 10#define QLA2XXX_VERSION "8.03.01-k4"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 166417a6afba..2de5f3ad640b 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1225,8 +1225,8 @@ EXPORT_SYMBOL(__scsi_device_lookup_by_target);
1225 * @starget: SCSI target pointer 1225 * @starget: SCSI target pointer
1226 * @lun: SCSI Logical Unit Number 1226 * @lun: SCSI Logical Unit Number
1227 * 1227 *
1228 * Description: Looks up the scsi_device with the specified @channel, @id, @lun 1228 * Description: Looks up the scsi_device with the specified @lun for a given
1229 * for a given host. The returned scsi_device has an additional reference that 1229 * @starget. The returned scsi_device has an additional reference that
1230 * needs to be released with scsi_device_put once you're done with it. 1230 * needs to be released with scsi_device_put once you're done with it.
1231 **/ 1231 **/
1232struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget, 1232struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 213123b0486b..fb9af207d61d 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -101,6 +101,8 @@ static const char * scsi_debug_version_date = "20070104";
101#define DEF_DIF 0 101#define DEF_DIF 0
102#define DEF_GUARD 0 102#define DEF_GUARD 0
103#define DEF_ATO 1 103#define DEF_ATO 1
104#define DEF_PHYSBLK_EXP 0
105#define DEF_LOWEST_ALIGNED 0
104 106
105/* bit mask values for scsi_debug_opts */ 107/* bit mask values for scsi_debug_opts */
106#define SCSI_DEBUG_OPT_NOISE 1 108#define SCSI_DEBUG_OPT_NOISE 1
@@ -156,6 +158,8 @@ static int scsi_debug_dix = DEF_DIX;
156static int scsi_debug_dif = DEF_DIF; 158static int scsi_debug_dif = DEF_DIF;
157static int scsi_debug_guard = DEF_GUARD; 159static int scsi_debug_guard = DEF_GUARD;
158static int scsi_debug_ato = DEF_ATO; 160static int scsi_debug_ato = DEF_ATO;
161static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
162static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
159 163
160static int scsi_debug_cmnd_count = 0; 164static int scsi_debug_cmnd_count = 0;
161 165
@@ -657,7 +661,12 @@ static unsigned char vpdb0_data[] = {
657 661
658static int inquiry_evpd_b0(unsigned char * arr) 662static int inquiry_evpd_b0(unsigned char * arr)
659{ 663{
664 unsigned int gran;
665
660 memcpy(arr, vpdb0_data, sizeof(vpdb0_data)); 666 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
667 gran = 1 << scsi_debug_physblk_exp;
668 arr[2] = (gran >> 8) & 0xff;
669 arr[3] = gran & 0xff;
661 if (sdebug_store_sectors > 0x400) { 670 if (sdebug_store_sectors > 0x400) {
662 arr[4] = (sdebug_store_sectors >> 24) & 0xff; 671 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
663 arr[5] = (sdebug_store_sectors >> 16) & 0xff; 672 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
@@ -887,7 +896,7 @@ static int resp_start_stop(struct scsi_cmnd * scp,
887static sector_t get_sdebug_capacity(void) 896static sector_t get_sdebug_capacity(void)
888{ 897{
889 if (scsi_debug_virtual_gb > 0) 898 if (scsi_debug_virtual_gb > 0)
890 return 2048 * 1024 * scsi_debug_virtual_gb; 899 return 2048 * 1024 * (sector_t)scsi_debug_virtual_gb;
891 else 900 else
892 return sdebug_store_sectors; 901 return sdebug_store_sectors;
893} 902}
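
The cast added above is an overflow fix: 2048 * 1024 * scsi_debug_virtual_gb is evaluated in int before being widened to sector_t, so virtual_gb values of 1024 or more wrap on 32-bit arithmetic. Casting one operand first forces the whole multiplication into the wider type. A small demonstration (unsigned is used here so the wraparound is well defined):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int virtual_gb = 2048;	/* 2 TB of virtual capacity */

	/* All-32-bit arithmetic wraps before the assignment widens it. */
	uint64_t wrong = 2048 * 1024 * virtual_gb;

	/* Casting one operand makes the multiply 64-bit throughout. */
	uint64_t right = 2048 * 1024 * (uint64_t)virtual_gb;

	printf("wrong: %llu sectors\n", (unsigned long long)wrong);
	printf("right: %llu sectors\n", (unsigned long long)right);
	return 0;
}
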
@@ -945,6 +954,9 @@ static int resp_readcap16(struct scsi_cmnd * scp,
945 arr[9] = (scsi_debug_sector_size >> 16) & 0xff; 954 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
946 arr[10] = (scsi_debug_sector_size >> 8) & 0xff; 955 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
947 arr[11] = scsi_debug_sector_size & 0xff; 956 arr[11] = scsi_debug_sector_size & 0xff;
957 arr[13] = scsi_debug_physblk_exp & 0xf;
958 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
959 arr[15] = scsi_debug_lowest_aligned & 0xff;
948 960
949 if (scsi_debug_dif) { 961 if (scsi_debug_dif) {
950 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */ 962 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
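
Bytes 13-15 of the READ CAPACITY(16) parameter data filled in above carry the logical-blocks-per-physical-block exponent (low nibble of byte 13) and the 14-bit lowest aligned LBA split across bytes 14 and 15. A self-contained encoder for just those bytes, mirroring the masks used in the hunk:

#include <stdio.h>
#include <stdint.h>

/* Fill READ CAPACITY(16) parameter-data bytes 13..15. */
static void encode_rc16_align(uint8_t *arr, unsigned int physblk_exp,
			      unsigned int lowest_aligned)
{
	arr[13] = physblk_exp & 0xf;		/* blocks-per-physblock exponent */
	arr[14] = (lowest_aligned >> 8) & 0x3f;	/* lowest aligned LBA, high bits */
	arr[15] = lowest_aligned & 0xff;	/* lowest aligned LBA, low byte */
}

int main(void)
{
	uint8_t arr[32] = { 0 };

	encode_rc16_align(arr, 3, 7);	/* 8 logical blocks per physical, LBA 7 */
	printf("byte13=%02x byte14=%02x byte15=%02x\n",
	       arr[13], arr[14], arr[15]);
	return 0;
}
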
@@ -2380,6 +2392,8 @@ module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2380module_param_named(dif, scsi_debug_dif, int, S_IRUGO); 2392module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2381module_param_named(guard, scsi_debug_guard, int, S_IRUGO); 2393module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
2382module_param_named(ato, scsi_debug_ato, int, S_IRUGO); 2394module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2395module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2396module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2383 2397
2384MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); 2398MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2385MODULE_DESCRIPTION("SCSI debug adapter driver"); 2399MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -2401,7 +2415,9 @@ MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2401MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])"); 2415MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2402MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)"); 2416MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2403MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); 2417MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2404MODULE_PARM_DESC(sector_size, "hardware sector size in bytes (def=512)"); 2418MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2419MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2420MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2405MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)"); 2421MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2406MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); 2422MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2407MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); 2423MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
@@ -2874,6 +2890,18 @@ static int __init scsi_debug_init(void)
2874 return -EINVAL; 2890 return -EINVAL;
2875 } 2891 }
2876 2892
2893 if (scsi_debug_physblk_exp > 15) {
2894 printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
2895 scsi_debug_physblk_exp);
2896 return -EINVAL;
2897 }
2898
2899 if (scsi_debug_lowest_aligned > 0x3fff) {
2900 printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
2901 scsi_debug_lowest_aligned);
2902 return -EINVAL;
2903 }
2904
2877 if (scsi_debug_dev_size_mb < 1) 2905 if (scsi_debug_dev_size_mb < 1)
2878 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ 2906 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
2879 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576; 2907 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index b13481369642..93c2622cb969 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -24,6 +24,13 @@ struct scsi_dev_info_list {
24 unsigned compatible; /* for use with scsi_static_device_list entries */ 24 unsigned compatible; /* for use with scsi_static_device_list entries */
25}; 25};
26 26
27struct scsi_dev_info_list_table {
28 struct list_head node; /* our node for being on the master list */
29 struct list_head scsi_dev_info_list; /* head of dev info list */
30 const char *name; /* name of list for /proc (NULL for global) */
31 int key; /* unique numeric identifier */
32};
33
27 34
28static const char spaces[] = " "; /* 16 of them */ 35static const char spaces[] = " "; /* 16 of them */
29static unsigned scsi_default_dev_flags; 36static unsigned scsi_default_dev_flags;
@@ -225,6 +232,7 @@ static struct {
225 {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 232 {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
226 {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 233 {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
227 {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 234 {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
235 {"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
228 {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, 236 {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
229 {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, 237 {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
230 {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ 238 {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */
@@ -246,6 +254,22 @@ static struct {
246 { NULL, NULL, NULL, 0 }, 254 { NULL, NULL, NULL, 0 },
247}; 255};
248 256
257static struct scsi_dev_info_list_table *scsi_devinfo_lookup_by_key(int key)
258{
259 struct scsi_dev_info_list_table *devinfo_table;
260 int found = 0;
261
262 list_for_each_entry(devinfo_table, &scsi_dev_info_list, node)
263 if (devinfo_table->key == key) {
264 found = 1;
265 break;
266 }
267 if (!found)
268 return ERR_PTR(-EINVAL);
269
270 return devinfo_table;
271}
272
249/* 273/*
250 * scsi_strcpy_devinfo: called from scsi_dev_info_list_add to copy into 274 * scsi_strcpy_devinfo: called from scsi_dev_info_list_add to copy into
251 * devinfo vendor and model strings. 275 * devinfo vendor and model strings.
@@ -295,7 +319,38 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
295static int scsi_dev_info_list_add(int compatible, char *vendor, char *model, 319static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
296 char *strflags, int flags) 320 char *strflags, int flags)
297{ 321{
322 return scsi_dev_info_list_add_keyed(compatible, vendor, model,
323 strflags, flags,
324 SCSI_DEVINFO_GLOBAL);
325}
326
327/**
328 * scsi_dev_info_list_add_keyed - add one dev_info list entry.
329 * @compatible: if true, null terminate short strings. Otherwise space pad.
330 * @vendor: vendor string
331 * @model: model (product) string
332 * @strflags: integer string
333 * @flags: if strflags NULL, use this flag value
334 * @key: specify list to use
335 *
336 * Description:
337 * Create and add one dev_info entry for @vendor, @model,
338 * @strflags or @flag in list specified by @key. If @compatible,
339 * add to the tail of the list, do not space pad, and set
340 * devinfo->compatible. The scsi_static_device_list entries are
341 * added with @compatible 1 and @clfags NULL.
342 *
343 * Returns: 0 OK, -error on failure.
344 **/
345int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
346 char *strflags, int flags, int key)
347{
298 struct scsi_dev_info_list *devinfo; 348 struct scsi_dev_info_list *devinfo;
349 struct scsi_dev_info_list_table *devinfo_table =
350 scsi_devinfo_lookup_by_key(key);
351
352 if (IS_ERR(devinfo_table))
353 return PTR_ERR(devinfo_table);
299 354
300 devinfo = kmalloc(sizeof(*devinfo), GFP_KERNEL); 355 devinfo = kmalloc(sizeof(*devinfo), GFP_KERNEL);
301 if (!devinfo) { 356 if (!devinfo) {
@@ -316,12 +371,15 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
316 devinfo->compatible = compatible; 371 devinfo->compatible = compatible;
317 372
318 if (compatible) 373 if (compatible)
319 list_add_tail(&devinfo->dev_info_list, &scsi_dev_info_list); 374 list_add_tail(&devinfo->dev_info_list,
375 &devinfo_table->scsi_dev_info_list);
320 else 376 else
321 list_add(&devinfo->dev_info_list, &scsi_dev_info_list); 377 list_add(&devinfo->dev_info_list,
378 &devinfo_table->scsi_dev_info_list);
322 379
323 return 0; 380 return 0;
324} 381}
382EXPORT_SYMBOL(scsi_dev_info_list_add_keyed);
325 383
326/** 384/**
327 * scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list. 385 * scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list.
@@ -381,22 +439,48 @@ static int scsi_dev_info_list_add_str(char *dev_list)
381 * @model: model name 439 * @model: model name
382 * 440 *
383 * Description: 441 * Description:
384 * Search the scsi_dev_info_list for an entry matching @vendor and 442 * Search the global scsi_dev_info_list (specified by list zero)
385 * @model, if found, return the matching flags value, else return 443 * for an entry matching @vendor and @model, if found, return the
386 * the host or global default settings. Called during scan time. 444 * matching flags value, else return the host or global default
445 * settings. Called during scan time.
387 **/ 446 **/
388int scsi_get_device_flags(struct scsi_device *sdev, 447int scsi_get_device_flags(struct scsi_device *sdev,
389 const unsigned char *vendor, 448 const unsigned char *vendor,
390 const unsigned char *model) 449 const unsigned char *model)
391{ 450{
451 return scsi_get_device_flags_keyed(sdev, vendor, model,
452 SCSI_DEVINFO_GLOBAL);
453}
454
455
456/**
457 * get_device_flags_keyed - get device specific flags from the dynamic device list.
458 * @sdev: &scsi_device to get flags for
459 * @vendor: vendor name
460 * @model: model name
461 * @key: list to look up
462 *
463 * Description:
464 * Search the scsi_dev_info_list specified by @key for an entry
465 * matching @vendor and @model, if found, return the matching
466 * flags value, else return the host or global default settings.
467 * Called during scan time.
468 **/
469int scsi_get_device_flags_keyed(struct scsi_device *sdev,
470 const unsigned char *vendor,
471 const unsigned char *model,
472 int key)
473{
392 struct scsi_dev_info_list *devinfo; 474 struct scsi_dev_info_list *devinfo;
393 unsigned int bflags; 475 struct scsi_dev_info_list_table *devinfo_table;
476
477 devinfo_table = scsi_devinfo_lookup_by_key(key);
394 478
395 bflags = sdev->sdev_bflags; 479 if (IS_ERR(devinfo_table))
396 if (!bflags) 480 return PTR_ERR(devinfo_table);
397 bflags = scsi_default_dev_flags;
398 481
399 list_for_each_entry(devinfo, &scsi_dev_info_list, dev_info_list) { 482 list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list,
483 dev_info_list) {
400 if (devinfo->compatible) { 484 if (devinfo->compatible) {
401 /* 485 /*
402 * Behave like the older version of get_device_flags. 486 * Behave like the older version of get_device_flags.
@@ -446,32 +530,89 @@ int scsi_get_device_flags(struct scsi_device *sdev,
446 return devinfo->flags; 530 return devinfo->flags;
447 } 531 }
448 } 532 }
449 return bflags; 533 /* nothing found, return nothing */
534 if (key != SCSI_DEVINFO_GLOBAL)
535 return 0;
536
537 /* except for the global list, where we have an exception */
538 if (sdev->sdev_bflags)
539 return sdev->sdev_bflags;
540
541 return scsi_default_dev_flags;
450} 542}
543EXPORT_SYMBOL(scsi_get_device_flags_keyed);
451 544
452#ifdef CONFIG_SCSI_PROC_FS 545#ifdef CONFIG_SCSI_PROC_FS
546struct double_list {
547 struct list_head *top;
548 struct list_head *bottom;
549};
550
453static int devinfo_seq_show(struct seq_file *m, void *v) 551static int devinfo_seq_show(struct seq_file *m, void *v)
454{ 552{
553 struct double_list *dl = v;
554 struct scsi_dev_info_list_table *devinfo_table =
555 list_entry(dl->top, struct scsi_dev_info_list_table, node);
455 struct scsi_dev_info_list *devinfo = 556 struct scsi_dev_info_list *devinfo =
456 list_entry(v, struct scsi_dev_info_list, dev_info_list); 557 list_entry(dl->bottom, struct scsi_dev_info_list,
558 dev_info_list);
559
560 if (devinfo_table->scsi_dev_info_list.next == dl->bottom &&
561 devinfo_table->name)
562 seq_printf(m, "[%s]:\n", devinfo_table->name);
457 563
458 seq_printf(m, "'%.8s' '%.16s' 0x%x\n", 564 seq_printf(m, "'%.8s' '%.16s' 0x%x\n",
459 devinfo->vendor, devinfo->model, devinfo->flags); 565 devinfo->vendor, devinfo->model, devinfo->flags);
460 return 0; 566 return 0;
461} 567}
462 568
463static void * devinfo_seq_start(struct seq_file *m, loff_t *pos) 569static void *devinfo_seq_start(struct seq_file *m, loff_t *ppos)
464{ 570{
465 return seq_list_start(&scsi_dev_info_list, *pos); 571 struct double_list *dl = kmalloc(sizeof(*dl), GFP_KERNEL);
572 loff_t pos = *ppos;
573
574 if (!dl)
575 return NULL;
576
577 list_for_each(dl->top, &scsi_dev_info_list) {
578 struct scsi_dev_info_list_table *devinfo_table =
579 list_entry(dl->top, struct scsi_dev_info_list_table,
580 node);
581 list_for_each(dl->bottom, &devinfo_table->scsi_dev_info_list)
582 if (pos-- == 0)
583 return dl;
584 }
585
586 kfree(dl);
587 return NULL;
466} 588}
467 589
468static void * devinfo_seq_next(struct seq_file *m, void *v, loff_t *pos) 590static void *devinfo_seq_next(struct seq_file *m, void *v, loff_t *ppos)
469{ 591{
470 return seq_list_next(v, &scsi_dev_info_list, pos); 592 struct double_list *dl = v;
593 struct scsi_dev_info_list_table *devinfo_table =
594 list_entry(dl->top, struct scsi_dev_info_list_table, node);
595
596 ++*ppos;
597 dl->bottom = dl->bottom->next;
598 while (&devinfo_table->scsi_dev_info_list == dl->bottom) {
599 dl->top = dl->top->next;
600 if (dl->top == &scsi_dev_info_list) {
601 kfree(dl);
602 return NULL;
603 }
604 devinfo_table = list_entry(dl->top,
605 struct scsi_dev_info_list_table,
606 node);
607 dl->bottom = devinfo_table->scsi_dev_info_list.next;
608 }
609
610 return dl;
471} 611}
472 612
473static void devinfo_seq_stop(struct seq_file *m, void *v) 613static void devinfo_seq_stop(struct seq_file *m, void *v)
474{ 614{
615 kfree(v);
475} 616}
476 617
477static const struct seq_operations scsi_devinfo_seq_ops = { 618static const struct seq_operations scsi_devinfo_seq_ops = {
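
With the single flat list replaced by a list of per-key tables, the /proc iterator above has to keep a two-level cursor (dl->top over tables, dl->bottom over entries) and roll the bottom cursor over to the next table whenever it reaches a table's end. Stripped of the seq_file plumbing, the traversal logic looks roughly like this sketch, which uses plain arrays instead of kernel lists:

#include <stdio.h>

struct table { const char *name; const char *entries[4]; };

static struct table tables[] = {
	{ "global", { "IBM", "SUN", NULL } },
	{ "rdac",   { "DELL", NULL } },
};
#define NR_TABLES (sizeof(tables) / sizeof(tables[0]))

struct cursor { unsigned int top, bottom; };

/* Advance to the next entry, rolling over to the next table when the
 * current one is exhausted.  Returns 0 when the whole set is done. */
static int cursor_next(struct cursor *c)
{
	c->bottom++;
	while (c->top < NR_TABLES && !tables[c->top].entries[c->bottom]) {
		c->top++;
		c->bottom = 0;
	}
	return c->top < NR_TABLES;
}

int main(void)
{
	struct cursor c = { 0, 0 };

	if (!tables[0].entries[0])
		return 0;
	do {
		printf("[%s] %s\n", tables[c.top].name,
		       tables[c.top].entries[c.bottom]);
	} while (cursor_next(&c));
	return 0;
}
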
@@ -548,19 +689,78 @@ MODULE_PARM_DESC(default_dev_flags,
548 **/ 689 **/
549void scsi_exit_devinfo(void) 690void scsi_exit_devinfo(void)
550{ 691{
551 struct list_head *lh, *lh_next;
552 struct scsi_dev_info_list *devinfo;
553
554#ifdef CONFIG_SCSI_PROC_FS 692#ifdef CONFIG_SCSI_PROC_FS
555 remove_proc_entry("scsi/device_info", NULL); 693 remove_proc_entry("scsi/device_info", NULL);
556#endif 694#endif
557 695
558 list_for_each_safe(lh, lh_next, &scsi_dev_info_list) { 696 scsi_dev_info_remove_list(SCSI_DEVINFO_GLOBAL);
697}
698
699/**
700 * scsi_dev_info_add_list - add a new devinfo list
701 * @key: key of the list to add
702 * @name: Name of the list to add (for /proc/scsi/device_info)
703 *
704 * Adds the requested list, returns zero on success, -EEXIST if the
705 * key is already registered to a list, or other error on failure.
706 */
707int scsi_dev_info_add_list(int key, const char *name)
708{
709 struct scsi_dev_info_list_table *devinfo_table =
710 scsi_devinfo_lookup_by_key(key);
711
712 if (!IS_ERR(devinfo_table))
713 /* list already exists */
714 return -EEXIST;
715
716 devinfo_table = kmalloc(sizeof(*devinfo_table), GFP_KERNEL);
717
718 if (!devinfo_table)
719 return -ENOMEM;
720
721 INIT_LIST_HEAD(&devinfo_table->node);
722 INIT_LIST_HEAD(&devinfo_table->scsi_dev_info_list);
723 devinfo_table->name = name;
724 devinfo_table->key = key;
725 list_add_tail(&devinfo_table->node, &scsi_dev_info_list);
726
727 return 0;
728}
729EXPORT_SYMBOL(scsi_dev_info_add_list);
730
731/**
732 * scsi_dev_info_remove_list - destroy an added devinfo list
733 * @key: key of the list to destroy
734 *
735 * Iterates over the entire list first, freeing all the values, then
736 * frees the list itself. Returns 0 on success or -EINVAL if the key
737 * can't be found.
738 */
739int scsi_dev_info_remove_list(int key)
740{
741 struct list_head *lh, *lh_next;
742 struct scsi_dev_info_list_table *devinfo_table =
743 scsi_devinfo_lookup_by_key(key);
744
745 if (IS_ERR(devinfo_table))
746 /* no such list */
747 return -EINVAL;
748
749 /* remove from the master list */
750 list_del(&devinfo_table->node);
751
752 list_for_each_safe(lh, lh_next, &devinfo_table->scsi_dev_info_list) {
753 struct scsi_dev_info_list *devinfo;
754
559 devinfo = list_entry(lh, struct scsi_dev_info_list, 755 devinfo = list_entry(lh, struct scsi_dev_info_list,
560 dev_info_list); 756 dev_info_list);
561 kfree(devinfo); 757 kfree(devinfo);
562 } 758 }
759 kfree(devinfo_table);
760
761 return 0;
563} 762}
763EXPORT_SYMBOL(scsi_dev_info_remove_list);
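
The add/remove/lookup helpers above let device-handler modules register their own device lists next to the global one, all hanging off a master list keyed by an integer: a list is created once, then looked up by key before entries are added to it. A compact user-space model of that structure with invented names (error handling is reduced to return codes, and allocations are assumed to succeed):

#include <stdio.h>
#include <stdlib.h>

struct entry { char vendor[9]; struct entry *next; };
struct table { int key; struct entry *entries; struct table *next; };

static struct table *tables;	/* master list of per-key tables */

static struct table *lookup_table(int key)
{
	struct table *t;

	for (t = tables; t; t = t->next)
		if (t->key == key)
			return t;
	return NULL;
}

static int add_table(int key)
{
	struct table *t;

	if (lookup_table(key))
		return -1;		/* key already registered */
	t = calloc(1, sizeof(*t));
	t->key = key;
	t->next = tables;
	tables = t;
	return 0;
}

static int add_entry(int key, const char *vendor)
{
	struct table *t = lookup_table(key);
	struct entry *e;

	if (!t)
		return -1;		/* no such list */
	e = calloc(1, sizeof(*e));
	snprintf(e->vendor, sizeof(e->vendor), "%s", vendor);
	e->next = t->entries;
	t->entries = e;
	return 0;
}

int main(void)
{
	add_table(0);			/* the "global" list */
	add_table(1);			/* a handler-private list */
	add_entry(1, "DELL");
	printf("key 1 first entry: %s\n", lookup_table(1)->entries->vendor);
	return 0;
}
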
564 764
565/** 765/**
566 * scsi_init_devinfo - set up the dynamic device list. 766 * scsi_init_devinfo - set up the dynamic device list.
@@ -576,10 +776,14 @@ int __init scsi_init_devinfo(void)
576#endif 776#endif
577 int error, i; 777 int error, i;
578 778
579 error = scsi_dev_info_list_add_str(scsi_dev_flags); 779 error = scsi_dev_info_add_list(SCSI_DEVINFO_GLOBAL, NULL);
580 if (error) 780 if (error)
581 return error; 781 return error;
582 782
783 error = scsi_dev_info_list_add_str(scsi_dev_flags);
784 if (error)
785 goto out;
786
583 for (i = 0; scsi_static_device_list[i].vendor; i++) { 787 for (i = 0; scsi_static_device_list[i].vendor; i++) {
584 error = scsi_dev_info_list_add(1 /* compatibile */, 788 error = scsi_dev_info_list_add(1 /* compatibile */,
585 scsi_static_device_list[i].vendor, 789 scsi_static_device_list[i].vendor,
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 0c2c73be1974..a1689353d7fd 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -641,9 +641,9 @@ EXPORT_SYMBOL(scsi_eh_prep_cmnd);
641/** 641/**
642 * scsi_eh_restore_cmnd - Restore a scsi command info as part of error recory 642 * scsi_eh_restore_cmnd - Restore a scsi command info as part of error recory
643 * @scmd: SCSI command structure to restore 643 * @scmd: SCSI command structure to restore
644 * @ses: saved information from a coresponding call to scsi_prep_eh_cmnd 644 * @ses: saved information from a coresponding call to scsi_eh_prep_cmnd
645 * 645 *
646 * Undo any damage done by above scsi_prep_eh_cmnd(). 646 * Undo any damage done by above scsi_eh_prep_cmnd().
647 */ 647 */
648void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses) 648void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
649{ 649{
@@ -1451,28 +1451,21 @@ static void eh_lock_door_done(struct request *req, int uptodate)
1451 * @sdev: SCSI device to prevent medium removal 1451 * @sdev: SCSI device to prevent medium removal
1452 * 1452 *
1453 * Locking: 1453 * Locking:
1454 * We must be called from process context; scsi_allocate_request() 1454 * We must be called from process context.
1455 * may sleep.
1456 * 1455 *
1457 * Notes: 1456 * Notes:
1458 * We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the 1457 * We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the
1459 * head of the devices request queue, and continue. 1458 * head of the devices request queue, and continue.
1460 *
1461 * Bugs:
1462 * scsi_allocate_request() may sleep waiting for existing requests to
1463 * be processed. However, since we haven't kicked off any request
1464 * processing for this host, this may deadlock.
1465 *
1466 * If scsi_allocate_request() fails for what ever reason, we
1467 * completely forget to lock the door.
1468 */ 1459 */
1469static void scsi_eh_lock_door(struct scsi_device *sdev) 1460static void scsi_eh_lock_door(struct scsi_device *sdev)
1470{ 1461{
1471 struct request *req; 1462 struct request *req;
1472 1463
1464 /*
1465 * blk_get_request with GFP_KERNEL (__GFP_WAIT) sleeps until a
1466 * request becomes available
1467 */
1473 req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL); 1468 req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
1474 if (!req)
1475 return;
1476 1469
1477 req->cmd[0] = ALLOW_MEDIUM_REMOVAL; 1470 req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
1478 req->cmd[1] = 0; 1471 req->cmd[1] = 0;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index bb218c8b6e98..f3c40898fc7d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -240,11 +240,11 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
240 * is invalid. Prevent the garbage from being misinterpreted 240 * is invalid. Prevent the garbage from being misinterpreted
241 * and prevent security leaks by zeroing out the excess data. 241 * and prevent security leaks by zeroing out the excess data.
242 */ 242 */
243 if (unlikely(req->data_len > 0 && req->data_len <= bufflen)) 243 if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
244 memset(buffer + (bufflen - req->data_len), 0, req->data_len); 244 memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
245 245
246 if (resid) 246 if (resid)
247 *resid = req->data_len; 247 *resid = req->resid_len;
248 ret = req->errors; 248 ret = req->errors;
249 out: 249 out:
250 blk_put_request(req); 250 blk_put_request(req);
@@ -546,14 +546,9 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
546 * to queue the remainder of them. 546 * to queue the remainder of them.
547 */ 547 */
548 if (blk_end_request(req, error, bytes)) { 548 if (blk_end_request(req, error, bytes)) {
549 int leftover = (req->hard_nr_sectors << 9);
550
551 if (blk_pc_request(req))
552 leftover = req->data_len;
553
554 /* kill remainder if no retrys */ 549 /* kill remainder if no retrys */
555 if (error && scsi_noretry_cmd(cmd)) 550 if (error && scsi_noretry_cmd(cmd))
556 blk_end_request(req, error, leftover); 551 blk_end_request_all(req, error);
557 else { 552 else {
558 if (requeue) { 553 if (requeue) {
559 /* 554 /*
@@ -673,34 +668,6 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
673EXPORT_SYMBOL(scsi_release_buffers); 668EXPORT_SYMBOL(scsi_release_buffers);
674 669
675/* 670/*
676 * Bidi commands Must be complete as a whole, both sides at once.
677 * If part of the bytes were written and lld returned
678 * scsi_in()->resid and/or scsi_out()->resid this information will be left
679 * in req->data_len and req->next_rq->data_len. The upper-layer driver can
680 * decide what to do with this information.
681 */
682static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
683{
684 struct request *req = cmd->request;
685 unsigned int dlen = req->data_len;
686 unsigned int next_dlen = req->next_rq->data_len;
687
688 req->data_len = scsi_out(cmd)->resid;
689 req->next_rq->data_len = scsi_in(cmd)->resid;
690
691 /* The req and req->next_rq have not been completed */
692 BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));
693
694 scsi_release_buffers(cmd);
695
696 /*
697 * This will goose the queue request function at the end, so we don't
698 * need to worry about launching another command.
699 */
700 scsi_next_command(cmd);
701}
702
703/*
704 * Function: scsi_io_completion() 671 * Function: scsi_io_completion()
705 * 672 *
706 * Purpose: Completion processing for block device I/O requests. 673 * Purpose: Completion processing for block device I/O requests.
@@ -739,7 +706,6 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
739void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) 706void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
740{ 707{
741 int result = cmd->result; 708 int result = cmd->result;
742 int this_count;
743 struct request_queue *q = cmd->device->request_queue; 709 struct request_queue *q = cmd->device->request_queue;
744 struct request *req = cmd->request; 710 struct request *req = cmd->request;
745 int error = 0; 711 int error = 0;
@@ -773,12 +739,22 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
773 if (!sense_deferred) 739 if (!sense_deferred)
774 error = -EIO; 740 error = -EIO;
775 } 741 }
742
743 req->resid_len = scsi_get_resid(cmd);
744
776 if (scsi_bidi_cmnd(cmd)) { 745 if (scsi_bidi_cmnd(cmd)) {
777 /* will also release_buffers */ 746 /*
778 scsi_end_bidi_request(cmd); 747 * Bidi commands Must be complete as a whole,
748 * both sides at once.
749 */
750 req->next_rq->resid_len = scsi_in(cmd)->resid;
751
752 blk_end_request_all(req, 0);
753
754 scsi_release_buffers(cmd);
755 scsi_next_command(cmd);
779 return; 756 return;
780 } 757 }
781 req->data_len = scsi_get_resid(cmd);
782 } 758 }
783 759
784 BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */ 760 BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */
@@ -787,9 +763,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
787 * Next deal with any sectors which we were able to correctly 763 * Next deal with any sectors which we were able to correctly
788 * handle. 764 * handle.
789 */ 765 */
790 SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, " 766 SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
791 "%d bytes done.\n", 767 "%d bytes done.\n",
792 req->nr_sectors, good_bytes)); 768 blk_rq_sectors(req), good_bytes));
793 769
794 /* 770 /*
795 * Recovered errors need reporting, but they're always treated 771 * Recovered errors need reporting, but they're always treated
@@ -812,7 +788,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
812 */ 788 */
813 if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) 789 if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
814 return; 790 return;
815 this_count = blk_rq_bytes(req);
816 791
817 error = -EIO; 792 error = -EIO;
818 793
@@ -922,7 +897,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
922 if (driver_byte(result) & DRIVER_SENSE) 897 if (driver_byte(result) & DRIVER_SENSE)
923 scsi_print_sense("", cmd); 898 scsi_print_sense("", cmd);
924 } 899 }
925 blk_end_request(req, -EIO, blk_rq_bytes(req)); 900 blk_end_request_all(req, -EIO);
926 scsi_next_command(cmd); 901 scsi_next_command(cmd);
927 break; 902 break;
928 case ACTION_REPREP: 903 case ACTION_REPREP:
@@ -965,10 +940,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
965 count = blk_rq_map_sg(req->q, req, sdb->table.sgl); 940 count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
966 BUG_ON(count > sdb->table.nents); 941 BUG_ON(count > sdb->table.nents);
967 sdb->table.nents = count; 942 sdb->table.nents = count;
968 if (blk_pc_request(req)) 943 sdb->length = blk_rq_bytes(req);
969 sdb->length = req->data_len;
970 else
971 sdb->length = req->nr_sectors << 9;
972 return BLKPREP_OK; 944 return BLKPREP_OK;
973} 945}
974 946
@@ -1087,22 +1059,21 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1087 if (unlikely(ret)) 1059 if (unlikely(ret))
1088 return ret; 1060 return ret;
1089 } else { 1061 } else {
1090 BUG_ON(req->data_len); 1062 BUG_ON(blk_rq_bytes(req));
1091 BUG_ON(req->data);
1092 1063
1093 memset(&cmd->sdb, 0, sizeof(cmd->sdb)); 1064 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1094 req->buffer = NULL; 1065 req->buffer = NULL;
1095 } 1066 }
1096 1067
1097 cmd->cmd_len = req->cmd_len; 1068 cmd->cmd_len = req->cmd_len;
1098 if (!req->data_len) 1069 if (!blk_rq_bytes(req))
1099 cmd->sc_data_direction = DMA_NONE; 1070 cmd->sc_data_direction = DMA_NONE;
1100 else if (rq_data_dir(req) == WRITE) 1071 else if (rq_data_dir(req) == WRITE)
1101 cmd->sc_data_direction = DMA_TO_DEVICE; 1072 cmd->sc_data_direction = DMA_TO_DEVICE;
1102 else 1073 else
1103 cmd->sc_data_direction = DMA_FROM_DEVICE; 1074 cmd->sc_data_direction = DMA_FROM_DEVICE;
1104 1075
1105 cmd->transfersize = req->data_len; 1076 cmd->transfersize = blk_rq_bytes(req);
1106 cmd->allowed = req->retries; 1077 cmd->allowed = req->retries;
1107 return BLKPREP_OK; 1078 return BLKPREP_OK;
1108} 1079}
@@ -1212,7 +1183,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1212 break; 1183 break;
1213 case BLKPREP_DEFER: 1184 case BLKPREP_DEFER:
1214 /* 1185 /*
1215 * If we defer, the elv_next_request() returns NULL, but the 1186 * If we defer, the blk_peek_request() returns NULL, but the
1216 * queue must be restarted, so we plug here if no returning 1187 * queue must be restarted, so we plug here if no returning
1217 * command will automatically do that. 1188 * command will automatically do that.
1218 */ 1189 */
@@ -1236,6 +1207,7 @@ int scsi_prep_fn(struct request_queue *q, struct request *req)
1236 ret = scsi_setup_blk_pc_cmnd(sdev, req); 1207 ret = scsi_setup_blk_pc_cmnd(sdev, req);
1237 return scsi_prep_return(q, req, ret); 1208 return scsi_prep_return(q, req, ret);
1238} 1209}
1210EXPORT_SYMBOL(scsi_prep_fn);
1239 1211
1240/* 1212/*
1241 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else 1213 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
@@ -1388,7 +1360,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
1388 struct scsi_target *starget = scsi_target(sdev); 1360 struct scsi_target *starget = scsi_target(sdev);
1389 struct Scsi_Host *shost = sdev->host; 1361 struct Scsi_Host *shost = sdev->host;
1390 1362
1391 blkdev_dequeue_request(req); 1363 blk_start_request(req);
1392 1364
1393 if (unlikely(cmd == NULL)) { 1365 if (unlikely(cmd == NULL)) {
1394 printk(KERN_CRIT "impossible request in %s.\n", 1366 printk(KERN_CRIT "impossible request in %s.\n",
@@ -1480,7 +1452,7 @@ static void scsi_request_fn(struct request_queue *q)
1480 1452
1481 if (!sdev) { 1453 if (!sdev) {
1482 printk("scsi: killing requests for dead queue\n"); 1454 printk("scsi: killing requests for dead queue\n");
1483 while ((req = elv_next_request(q)) != NULL) 1455 while ((req = blk_peek_request(q)) != NULL)
1484 scsi_kill_request(req, q); 1456 scsi_kill_request(req, q);
1485 return; 1457 return;
1486 } 1458 }
@@ -1501,7 +1473,7 @@ static void scsi_request_fn(struct request_queue *q)
1501 * that the request is fully prepared even if we cannot 1473 * that the request is fully prepared even if we cannot
1502 * accept it. 1474 * accept it.
1503 */ 1475 */
1504 req = elv_next_request(q); 1476 req = blk_peek_request(q);
1505 if (!req || !scsi_dev_queue_ready(q, sdev)) 1477 if (!req || !scsi_dev_queue_ready(q, sdev))
1506 break; 1478 break;
1507 1479
@@ -1517,7 +1489,7 @@ static void scsi_request_fn(struct request_queue *q)
1517 * Remove the request from the request list. 1489 * Remove the request from the request list.
1518 */ 1490 */
1519 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req))) 1491 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1520 blkdev_dequeue_request(req); 1492 blk_start_request(req);
1521 sdev->device_busy++; 1493 sdev->device_busy++;
1522 1494
1523 spin_unlock(q->queue_lock); 1495 spin_unlock(q->queue_lock);
@@ -2441,20 +2413,18 @@ int
2441scsi_internal_device_unblock(struct scsi_device *sdev) 2413scsi_internal_device_unblock(struct scsi_device *sdev)
2442{ 2414{
2443 struct request_queue *q = sdev->request_queue; 2415 struct request_queue *q = sdev->request_queue;
2444 int err;
2445 unsigned long flags; 2416 unsigned long flags;
2446 2417
2447 /* 2418 /*
2448 * Try to transition the scsi device to SDEV_RUNNING 2419 * Try to transition the scsi device to SDEV_RUNNING
2449 * and goose the device queue if successful. 2420 * and goose the device queue if successful.
2450 */ 2421 */
2451 err = scsi_device_set_state(sdev, SDEV_RUNNING); 2422 if (sdev->sdev_state == SDEV_BLOCK)
2452 if (err) { 2423 sdev->sdev_state = SDEV_RUNNING;
2453 err = scsi_device_set_state(sdev, SDEV_CREATED); 2424 else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
2454 2425 sdev->sdev_state = SDEV_CREATED;
2455 if (err) 2426 else
2456 return err; 2427 return -EINVAL;
2457 }
2458 2428
2459 spin_lock_irqsave(q->queue_lock, flags); 2429 spin_lock_irqsave(q->queue_lock, flags);
2460 blk_start_queue(q); 2430 blk_start_queue(q);
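
The scsi_lib.c hunks above move the residual byte count into the new req->resid_len field instead of folding it back into req->data_len, read request sizes uniformly through blk_rq_bytes()/blk_rq_sectors(), and complete whole requests with blk_end_request_all(). A minimal sketch of the LLD side that feeds this path, assuming a hypothetical lld_complete_cmd() routine; scsi_set_resid(), scsi_bufflen() and the scsi_done callback are the standard midlayer interfaces of this era:

	#include <scsi/scsi.h>
	#include <scsi/scsi_cmnd.h>

	/* Hypothetical LLD completion routine: record any underrun so that
	 * scsi_io_completion() above can copy it into req->resid_len. */
	static void lld_complete_cmd(struct scsi_cmnd *cmd, unsigned int bytes_done)
	{
		scsi_set_resid(cmd, scsi_bufflen(cmd) - bytes_done);
		cmd->result = DID_OK << 16;
		cmd->scsi_done(cmd);	/* completion ends up in scsi_io_completion() */
	}
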
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index fbc83bebdd8e..021e503c8c44 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -39,9 +39,25 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
39#endif 39#endif
40 40
41/* scsi_devinfo.c */ 41/* scsi_devinfo.c */
42
43/* list of keys for the lists */
44enum {
45 SCSI_DEVINFO_GLOBAL = 0,
46 SCSI_DEVINFO_SPI,
47};
48
42extern int scsi_get_device_flags(struct scsi_device *sdev, 49extern int scsi_get_device_flags(struct scsi_device *sdev,
43 const unsigned char *vendor, 50 const unsigned char *vendor,
44 const unsigned char *model); 51 const unsigned char *model);
52extern int scsi_get_device_flags_keyed(struct scsi_device *sdev,
53 const unsigned char *vendor,
54 const unsigned char *model, int key);
55extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor,
56 char *model, char *strflags,
57 int flags, int key);
58extern int scsi_dev_info_add_list(int key, const char *name);
59extern int scsi_dev_info_remove_list(int key);
60
45extern int __init scsi_init_devinfo(void); 61extern int __init scsi_init_devinfo(void);
46extern void scsi_exit_devinfo(void); 62extern void scsi_exit_devinfo(void);
47 63
@@ -71,7 +87,6 @@ extern int scsi_init_queue(void);
71extern void scsi_exit_queue(void); 87extern void scsi_exit_queue(void);
72struct request_queue; 88struct request_queue;
73struct request; 89struct request;
74extern int scsi_prep_fn(struct request_queue *, struct request *);
75extern struct kmem_cache *scsi_sdb_cache; 90extern struct kmem_cache *scsi_sdb_cache;
76 91
77/* scsi_proc.c */ 92/* scsi_proc.c */
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index e2b50d8f57a8..c44783801402 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -115,12 +115,12 @@ MODULE_PARM_DESC(max_report_luns,
115 "REPORT LUNS maximum number of LUNS received (should be" 115 "REPORT LUNS maximum number of LUNS received (should be"
116 " between 1 and 16384)"); 116 " between 1 and 16384)");
117 117
118static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ+3; 118static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
119 119
120module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR); 120module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
121MODULE_PARM_DESC(inq_timeout, 121MODULE_PARM_DESC(inq_timeout,
122 "Timeout (in seconds) waiting for devices to answer INQUIRY." 122 "Timeout (in seconds) waiting for devices to answer INQUIRY."
123 " Default is 5. Some non-compliant devices need more."); 123 " Default is 20. Some devices may need more; most need less.");
124 124
125/* This lock protects only this list */ 125/* This lock protects only this list */
126static DEFINE_SPINLOCK(async_scan_lock); 126static DEFINE_SPINLOCK(async_scan_lock);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index fa4711d12744..91482f2dcc50 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -420,29 +420,12 @@ static int scsi_bus_resume(struct device * dev)
420 return err; 420 return err;
421} 421}
422 422
423static int scsi_bus_remove(struct device *dev)
424{
425 struct device_driver *drv = dev->driver;
426 struct scsi_device *sdev = to_scsi_device(dev);
427 int err = 0;
428
429 /* reset the prep_fn back to the default since the
430 * driver may have altered it and it's being removed */
431 blk_queue_prep_rq(sdev->request_queue, scsi_prep_fn);
432
433 if (drv && drv->remove)
434 err = drv->remove(dev);
435
436 return 0;
437}
438
439struct bus_type scsi_bus_type = { 423struct bus_type scsi_bus_type = {
440 .name = "scsi", 424 .name = "scsi",
441 .match = scsi_bus_match, 425 .match = scsi_bus_match,
442 .uevent = scsi_bus_uevent, 426 .uevent = scsi_bus_uevent,
443 .suspend = scsi_bus_suspend, 427 .suspend = scsi_bus_suspend,
444 .resume = scsi_bus_resume, 428 .resume = scsi_bus_resume,
445 .remove = scsi_bus_remove,
446}; 429};
447EXPORT_SYMBOL_GPL(scsi_bus_type); 430EXPORT_SYMBOL_GPL(scsi_bus_type);
448 431
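
With scsi_bus_remove() gone, the prep_fn reset it used to perform no longer happens at the bus level; that is why scsi_prep_fn() is exported from scsi_lib.c and its private declaration dropped from scsi_priv.h earlier in this patch, so an upper-level driver can restore the default itself when it detaches. A minimal sketch of such a remove path, assuming a hypothetical example_uld_remove() with the usual driver-core remove signature:

	/* Hypothetical ULD remove path: hand request preparation back to the
	 * midlayer default before the driver goes away. */
	static int example_uld_remove(struct device *dev)
	{
		struct scsi_device *sdev = to_scsi_device(dev);

		blk_queue_prep_rq(sdev->request_queue, scsi_prep_fn);
		return 0;
	}
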
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 48ba413f7f6a..10303272ba45 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -387,7 +387,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
387 * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the 387 * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the
388 * length for us. 388 * length for us.
389 */ 389 */
390 cmd->sdb.length = rq->data_len; 390 cmd->sdb.length = blk_rq_bytes(rq);
391 391
392 return 0; 392 return 0;
393 393
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index a152f89ae51c..2eee9e6e4fe8 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -35,6 +35,7 @@
35#include <linux/netlink.h> 35#include <linux/netlink.h>
36#include <net/netlink.h> 36#include <net/netlink.h>
37#include <scsi/scsi_netlink_fc.h> 37#include <scsi/scsi_netlink_fc.h>
38#include <scsi/scsi_bsg_fc.h>
38#include "scsi_priv.h" 39#include "scsi_priv.h"
39#include "scsi_transport_fc_internal.h" 40#include "scsi_transport_fc_internal.h"
40 41
@@ -43,6 +44,10 @@ static void fc_vport_sched_delete(struct work_struct *work);
43static int fc_vport_setup(struct Scsi_Host *shost, int channel, 44static int fc_vport_setup(struct Scsi_Host *shost, int channel,
44 struct device *pdev, struct fc_vport_identifiers *ids, 45 struct device *pdev, struct fc_vport_identifiers *ids,
45 struct fc_vport **vport); 46 struct fc_vport **vport);
47static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *);
48static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *);
49static void fc_bsg_remove(struct request_queue *);
50static void fc_bsg_goose_queue(struct fc_rport *);
46 51
47/* 52/*
48 * Redefine so that we can have same named attributes in the 53 * Redefine so that we can have same named attributes in the
@@ -411,13 +416,26 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
411 return -ENOMEM; 416 return -ENOMEM;
412 } 417 }
413 418
419 fc_bsg_hostadd(shost, fc_host);
420 /* ignore any bsg add error - we just can't do sgio */
421
422 return 0;
423}
424
425static int fc_host_remove(struct transport_container *tc, struct device *dev,
426 struct device *cdev)
427{
428 struct Scsi_Host *shost = dev_to_shost(dev);
429 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
430
431 fc_bsg_remove(fc_host->rqst_q);
414 return 0; 432 return 0;
415} 433}
416 434
417static DECLARE_TRANSPORT_CLASS(fc_host_class, 435static DECLARE_TRANSPORT_CLASS(fc_host_class,
418 "fc_host", 436 "fc_host",
419 fc_host_setup, 437 fc_host_setup,
420 NULL, 438 fc_host_remove,
421 NULL); 439 NULL);
422 440
423/* 441/*
@@ -2375,6 +2393,7 @@ fc_rport_final_delete(struct work_struct *work)
2375 scsi_flush_work(shost); 2393 scsi_flush_work(shost);
2376 2394
2377 fc_terminate_rport_io(rport); 2395 fc_terminate_rport_io(rport);
2396
2378 /* 2397 /*
2379 * Cancel any outstanding timers. These should really exist 2398 * Cancel any outstanding timers. These should really exist
2380 * only when rmmod'ing the LLDD and we're asking for 2399 * only when rmmod'ing the LLDD and we're asking for
@@ -2407,6 +2426,8 @@ fc_rport_final_delete(struct work_struct *work)
2407 (i->f->dev_loss_tmo_callbk)) 2426 (i->f->dev_loss_tmo_callbk))
2408 i->f->dev_loss_tmo_callbk(rport); 2427 i->f->dev_loss_tmo_callbk(rport);
2409 2428
2429 fc_bsg_remove(rport->rqst_q);
2430
2410 transport_remove_device(dev); 2431 transport_remove_device(dev);
2411 device_del(dev); 2432 device_del(dev);
2412 transport_destroy_device(dev); 2433 transport_destroy_device(dev);
@@ -2494,6 +2515,9 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
2494 transport_add_device(dev); 2515 transport_add_device(dev);
2495 transport_configure_device(dev); 2516 transport_configure_device(dev);
2496 2517
2518 fc_bsg_rportadd(shost, rport);
2519 /* ignore any bsg add error - we just can't do sgio */
2520
2497 if (rport->roles & FC_PORT_ROLE_FCP_TARGET) { 2521 if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
2498 /* initiate a scan of the target */ 2522 /* initiate a scan of the target */
2499 rport->flags |= FC_RPORT_SCAN_PENDING; 2523 rport->flags |= FC_RPORT_SCAN_PENDING;
@@ -2658,6 +2682,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
2658 spin_unlock_irqrestore(shost->host_lock, 2682 spin_unlock_irqrestore(shost->host_lock,
2659 flags); 2683 flags);
2660 2684
2685 fc_bsg_goose_queue(rport);
2686
2661 return rport; 2687 return rport;
2662 } 2688 }
2663 } 2689 }
@@ -3343,6 +3369,596 @@ fc_vport_sched_delete(struct work_struct *work)
3343} 3369}
3344 3370
3345 3371
3372/*
3373 * BSG support
3374 */
3375
3376
3377/**
3378 * fc_destroy_bsgjob - routine to teardown/delete a fc bsg job
3379 * @job: fc_bsg_job that is to be torn down
3380 */
3381static void
3382fc_destroy_bsgjob(struct fc_bsg_job *job)
3383{
3384 unsigned long flags;
3385
3386 spin_lock_irqsave(&job->job_lock, flags);
3387 if (job->ref_cnt) {
3388 spin_unlock_irqrestore(&job->job_lock, flags);
3389 return;
3390 }
3391 spin_unlock_irqrestore(&job->job_lock, flags);
3392
3393 put_device(job->dev); /* release reference for the request */
3394
3395 kfree(job->request_payload.sg_list);
3396 kfree(job->reply_payload.sg_list);
3397 kfree(job);
3398}
3399
3400/**
3401 * fc_bsg_jobdone - completion routine for bsg requests that the LLD has
3402 * completed
3403 * @job: fc_bsg_job that is complete
3404 */
3405static void
3406fc_bsg_jobdone(struct fc_bsg_job *job)
3407{
3408 struct request *req = job->req;
3409 struct request *rsp = req->next_rq;
3410 int err;
3411
3412 err = job->req->errors = job->reply->result;
3413
3414 if (err < 0)
3415 /* we're only returning the result field in the reply */
3416 job->req->sense_len = sizeof(uint32_t);
3417 else
3418 job->req->sense_len = job->reply_len;
3419
3420 /* we assume all request payload was transferred, residual == 0 */
3421 req->resid_len = 0;
3422
3423 if (rsp) {
3424 WARN_ON(job->reply->reply_payload_rcv_len > rsp->resid_len);
3425
3426 /* set reply (bidi) residual */
3427 rsp->resid_len -= min(job->reply->reply_payload_rcv_len,
3428 rsp->resid_len);
3429 }
3430 blk_complete_request(req);
3431}
3432
3433/**
3434 * fc_bsg_softirq_done - softirq done routine for destroying the bsg requests
 3435 * @rq: BSG request that holds the job to be destroyed
3436 */
3437static void fc_bsg_softirq_done(struct request *rq)
3438{
3439 struct fc_bsg_job *job = rq->special;
3440 unsigned long flags;
3441
3442 spin_lock_irqsave(&job->job_lock, flags);
3443 job->state_flags |= FC_RQST_STATE_DONE;
3444 job->ref_cnt--;
3445 spin_unlock_irqrestore(&job->job_lock, flags);
3446
3447 blk_end_request_all(rq, rq->errors);
3448 fc_destroy_bsgjob(job);
3449}
3450
3451/**
 3452 * fc_bsg_job_timeout - handler for when a bsg request times out
3453 * @req: request that timed out
3454 */
3455static enum blk_eh_timer_return
3456fc_bsg_job_timeout(struct request *req)
3457{
3458 struct fc_bsg_job *job = (void *) req->special;
3459 struct Scsi_Host *shost = job->shost;
3460 struct fc_internal *i = to_fc_internal(shost->transportt);
3461 unsigned long flags;
3462 int err = 0, done = 0;
3463
3464 if (job->rport && job->rport->port_state == FC_PORTSTATE_BLOCKED)
3465 return BLK_EH_RESET_TIMER;
3466
3467 spin_lock_irqsave(&job->job_lock, flags);
3468 if (job->state_flags & FC_RQST_STATE_DONE)
3469 done = 1;
3470 else
3471 job->ref_cnt++;
3472 spin_unlock_irqrestore(&job->job_lock, flags);
3473
3474 if (!done && i->f->bsg_timeout) {
3475 /* call LLDD to abort the i/o as it has timed out */
3476 err = i->f->bsg_timeout(job);
3477 if (err)
3478 printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
3479 "abort failed with status %d\n", err);
3480 }
3481
3482 /* the blk_end_sync_io() doesn't check the error */
3483 if (done)
3484 return BLK_EH_NOT_HANDLED;
3485 else
3486 return BLK_EH_HANDLED;
3487}
3488
3489static int
3490fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req)
3491{
3492 size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);
3493
3494 BUG_ON(!req->nr_phys_segments);
3495
3496 buf->sg_list = kzalloc(sz, GFP_KERNEL);
3497 if (!buf->sg_list)
3498 return -ENOMEM;
3499 sg_init_table(buf->sg_list, req->nr_phys_segments);
3500 buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
3501 buf->payload_len = blk_rq_bytes(req);
3502 return 0;
3503}
3504
3505
3506/**
3507 * fc_req_to_bsgjob - Allocate/create the fc_bsg_job structure for the
3508 * bsg request
3509 * @shost: SCSI Host corresponding to the bsg object
3510 * @rport: (optional) FC Remote Port corresponding to the bsg object
3511 * @req: BSG request that needs a job structure
3512 */
3513static int
3514fc_req_to_bsgjob(struct Scsi_Host *shost, struct fc_rport *rport,
3515 struct request *req)
3516{
3517 struct fc_internal *i = to_fc_internal(shost->transportt);
3518 struct request *rsp = req->next_rq;
3519 struct fc_bsg_job *job;
3520 int ret;
3521
3522 BUG_ON(req->special);
3523
3524 job = kzalloc(sizeof(struct fc_bsg_job) + i->f->dd_bsg_size,
3525 GFP_KERNEL);
3526 if (!job)
3527 return -ENOMEM;
3528
3529 /*
3530 * Note: this is a bit silly.
3531 * The request gets formatted as a SGIO v4 ioctl request, which
3532 * then gets reformatted as a blk request, which then gets
3533 * reformatted as a fc bsg request. And on completion, we have
3534 * to wrap return results such that SGIO v4 thinks it was a scsi
3535 * status. I hope this was all worth it.
3536 */
3537
3538 req->special = job;
3539 job->shost = shost;
3540 job->rport = rport;
3541 job->req = req;
3542 if (i->f->dd_bsg_size)
3543 job->dd_data = (void *)&job[1];
3544 spin_lock_init(&job->job_lock);
3545 job->request = (struct fc_bsg_request *)req->cmd;
3546 job->request_len = req->cmd_len;
3547 job->reply = req->sense;
3548 job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
3549 * allocated */
3550 if (req->bio) {
3551 ret = fc_bsg_map_buffer(&job->request_payload, req);
3552 if (ret)
3553 goto failjob_rls_job;
3554 }
3555 if (rsp && rsp->bio) {
3556 ret = fc_bsg_map_buffer(&job->reply_payload, rsp);
3557 if (ret)
3558 goto failjob_rls_rqst_payload;
3559 }
3560 job->job_done = fc_bsg_jobdone;
3561 if (rport)
3562 job->dev = &rport->dev;
3563 else
3564 job->dev = &shost->shost_gendev;
3565 get_device(job->dev); /* take a reference for the request */
3566
3567 job->ref_cnt = 1;
3568
3569 return 0;
3570
3571
3572failjob_rls_rqst_payload:
3573 kfree(job->request_payload.sg_list);
3574failjob_rls_job:
3575 kfree(job);
3576 return -ENOMEM;
3577}
3578
3579
3580enum fc_dispatch_result {
3581 FC_DISPATCH_BREAK, /* on return, q is locked, break from q loop */
3582 FC_DISPATCH_LOCKED, /* on return, q is locked, continue on */
3583 FC_DISPATCH_UNLOCKED, /* on return, q is unlocked, continue on */
3584};
3585
3586
3587/**
3588 * fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD
3589 * @shost: scsi host rport attached to
3590 * @job: bsg job to be processed
3591 */
3592static enum fc_dispatch_result
3593fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
3594 struct fc_bsg_job *job)
3595{
3596 struct fc_internal *i = to_fc_internal(shost->transportt);
3597 int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
3598 int ret;
3599
3600 /* Validate the host command */
3601 switch (job->request->msgcode) {
3602 case FC_BSG_HST_ADD_RPORT:
3603 cmdlen += sizeof(struct fc_bsg_host_add_rport);
3604 break;
3605
3606 case FC_BSG_HST_DEL_RPORT:
3607 cmdlen += sizeof(struct fc_bsg_host_del_rport);
3608 break;
3609
3610 case FC_BSG_HST_ELS_NOLOGIN:
3611 cmdlen += sizeof(struct fc_bsg_host_els);
 3612 /* there had better be xmt and rcv payloads */
3613 if ((!job->request_payload.payload_len) ||
3614 (!job->reply_payload.payload_len)) {
3615 ret = -EINVAL;
3616 goto fail_host_msg;
3617 }
3618 break;
3619
3620 case FC_BSG_HST_CT:
3621 cmdlen += sizeof(struct fc_bsg_host_ct);
3622 /* there better be xmt and rcv payloads */
3623 if ((!job->request_payload.payload_len) ||
3624 (!job->reply_payload.payload_len)) {
3625 ret = -EINVAL;
3626 goto fail_host_msg;
3627 }
3628 break;
3629
3630 case FC_BSG_HST_VENDOR:
3631 cmdlen += sizeof(struct fc_bsg_host_vendor);
3632 if ((shost->hostt->vendor_id == 0L) ||
3633 (job->request->rqst_data.h_vendor.vendor_id !=
3634 shost->hostt->vendor_id)) {
3635 ret = -ESRCH;
3636 goto fail_host_msg;
3637 }
3638 break;
3639
3640 default:
3641 ret = -EBADR;
3642 goto fail_host_msg;
3643 }
3644
3645 /* check if we really have all the request data needed */
3646 if (job->request_len < cmdlen) {
3647 ret = -ENOMSG;
3648 goto fail_host_msg;
3649 }
3650
3651 ret = i->f->bsg_request(job);
3652 if (!ret)
3653 return FC_DISPATCH_UNLOCKED;
3654
3655fail_host_msg:
3656 /* return the errno failure code as the only status */
3657 BUG_ON(job->reply_len < sizeof(uint32_t));
3658 job->reply->result = ret;
3659 job->reply_len = sizeof(uint32_t);
3660 fc_bsg_jobdone(job);
3661 return FC_DISPATCH_UNLOCKED;
3662}
3663
3664
3665/*
3666 * fc_bsg_goose_queue - restart rport queue in case it was stopped
3667 * @rport: rport to be restarted
3668 */
3669static void
3670fc_bsg_goose_queue(struct fc_rport *rport)
3671{
3672 int flagset;
3673
3674 if (!rport->rqst_q)
3675 return;
3676
3677 get_device(&rport->dev);
3678
3679 spin_lock(rport->rqst_q->queue_lock);
3680 flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
3681 !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
3682 if (flagset)
3683 queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
3684 __blk_run_queue(rport->rqst_q);
3685 if (flagset)
3686 queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
3687 spin_unlock(rport->rqst_q->queue_lock);
3688
3689 put_device(&rport->dev);
3690}
3691
3692
3693/**
3694 * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
3695 * @shost: scsi host rport attached to
3696 * @rport: rport request destined to
3697 * @job: bsg job to be processed
3698 */
3699static enum fc_dispatch_result
3700fc_bsg_rport_dispatch(struct request_queue *q, struct Scsi_Host *shost,
3701 struct fc_rport *rport, struct fc_bsg_job *job)
3702{
3703 struct fc_internal *i = to_fc_internal(shost->transportt);
3704 int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
3705 int ret;
3706
3707 /* Validate the rport command */
3708 switch (job->request->msgcode) {
3709 case FC_BSG_RPT_ELS:
3710 cmdlen += sizeof(struct fc_bsg_rport_els);
3711 goto check_bidi;
3712
3713 case FC_BSG_RPT_CT:
3714 cmdlen += sizeof(struct fc_bsg_rport_ct);
3715check_bidi:
3716 /* there better be xmt and rcv payloads */
3717 if ((!job->request_payload.payload_len) ||
3718 (!job->reply_payload.payload_len)) {
3719 ret = -EINVAL;
3720 goto fail_rport_msg;
3721 }
3722 break;
3723 default:
3724 ret = -EBADR;
3725 goto fail_rport_msg;
3726 }
3727
3728 /* check if we really have all the request data needed */
3729 if (job->request_len < cmdlen) {
3730 ret = -ENOMSG;
3731 goto fail_rport_msg;
3732 }
3733
3734 ret = i->f->bsg_request(job);
3735 if (!ret)
3736 return FC_DISPATCH_UNLOCKED;
3737
3738fail_rport_msg:
3739 /* return the errno failure code as the only status */
3740 BUG_ON(job->reply_len < sizeof(uint32_t));
3741 job->reply->result = ret;
3742 job->reply_len = sizeof(uint32_t);
3743 fc_bsg_jobdone(job);
3744 return FC_DISPATCH_UNLOCKED;
3745}
3746
3747
3748/**
3749 * fc_bsg_request_handler - generic handler for bsg requests
3750 * @q: request queue to manage
3751 * @shost: Scsi_Host related to the bsg object
3752 * @rport: FC remote port related to the bsg object (optional)
3753 * @dev: device structure for bsg object
3754 */
3755static void
3756fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
3757 struct fc_rport *rport, struct device *dev)
3758{
3759 struct request *req;
3760 struct fc_bsg_job *job;
3761 enum fc_dispatch_result ret;
3762
3763 if (!get_device(dev))
3764 return;
3765
3766 while (!blk_queue_plugged(q)) {
3767 if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED))
3768 break;
3769
3770 req = blk_fetch_request(q);
3771 if (!req)
3772 break;
3773
3774 if (rport && (rport->port_state != FC_PORTSTATE_ONLINE)) {
3775 req->errors = -ENXIO;
3776 spin_unlock_irq(q->queue_lock);
3777 blk_end_request(req, -ENXIO, blk_rq_bytes(req));
3778 spin_lock_irq(q->queue_lock);
3779 continue;
3780 }
3781
3782 spin_unlock_irq(q->queue_lock);
3783
3784 ret = fc_req_to_bsgjob(shost, rport, req);
3785 if (ret) {
3786 req->errors = ret;
3787 blk_end_request(req, ret, blk_rq_bytes(req));
3788 spin_lock_irq(q->queue_lock);
3789 continue;
3790 }
3791
3792 job = req->special;
3793
3794 /* check if we have the msgcode value at least */
3795 if (job->request_len < sizeof(uint32_t)) {
3796 BUG_ON(job->reply_len < sizeof(uint32_t));
3797 job->reply->result = -ENOMSG;
3798 job->reply_len = sizeof(uint32_t);
3799 fc_bsg_jobdone(job);
3800 spin_lock_irq(q->queue_lock);
3801 continue;
3802 }
3803
3804 /* the dispatch routines will unlock the queue_lock */
3805 if (rport)
3806 ret = fc_bsg_rport_dispatch(q, shost, rport, job);
3807 else
3808 ret = fc_bsg_host_dispatch(q, shost, job);
3809
 3810 /* did the dispatcher hit a state where it can't process any more? */
3811 if (ret == FC_DISPATCH_BREAK)
3812 break;
3813
 3814 /* did the dispatcher release the lock? */
3815 if (ret == FC_DISPATCH_UNLOCKED)
3816 spin_lock_irq(q->queue_lock);
3817 }
3818
3819 spin_unlock_irq(q->queue_lock);
3820 put_device(dev);
3821 spin_lock_irq(q->queue_lock);
3822}
3823
3824
3825/**
3826 * fc_bsg_host_handler - handler for bsg requests for a fc host
3827 * @q: fc host request queue
3828 */
3829static void
3830fc_bsg_host_handler(struct request_queue *q)
3831{
3832 struct Scsi_Host *shost = q->queuedata;
3833
3834 fc_bsg_request_handler(q, shost, NULL, &shost->shost_gendev);
3835}
3836
3837
3838/**
3839 * fc_bsg_rport_handler - handler for bsg requests for a fc rport
3840 * @q: rport request queue
3841 */
3842static void
3843fc_bsg_rport_handler(struct request_queue *q)
3844{
3845 struct fc_rport *rport = q->queuedata;
3846 struct Scsi_Host *shost = rport_to_shost(rport);
3847
3848 fc_bsg_request_handler(q, shost, rport, &rport->dev);
3849}
3850
3851
3852/**
3853 * fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests
3854 * @shost: shost for fc_host
3855 * @fc_host: fc_host adding the structures to
3856 */
3857static int
3858fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
3859{
3860 struct device *dev = &shost->shost_gendev;
3861 struct fc_internal *i = to_fc_internal(shost->transportt);
3862 struct request_queue *q;
3863 int err;
3864 char bsg_name[20];
3865
3866 fc_host->rqst_q = NULL;
3867
3868 if (!i->f->bsg_request)
3869 return -ENOTSUPP;
3870
3871 snprintf(bsg_name, sizeof(bsg_name),
3872 "fc_host%d", shost->host_no);
3873
3874 q = __scsi_alloc_queue(shost, fc_bsg_host_handler);
3875 if (!q) {
3876 printk(KERN_ERR "fc_host%d: bsg interface failed to "
3877 "initialize - no request queue\n",
3878 shost->host_no);
3879 return -ENOMEM;
3880 }
3881
3882 q->queuedata = shost;
3883 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
3884 blk_queue_softirq_done(q, fc_bsg_softirq_done);
3885 blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
3886 blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
3887
3888 err = bsg_register_queue(q, dev, bsg_name, NULL);
3889 if (err) {
3890 printk(KERN_ERR "fc_host%d: bsg interface failed to "
3891 "initialize - register queue\n",
3892 shost->host_no);
3893 blk_cleanup_queue(q);
3894 return err;
3895 }
3896
3897 fc_host->rqst_q = q;
3898 return 0;
3899}
3900
3901
3902/**
3903 * fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests
3904 * @shost: shost that rport is attached to
3905 * @rport: rport that the bsg hooks are being attached to
3906 */
3907static int
3908fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
3909{
3910 struct device *dev = &rport->dev;
3911 struct fc_internal *i = to_fc_internal(shost->transportt);
3912 struct request_queue *q;
3913 int err;
3914
3915 rport->rqst_q = NULL;
3916
3917 if (!i->f->bsg_request)
3918 return -ENOTSUPP;
3919
3920 q = __scsi_alloc_queue(shost, fc_bsg_rport_handler);
3921 if (!q) {
3922 printk(KERN_ERR "%s: bsg interface failed to "
3923 "initialize - no request queue\n",
3924 dev->kobj.name);
3925 return -ENOMEM;
3926 }
3927
3928 q->queuedata = rport;
3929 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
3930 blk_queue_softirq_done(q, fc_bsg_softirq_done);
3931 blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
3932 blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
3933
3934 err = bsg_register_queue(q, dev, NULL, NULL);
3935 if (err) {
3936 printk(KERN_ERR "%s: bsg interface failed to "
3937 "initialize - register queue\n",
3938 dev->kobj.name);
3939 blk_cleanup_queue(q);
3940 return err;
3941 }
3942
3943 rport->rqst_q = q;
3944 return 0;
3945}
3946
3947
3948/**
3949 * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports
3950 * @q: the request_queue that is to be torn down.
3951 */
3952static void
3953fc_bsg_remove(struct request_queue *q)
3954{
3955 if (q) {
3956 bsg_unregister_queue(q);
3957 blk_cleanup_queue(q);
3958 }
3959}
3960
3961
3346/* Original Author: Martin Hicks */ 3962/* Original Author: Martin Hicks */
3347MODULE_AUTHOR("James Smart"); 3963MODULE_AUTHOR("James Smart");
3348MODULE_DESCRIPTION("FC Transport Attributes"); 3964MODULE_DESCRIPTION("FC Transport Attributes");
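
The BSG plumbing added above only allocates, queues and completes fc_bsg_job structures; the real work is delegated to the LLD through the bsg_request method that fc_bsg_hostadd()/fc_bsg_rportadd() check for. A minimal sketch of such a handler, touching only the job fields used by the transport code above; lld_bsg_request() and its vendor-command handling are hypothetical:

	/* Hypothetical LLD ->bsg_request() handler. */
	static int lld_bsg_request(struct fc_bsg_job *job)
	{
		switch (job->request->msgcode) {
		case FC_BSG_HST_VENDOR:
			/* ... issue the command described by job->request_payload ... */
			job->reply->reply_payload_rcv_len = 0;
			job->reply->result = 0;
			job->job_done(job);	/* ends up in fc_bsg_jobdone() above */
			return 0;
		default:
			/* a nonzero return makes the dispatcher complete the job
			 * with this value in reply->result (see fail_host_msg) */
			return -ENOSYS;
		}
	}
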
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0a2ce7b6325c..783e33c65eb7 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -37,7 +37,6 @@
37#define ISCSI_TRANSPORT_VERSION "2.0-870" 37#define ISCSI_TRANSPORT_VERSION "2.0-870"
38 38
39struct iscsi_internal { 39struct iscsi_internal {
40 int daemon_pid;
41 struct scsi_transport_template t; 40 struct scsi_transport_template t;
42 struct iscsi_transport *iscsi_transport; 41 struct iscsi_transport *iscsi_transport;
43 struct list_head list; 42 struct list_head list;
@@ -693,6 +692,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
693 "Too many iscsi targets. Max " 692 "Too many iscsi targets. Max "
694 "number of targets is %d.\n", 693 "number of targets is %d.\n",
695 ISCSI_MAX_TARGET - 1); 694 ISCSI_MAX_TARGET - 1);
695 err = -EOVERFLOW;
696 goto release_host; 696 goto release_host;
697 } 697 }
698 } 698 }
@@ -938,23 +938,9 @@ iscsi_if_transport_lookup(struct iscsi_transport *tt)
938} 938}
939 939
940static int 940static int
941iscsi_broadcast_skb(struct sk_buff *skb, gfp_t gfp) 941iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
942{ 942{
943 return netlink_broadcast(nls, skb, 0, 1, gfp); 943 return nlmsg_multicast(nls, skb, 0, group, gfp);
944}
945
946static int
947iscsi_unicast_skb(struct sk_buff *skb, int pid)
948{
949 int rc;
950
951 rc = netlink_unicast(nls, skb, pid, MSG_DONTWAIT);
952 if (rc < 0) {
953 printk(KERN_ERR "iscsi: can not unicast skb (%d)\n", rc);
954 return rc;
955 }
956
957 return 0;
958} 944}
959 945
960int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, 946int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
@@ -980,7 +966,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
980 return -ENOMEM; 966 return -ENOMEM;
981 } 967 }
982 968
983 nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); 969 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
984 ev = NLMSG_DATA(nlh); 970 ev = NLMSG_DATA(nlh);
985 memset(ev, 0, sizeof(*ev)); 971 memset(ev, 0, sizeof(*ev));
986 ev->transport_handle = iscsi_handle(conn->transport); 972 ev->transport_handle = iscsi_handle(conn->transport);
@@ -991,10 +977,45 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
991 memcpy(pdu, hdr, sizeof(struct iscsi_hdr)); 977 memcpy(pdu, hdr, sizeof(struct iscsi_hdr));
992 memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size); 978 memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size);
993 979
994 return iscsi_unicast_skb(skb, priv->daemon_pid); 980 return iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
995} 981}
996EXPORT_SYMBOL_GPL(iscsi_recv_pdu); 982EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
997 983
984int iscsi_offload_mesg(struct Scsi_Host *shost,
985 struct iscsi_transport *transport, uint32_t type,
986 char *data, uint16_t data_size)
987{
988 struct nlmsghdr *nlh;
989 struct sk_buff *skb;
990 struct iscsi_uevent *ev;
991 int len = NLMSG_SPACE(sizeof(*ev) + data_size);
992
993 skb = alloc_skb(len, GFP_NOIO);
994 if (!skb) {
995 printk(KERN_ERR "can not deliver iscsi offload message:OOM\n");
996 return -ENOMEM;
997 }
998
999 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
1000 ev = NLMSG_DATA(nlh);
1001 memset(ev, 0, sizeof(*ev));
1002 ev->type = type;
1003 ev->transport_handle = iscsi_handle(transport);
1004 switch (type) {
1005 case ISCSI_KEVENT_PATH_REQ:
1006 ev->r.req_path.host_no = shost->host_no;
1007 break;
1008 case ISCSI_KEVENT_IF_DOWN:
1009 ev->r.notify_if_down.host_no = shost->host_no;
1010 break;
1011 }
1012
1013 memcpy((char *)ev + sizeof(*ev), data, data_size);
1014
1015 return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_NOIO);
1016}
1017EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
1018
998void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) 1019void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
999{ 1020{
1000 struct nlmsghdr *nlh; 1021 struct nlmsghdr *nlh;
@@ -1014,7 +1035,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
1014 return; 1035 return;
1015 } 1036 }
1016 1037
1017 nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); 1038 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
1018 ev = NLMSG_DATA(nlh); 1039 ev = NLMSG_DATA(nlh);
1019 ev->transport_handle = iscsi_handle(conn->transport); 1040 ev->transport_handle = iscsi_handle(conn->transport);
1020 ev->type = ISCSI_KEVENT_CONN_ERROR; 1041 ev->type = ISCSI_KEVENT_CONN_ERROR;
@@ -1022,7 +1043,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
1022 ev->r.connerror.cid = conn->cid; 1043 ev->r.connerror.cid = conn->cid;
1023 ev->r.connerror.sid = iscsi_conn_get_sid(conn); 1044 ev->r.connerror.sid = iscsi_conn_get_sid(conn);
1024 1045
1025 iscsi_broadcast_skb(skb, GFP_ATOMIC); 1046 iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
1026 1047
1027 iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n", 1048 iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
1028 error); 1049 error);
@@ -1030,8 +1051,8 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
1030EXPORT_SYMBOL_GPL(iscsi_conn_error_event); 1051EXPORT_SYMBOL_GPL(iscsi_conn_error_event);
1031 1052
1032static int 1053static int
1033iscsi_if_send_reply(int pid, int seq, int type, int done, int multi, 1054iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
1034 void *payload, int size) 1055 void *payload, int size)
1035{ 1056{
1036 struct sk_buff *skb; 1057 struct sk_buff *skb;
1037 struct nlmsghdr *nlh; 1058 struct nlmsghdr *nlh;
@@ -1045,10 +1066,10 @@ iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
1045 return -ENOMEM; 1066 return -ENOMEM;
1046 } 1067 }
1047 1068
1048 nlh = __nlmsg_put(skb, pid, seq, t, (len - sizeof(*nlh)), 0); 1069 nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0);
1049 nlh->nlmsg_flags = flags; 1070 nlh->nlmsg_flags = flags;
1050 memcpy(NLMSG_DATA(nlh), payload, size); 1071 memcpy(NLMSG_DATA(nlh), payload, size);
1051 return iscsi_unicast_skb(skb, pid); 1072 return iscsi_multicast_skb(skb, group, GFP_ATOMIC);
1052} 1073}
1053 1074
1054static int 1075static int
@@ -1085,7 +1106,7 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
1085 return -ENOMEM; 1106 return -ENOMEM;
1086 } 1107 }
1087 1108
1088 nlhstat = __nlmsg_put(skbstat, priv->daemon_pid, 0, 0, 1109 nlhstat = __nlmsg_put(skbstat, 0, 0, 0,
1089 (len - sizeof(*nlhstat)), 0); 1110 (len - sizeof(*nlhstat)), 0);
1090 evstat = NLMSG_DATA(nlhstat); 1111 evstat = NLMSG_DATA(nlhstat);
1091 memset(evstat, 0, sizeof(*evstat)); 1112 memset(evstat, 0, sizeof(*evstat));
@@ -1109,7 +1130,8 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
1109 skb_trim(skbstat, NLMSG_ALIGN(actual_size)); 1130 skb_trim(skbstat, NLMSG_ALIGN(actual_size));
1110 nlhstat->nlmsg_len = actual_size; 1131 nlhstat->nlmsg_len = actual_size;
1111 1132
1112 err = iscsi_unicast_skb(skbstat, priv->daemon_pid); 1133 err = iscsi_multicast_skb(skbstat, ISCSI_NL_GRP_ISCSID,
1134 GFP_ATOMIC);
1113 } while (err < 0 && err != -ECONNREFUSED); 1135 } while (err < 0 && err != -ECONNREFUSED);
1114 1136
1115 return err; 1137 return err;
@@ -1143,7 +1165,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
1143 return -ENOMEM; 1165 return -ENOMEM;
1144 } 1166 }
1145 1167
1146 nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); 1168 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
1147 ev = NLMSG_DATA(nlh); 1169 ev = NLMSG_DATA(nlh);
1148 ev->transport_handle = iscsi_handle(session->transport); 1170 ev->transport_handle = iscsi_handle(session->transport);
1149 1171
@@ -1172,7 +1194,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
1172 * this will occur if the daemon is not up, so we just warn 1194 * this will occur if the daemon is not up, so we just warn
1173 * the user and when the daemon is restarted it will handle it 1195 * the user and when the daemon is restarted it will handle it
1174 */ 1196 */
1175 rc = iscsi_broadcast_skb(skb, GFP_KERNEL); 1197 rc = iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL);
1176 if (rc == -ESRCH) 1198 if (rc == -ESRCH)
1177 iscsi_cls_session_printk(KERN_ERR, session, 1199 iscsi_cls_session_printk(KERN_ERR, session,
1178 "Cannot notify userspace of session " 1200 "Cannot notify userspace of session "
@@ -1268,26 +1290,54 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
1268 return err; 1290 return err;
1269} 1291}
1270 1292
1293static int iscsi_if_ep_connect(struct iscsi_transport *transport,
1294 struct iscsi_uevent *ev, int msg_type)
1295{
1296 struct iscsi_endpoint *ep;
1297 struct sockaddr *dst_addr;
1298 struct Scsi_Host *shost = NULL;
1299 int non_blocking, err = 0;
1300
1301 if (!transport->ep_connect)
1302 return -EINVAL;
1303
1304 if (msg_type == ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST) {
1305 shost = scsi_host_lookup(ev->u.ep_connect_through_host.host_no);
1306 if (!shost) {
1307 printk(KERN_ERR "ep connect failed. Could not find "
1308 "host no %u\n",
1309 ev->u.ep_connect_through_host.host_no);
1310 return -ENODEV;
1311 }
1312 non_blocking = ev->u.ep_connect_through_host.non_blocking;
1313 } else
1314 non_blocking = ev->u.ep_connect.non_blocking;
1315
1316 dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
1317 ep = transport->ep_connect(shost, dst_addr, non_blocking);
1318 if (IS_ERR(ep)) {
1319 err = PTR_ERR(ep);
1320 goto release_host;
1321 }
1322
1323 ev->r.ep_connect_ret.handle = ep->id;
1324release_host:
1325 if (shost)
1326 scsi_host_put(shost);
1327 return err;
1328}
1329
1271static int 1330static int
1272iscsi_if_transport_ep(struct iscsi_transport *transport, 1331iscsi_if_transport_ep(struct iscsi_transport *transport,
1273 struct iscsi_uevent *ev, int msg_type) 1332 struct iscsi_uevent *ev, int msg_type)
1274{ 1333{
1275 struct iscsi_endpoint *ep; 1334 struct iscsi_endpoint *ep;
1276 struct sockaddr *dst_addr;
1277 int rc = 0; 1335 int rc = 0;
1278 1336
1279 switch (msg_type) { 1337 switch (msg_type) {
1338 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
1280 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT: 1339 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
1281 if (!transport->ep_connect) 1340 rc = iscsi_if_ep_connect(transport, ev, msg_type);
1282 return -EINVAL;
1283
1284 dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
1285 ep = transport->ep_connect(dst_addr,
1286 ev->u.ep_connect.non_blocking);
1287 if (IS_ERR(ep))
1288 return PTR_ERR(ep);
1289
1290 ev->r.ep_connect_ret.handle = ep->id;
1291 break; 1341 break;
1292 case ISCSI_UEVENT_TRANSPORT_EP_POLL: 1342 case ISCSI_UEVENT_TRANSPORT_EP_POLL:
1293 if (!transport->ep_poll) 1343 if (!transport->ep_poll)
@@ -1365,7 +1415,31 @@ iscsi_set_host_param(struct iscsi_transport *transport,
1365} 1415}
1366 1416
1367static int 1417static int
1368iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 1418iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
1419{
1420 struct Scsi_Host *shost;
1421 struct iscsi_path *params;
1422 int err;
1423
1424 if (!transport->set_path)
1425 return -ENOSYS;
1426
1427 shost = scsi_host_lookup(ev->u.set_path.host_no);
1428 if (!shost) {
1429 printk(KERN_ERR "set path could not find host no %u\n",
1430 ev->u.set_path.host_no);
1431 return -ENODEV;
1432 }
1433
1434 params = (struct iscsi_path *)((char *)ev + sizeof(*ev));
1435 err = transport->set_path(shost, params);
1436
1437 scsi_host_put(shost);
1438 return err;
1439}
1440
1441static int
1442iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
1369{ 1443{
1370 int err = 0; 1444 int err = 0;
1371 struct iscsi_uevent *ev = NLMSG_DATA(nlh); 1445 struct iscsi_uevent *ev = NLMSG_DATA(nlh);
@@ -1375,6 +1449,11 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1375 struct iscsi_cls_conn *conn; 1449 struct iscsi_cls_conn *conn;
1376 struct iscsi_endpoint *ep = NULL; 1450 struct iscsi_endpoint *ep = NULL;
1377 1451
1452 if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE)
1453 *group = ISCSI_NL_GRP_UIP;
1454 else
1455 *group = ISCSI_NL_GRP_ISCSID;
1456
1378 priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle)); 1457 priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
1379 if (!priv) 1458 if (!priv)
1380 return -EINVAL; 1459 return -EINVAL;
@@ -1383,8 +1462,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1383 if (!try_module_get(transport->owner)) 1462 if (!try_module_get(transport->owner))
1384 return -EINVAL; 1463 return -EINVAL;
1385 1464
1386 priv->daemon_pid = NETLINK_CREDS(skb)->pid;
1387
1388 switch (nlh->nlmsg_type) { 1465 switch (nlh->nlmsg_type) {
1389 case ISCSI_UEVENT_CREATE_SESSION: 1466 case ISCSI_UEVENT_CREATE_SESSION:
1390 err = iscsi_if_create_session(priv, ep, ev, 1467 err = iscsi_if_create_session(priv, ep, ev,
@@ -1469,6 +1546,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1469 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT: 1546 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
1470 case ISCSI_UEVENT_TRANSPORT_EP_POLL: 1547 case ISCSI_UEVENT_TRANSPORT_EP_POLL:
1471 case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: 1548 case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
1549 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
1472 err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type); 1550 err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type);
1473 break; 1551 break;
1474 case ISCSI_UEVENT_TGT_DSCVR: 1552 case ISCSI_UEVENT_TGT_DSCVR:
@@ -1477,6 +1555,9 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1477 case ISCSI_UEVENT_SET_HOST_PARAM: 1555 case ISCSI_UEVENT_SET_HOST_PARAM:
1478 err = iscsi_set_host_param(transport, ev); 1556 err = iscsi_set_host_param(transport, ev);
1479 break; 1557 break;
1558 case ISCSI_UEVENT_PATH_UPDATE:
1559 err = iscsi_set_path(transport, ev);
1560 break;
1480 default: 1561 default:
1481 err = -ENOSYS; 1562 err = -ENOSYS;
1482 break; 1563 break;
@@ -1499,6 +1580,7 @@ iscsi_if_rx(struct sk_buff *skb)
1499 uint32_t rlen; 1580 uint32_t rlen;
1500 struct nlmsghdr *nlh; 1581 struct nlmsghdr *nlh;
1501 struct iscsi_uevent *ev; 1582 struct iscsi_uevent *ev;
1583 uint32_t group;
1502 1584
1503 nlh = nlmsg_hdr(skb); 1585 nlh = nlmsg_hdr(skb);
1504 if (nlh->nlmsg_len < sizeof(*nlh) || 1586 if (nlh->nlmsg_len < sizeof(*nlh) ||
@@ -1511,7 +1593,7 @@ iscsi_if_rx(struct sk_buff *skb)
1511 if (rlen > skb->len) 1593 if (rlen > skb->len)
1512 rlen = skb->len; 1594 rlen = skb->len;
1513 1595
1514 err = iscsi_if_recv_msg(skb, nlh); 1596 err = iscsi_if_recv_msg(skb, nlh, &group);
1515 if (err) { 1597 if (err) {
1516 ev->type = ISCSI_KEVENT_IF_ERROR; 1598 ev->type = ISCSI_KEVENT_IF_ERROR;
1517 ev->iferror = err; 1599 ev->iferror = err;
@@ -1525,8 +1607,7 @@ iscsi_if_rx(struct sk_buff *skb)
1525 */ 1607 */
1526 if (ev->type == ISCSI_UEVENT_GET_STATS && !err) 1608 if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
1527 break; 1609 break;
1528 err = iscsi_if_send_reply( 1610 err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
1529 NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
1530 nlh->nlmsg_type, 0, 0, ev, sizeof(*ev)); 1611 nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
1531 } while (err < 0 && err != -ECONNREFUSED); 1612 } while (err < 0 && err != -ECONNREFUSED);
1532 skb_pull(skb, rlen); 1613 skb_pull(skb, rlen);
@@ -1774,7 +1855,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
1774 if (!priv) 1855 if (!priv)
1775 return NULL; 1856 return NULL;
1776 INIT_LIST_HEAD(&priv->list); 1857 INIT_LIST_HEAD(&priv->list);
1777 priv->daemon_pid = -1;
1778 priv->iscsi_transport = tt; 1858 priv->iscsi_transport = tt;
1779 priv->t.user_scan = iscsi_user_scan; 1859 priv->t.user_scan = iscsi_user_scan;
1780 priv->t.create_work_queue = 1; 1860 priv->t.create_work_queue = 1;
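
With the per-transport daemon_pid and the unicast helper removed, every event now goes out on a netlink multicast group, so a listener no longer has to be the process that last talked to the kernel. A userspace sketch of joining the iscsid group follows; ISCSI_NL_GRP_ISCSID comes from the iscsi_if.h additions that accompany this series (userspace is assumed to carry a copy of that header, as open-iscsi does), and its value is assumed small enough for the nl_groups bitmask:

	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>
	#include <scsi/iscsi_if.h>	/* ISCSI_NL_GRP_ISCSID (assumed header copy) */

	/* Subscribe to iSCSI events delivered via nlmsg_multicast(). */
	static int open_iscsi_event_socket(void)
	{
		struct sockaddr_nl addr = {
			.nl_family = AF_NETLINK,
			.nl_groups = 1 << (ISCSI_NL_GRP_ISCSID - 1),
		};
		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ISCSI);

		if (fd < 0)
			return -1;
		if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
			close(fd);
			return -1;
		}
		return fd;
	}
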
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 50988cbf7b2d..0895d3c71b03 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -163,21 +163,19 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
163 int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *); 163 int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
164 164
165 while (!blk_queue_plugged(q)) { 165 while (!blk_queue_plugged(q)) {
166 req = elv_next_request(q); 166 req = blk_fetch_request(q);
167 if (!req) 167 if (!req)
168 break; 168 break;
169 169
170 blkdev_dequeue_request(req);
171
172 spin_unlock_irq(q->queue_lock); 170 spin_unlock_irq(q->queue_lock);
173 171
174 handler = to_sas_internal(shost->transportt)->f->smp_handler; 172 handler = to_sas_internal(shost->transportt)->f->smp_handler;
175 ret = handler(shost, rphy, req); 173 ret = handler(shost, rphy, req);
176 req->errors = ret; 174 req->errors = ret;
177 175
178 spin_lock_irq(q->queue_lock); 176 blk_end_request_all(req, ret);
179 177
180 req->end_io(req, ret); 178 spin_lock_irq(q->queue_lock);
181 } 179 }
182} 180}
183 181
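
The sas_smp_request() change is the same block-layer conversion used in scsi_lib.c and the FC BSG handler above: blk_fetch_request() combines blk_peek_request() with blk_start_request(), and blk_end_request_all() completes the whole request (it takes the queue lock itself, so it is called with the lock dropped). A generic sketch of the resulting request_fn shape, with example_handle() standing in for the per-request work:

	#include <linux/blkdev.h>
	#include <linux/spinlock.h>

	static int example_handle(struct request *req)
	{
		return 0;	/* placeholder for the per-request work */
	}

	/* A request_fn is entered with q->queue_lock held. */
	static void example_request_fn(struct request_queue *q)
	{
		struct request *req;

		while ((req = blk_fetch_request(q)) != NULL) {
			int err;

			spin_unlock_irq(q->queue_lock);

			err = example_handle(req);
			blk_end_request_all(req, err);

			spin_lock_irq(q->queue_lock);
		}
	}
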
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index f49f55c6bfc8..c25bd9a34e02 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -46,6 +46,22 @@
46#define DV_RETRIES 3 /* should only need at most 46#define DV_RETRIES 3 /* should only need at most
47 * two cc/ua clears */ 47 * two cc/ua clears */
48 48
49/* Our blacklist flags */
50enum {
51 SPI_BLIST_NOIUS = 0x1,
52};
53
54/* blacklist table, modelled on scsi_devinfo.c */
55static struct {
56 char *vendor;
57 char *model;
58 unsigned flags;
59} spi_static_device_list[] __initdata = {
60 {"HP", "Ultrium 3-SCSI", SPI_BLIST_NOIUS },
61 {"IBM", "ULTRIUM-TD3", SPI_BLIST_NOIUS },
62 {NULL, NULL, 0}
63};
64
49/* Private data accessors (keep these out of the header file) */ 65/* Private data accessors (keep these out of the header file) */
50#define spi_dv_in_progress(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_in_progress) 66#define spi_dv_in_progress(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_in_progress)
51#define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex) 67#define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex)
@@ -207,6 +223,9 @@ static int spi_device_configure(struct transport_container *tc,
207{ 223{
208 struct scsi_device *sdev = to_scsi_device(dev); 224 struct scsi_device *sdev = to_scsi_device(dev);
209 struct scsi_target *starget = sdev->sdev_target; 225 struct scsi_target *starget = sdev->sdev_target;
226 unsigned bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
227 &sdev->inquiry[16],
228 SCSI_DEVINFO_SPI);
210 229
211 /* Populate the target capability fields with the values 230 /* Populate the target capability fields with the values
212 * gleaned from the device inquiry */ 231 * gleaned from the device inquiry */
@@ -216,6 +235,10 @@ static int spi_device_configure(struct transport_container *tc,
216 spi_support_dt(starget) = scsi_device_dt(sdev); 235 spi_support_dt(starget) = scsi_device_dt(sdev);
217 spi_support_dt_only(starget) = scsi_device_dt_only(sdev); 236 spi_support_dt_only(starget) = scsi_device_dt_only(sdev);
218 spi_support_ius(starget) = scsi_device_ius(sdev); 237 spi_support_ius(starget) = scsi_device_ius(sdev);
238 if (bflags & SPI_BLIST_NOIUS) {
239 dev_info(dev, "Information Units disabled by blacklist\n");
240 spi_support_ius(starget) = 0;
241 }
219 spi_support_qas(starget) = scsi_device_qas(sdev); 242 spi_support_qas(starget) = scsi_device_qas(sdev);
220 243
221 return 0; 244 return 0;
@@ -234,8 +257,10 @@ static int spi_setup_transport_attrs(struct transport_container *tc,
234 spi_width(starget) = 0; /* narrow */ 257 spi_width(starget) = 0; /* narrow */
235 spi_max_width(starget) = 1; 258 spi_max_width(starget) = 1;
236 spi_iu(starget) = 0; /* no IU */ 259 spi_iu(starget) = 0; /* no IU */
260 spi_max_iu(starget) = 1;
237 spi_dt(starget) = 0; /* ST */ 261 spi_dt(starget) = 0; /* ST */
238 spi_qas(starget) = 0; 262 spi_qas(starget) = 0;
263 spi_max_qas(starget) = 1;
239 spi_wr_flow(starget) = 0; 264 spi_wr_flow(starget) = 0;
240 spi_rd_strm(starget) = 0; 265 spi_rd_strm(starget) = 0;
241 spi_rti(starget) = 0; 266 spi_rti(starget) = 0;
@@ -360,9 +385,9 @@ static DEVICE_ATTR(field, S_IRUGO, \
360/* The Parallel SCSI Transport Attributes: */ 385/* The Parallel SCSI Transport Attributes: */
361spi_transport_max_attr(offset, "%d\n"); 386spi_transport_max_attr(offset, "%d\n");
362spi_transport_max_attr(width, "%d\n"); 387spi_transport_max_attr(width, "%d\n");
363spi_transport_rd_attr(iu, "%d\n"); 388spi_transport_max_attr(iu, "%d\n");
364spi_transport_rd_attr(dt, "%d\n"); 389spi_transport_rd_attr(dt, "%d\n");
365spi_transport_rd_attr(qas, "%d\n"); 390spi_transport_max_attr(qas, "%d\n");
366spi_transport_rd_attr(wr_flow, "%d\n"); 391spi_transport_rd_attr(wr_flow, "%d\n");
367spi_transport_rd_attr(rd_strm, "%d\n"); 392spi_transport_rd_attr(rd_strm, "%d\n");
368spi_transport_rd_attr(rti, "%d\n"); 393spi_transport_rd_attr(rti, "%d\n");
@@ -831,7 +856,7 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
831 return; 856 return;
832 } 857 }
833 858
834 if (!scsi_device_wide(sdev)) { 859 if (!spi_support_wide(starget)) {
835 spi_max_width(starget) = 0; 860 spi_max_width(starget) = 0;
836 max_width = 0; 861 max_width = 0;
837 } 862 }
@@ -858,7 +883,7 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
858 return; 883 return;
859 884
860 /* device can't handle synchronous */ 885 /* device can't handle synchronous */
861 if (!scsi_device_sync(sdev) && !scsi_device_dt(sdev)) 886 if (!spi_support_sync(starget) && !spi_support_dt(starget))
862 return; 887 return;
863 888
864 /* len == -1 is the signal that we need to ascertain the 889 /* len == -1 is the signal that we need to ascertain the
@@ -874,13 +899,14 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
874 899
875 /* try QAS requests; this should be harmless to set if the 900 /* try QAS requests; this should be harmless to set if the
876 * target supports it */ 901 * target supports it */
877 if (scsi_device_qas(sdev)) { 902 if (spi_support_qas(starget) && spi_max_qas(starget)) {
878 DV_SET(qas, 1); 903 DV_SET(qas, 1);
879 } else { 904 } else {
880 DV_SET(qas, 0); 905 DV_SET(qas, 0);
881 } 906 }
882 907
883 if (scsi_device_ius(sdev) && min_period < 9) { 908 if (spi_support_ius(starget) && spi_max_iu(starget) &&
909 min_period < 9) {
884 /* This is u320 (or u640). Set IU transfers */ 910 /* This is u320 (or u640). Set IU transfers */
885 DV_SET(iu, 1); 911 DV_SET(iu, 1);
886 /* Then set the optional parameters */ 912 /* Then set the optional parameters */
@@ -900,7 +926,7 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
900 i->f->get_signalling(shost); 926 i->f->get_signalling(shost);
901 if (spi_signalling(shost) == SPI_SIGNAL_SE || 927 if (spi_signalling(shost) == SPI_SIGNAL_SE ||
902 spi_signalling(shost) == SPI_SIGNAL_HVD || 928 spi_signalling(shost) == SPI_SIGNAL_HVD ||
903 !scsi_device_dt(sdev)) { 929 !spi_support_dt(starget)) {
904 DV_SET(dt, 0); 930 DV_SET(dt, 0);
905 } else { 931 } else {
906 DV_SET(dt, 1); 932 DV_SET(dt, 1);
@@ -1412,12 +1438,18 @@ static mode_t target_attribute_is_visible(struct kobject *kobj,
1412 else if (attr == &dev_attr_iu.attr && 1438 else if (attr == &dev_attr_iu.attr &&
1413 spi_support_ius(starget)) 1439 spi_support_ius(starget))
1414 return TARGET_ATTRIBUTE_HELPER(iu); 1440 return TARGET_ATTRIBUTE_HELPER(iu);
1441 else if (attr == &dev_attr_max_iu.attr &&
1442 spi_support_ius(starget))
1443 return TARGET_ATTRIBUTE_HELPER(iu);
1415 else if (attr == &dev_attr_dt.attr && 1444 else if (attr == &dev_attr_dt.attr &&
1416 spi_support_dt(starget)) 1445 spi_support_dt(starget))
1417 return TARGET_ATTRIBUTE_HELPER(dt); 1446 return TARGET_ATTRIBUTE_HELPER(dt);
1418 else if (attr == &dev_attr_qas.attr && 1447 else if (attr == &dev_attr_qas.attr &&
1419 spi_support_qas(starget)) 1448 spi_support_qas(starget))
1420 return TARGET_ATTRIBUTE_HELPER(qas); 1449 return TARGET_ATTRIBUTE_HELPER(qas);
1450 else if (attr == &dev_attr_max_qas.attr &&
1451 spi_support_qas(starget))
1452 return TARGET_ATTRIBUTE_HELPER(qas);
1421 else if (attr == &dev_attr_wr_flow.attr && 1453 else if (attr == &dev_attr_wr_flow.attr &&
1422 spi_support_ius(starget)) 1454 spi_support_ius(starget))
1423 return TARGET_ATTRIBUTE_HELPER(wr_flow); 1455 return TARGET_ATTRIBUTE_HELPER(wr_flow);
@@ -1447,8 +1479,10 @@ static struct attribute *target_attributes[] = {
1447 &dev_attr_width.attr, 1479 &dev_attr_width.attr,
1448 &dev_attr_max_width.attr, 1480 &dev_attr_max_width.attr,
1449 &dev_attr_iu.attr, 1481 &dev_attr_iu.attr,
1482 &dev_attr_max_iu.attr,
1450 &dev_attr_dt.attr, 1483 &dev_attr_dt.attr,
1451 &dev_attr_qas.attr, 1484 &dev_attr_qas.attr,
1485 &dev_attr_max_qas.attr,
1452 &dev_attr_wr_flow.attr, 1486 &dev_attr_wr_flow.attr,
1453 &dev_attr_rd_strm.attr, 1487 &dev_attr_rd_strm.attr,
1454 &dev_attr_rti.attr, 1488 &dev_attr_rti.attr,
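
The new max_iu and max_qas entries ride on the existing visibility machinery: the attribute group's is_visible() callback returns 0 to hide an attribute on targets that lack the feature. A generic sketch of that sysfs pattern, with hypothetical my_* names and the mode_t-era callback signature seen in the hunk above:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t feat_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        return sprintf(buf, "%d\n", 1);
}
static DEVICE_ATTR(feat, S_IRUGO, feat_show, NULL);

static struct attribute *my_attrs[] = {
        &dev_attr_feat.attr,
        NULL,
};

/* hypothetical capability test */
static bool my_dev_supports_feat(struct device *dev)
{
        return dev_get_drvdata(dev) != NULL;
}

static mode_t my_attr_is_visible(struct kobject *kobj, struct attribute *attr,
                                 int i)
{
        struct device *dev = container_of(kobj, struct device, kobj);

        if (attr == &dev_attr_feat.attr && !my_dev_supports_feat(dev))
                return 0;               /* hide the file entirely */
        return attr->mode;              /* otherwise keep the declared mode */
}

static struct attribute_group my_group = {
        .attrs      = my_attrs,
        .is_visible = my_attr_is_visible,
};
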
@@ -1513,7 +1547,21 @@ EXPORT_SYMBOL(spi_release_transport);
1513 1547
1514static __init int spi_transport_init(void) 1548static __init int spi_transport_init(void)
1515{ 1549{
1516 int error = transport_class_register(&spi_transport_class); 1550 int error = scsi_dev_info_add_list(SCSI_DEVINFO_SPI,
1551 "SCSI Parallel Transport Class");
1552 if (!error) {
1553 int i;
1554
1555 for (i = 0; spi_static_device_list[i].vendor; i++)
1556 scsi_dev_info_list_add_keyed(1, /* compatible */
1557 spi_static_device_list[i].vendor,
1558 spi_static_device_list[i].model,
1559 NULL,
1560 spi_static_device_list[i].flags,
1561 SCSI_DEVINFO_SPI);
1562 }
1563
1564 error = transport_class_register(&spi_transport_class);
1517 if (error) 1565 if (error)
1518 return error; 1566 return error;
1519 error = anon_transport_class_register(&spi_device_class); 1567 error = anon_transport_class_register(&spi_device_class);
@@ -1525,6 +1573,7 @@ static void __exit spi_transport_exit(void)
1525 transport_class_unregister(&spi_transport_class); 1573 transport_class_unregister(&spi_transport_class);
1526 anon_transport_class_unregister(&spi_device_class); 1574 anon_transport_class_unregister(&spi_device_class);
1527 transport_class_unregister(&spi_host_class); 1575 transport_class_unregister(&spi_host_class);
1576 scsi_dev_info_remove_list(SCSI_DEVINFO_SPI);
1528} 1577}
1529 1578
1530MODULE_AUTHOR("Martin Hicks"); 1579MODULE_AUTHOR("Martin Hicks");
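
spi_transport_init() now registers a keyed device-info list (SCSI_DEVINFO_SPI) and seeds it from spi_static_device_list[], and spi_transport_exit() tears it down with scsi_dev_info_remove_list(), so registration and removal stay paired across module load and unload. The table itself is not shown in this hunk; a sketch of the shape the init loop appears to expect (vendor/model strings plus BLIST_* flags, NULL-terminated) would be roughly:

#include <scsi/scsi_devinfo.h>

static struct {
        char *vendor;
        char *model;
        unsigned flags;
} example_static_device_list[] = {
        {"EXAMPLE", "DISK 1", BLIST_NOLUN},     /* hypothetical entries */
        {"EXAMPLE", "DISK 2", BLIST_SINGLELUN},
        {NULL, NULL, 0}
};
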
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 84044233b637..5616cd780ff3 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -384,9 +384,9 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
384 struct scsi_device *sdp = q->queuedata; 384 struct scsi_device *sdp = q->queuedata;
385 struct gendisk *disk = rq->rq_disk; 385 struct gendisk *disk = rq->rq_disk;
386 struct scsi_disk *sdkp; 386 struct scsi_disk *sdkp;
387 sector_t block = rq->sector; 387 sector_t block = blk_rq_pos(rq);
388 sector_t threshold; 388 sector_t threshold;
389 unsigned int this_count = rq->nr_sectors; 389 unsigned int this_count = blk_rq_sectors(rq);
390 int ret, host_dif; 390 int ret, host_dif;
391 391
392 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 392 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -413,10 +413,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
413 this_count)); 413 this_count));
414 414
415 if (!sdp || !scsi_device_online(sdp) || 415 if (!sdp || !scsi_device_online(sdp) ||
416 block + rq->nr_sectors > get_capacity(disk)) { 416 block + blk_rq_sectors(rq) > get_capacity(disk)) {
417 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 417 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
418 "Finishing %ld sectors\n", 418 "Finishing %u sectors\n",
419 rq->nr_sectors)); 419 blk_rq_sectors(rq)));
420 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 420 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
421 "Retry with 0x%p\n", SCpnt)); 421 "Retry with 0x%p\n", SCpnt));
422 goto out; 422 goto out;
@@ -463,7 +463,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
463 * for this. 463 * for this.
464 */ 464 */
465 if (sdp->sector_size == 1024) { 465 if (sdp->sector_size == 1024) {
466 if ((block & 1) || (rq->nr_sectors & 1)) { 466 if ((block & 1) || (blk_rq_sectors(rq) & 1)) {
467 scmd_printk(KERN_ERR, SCpnt, 467 scmd_printk(KERN_ERR, SCpnt,
468 "Bad block number requested\n"); 468 "Bad block number requested\n");
469 goto out; 469 goto out;
@@ -473,7 +473,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
473 } 473 }
474 } 474 }
475 if (sdp->sector_size == 2048) { 475 if (sdp->sector_size == 2048) {
476 if ((block & 3) || (rq->nr_sectors & 3)) { 476 if ((block & 3) || (blk_rq_sectors(rq) & 3)) {
477 scmd_printk(KERN_ERR, SCpnt, 477 scmd_printk(KERN_ERR, SCpnt,
478 "Bad block number requested\n"); 478 "Bad block number requested\n");
479 goto out; 479 goto out;
@@ -483,7 +483,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
483 } 483 }
484 } 484 }
485 if (sdp->sector_size == 4096) { 485 if (sdp->sector_size == 4096) {
486 if ((block & 7) || (rq->nr_sectors & 7)) { 486 if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
487 scmd_printk(KERN_ERR, SCpnt, 487 scmd_printk(KERN_ERR, SCpnt,
488 "Bad block number requested\n"); 488 "Bad block number requested\n");
489 goto out; 489 goto out;
@@ -512,10 +512,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
512 } 512 }
513 513
514 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 514 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
515 "%s %d/%ld 512 byte blocks.\n", 515 "%s %d/%u 512 byte blocks.\n",
516 (rq_data_dir(rq) == WRITE) ? 516 (rq_data_dir(rq) == WRITE) ?
517 "writing" : "reading", this_count, 517 "writing" : "reading", this_count,
518 rq->nr_sectors)); 518 blk_rq_sectors(rq)));
519 519
520 /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */ 520 /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
521 host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type); 521 host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
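
Throughout sd_prep_fn() the direct rq->sector and rq->nr_sectors dereferences become the blk_rq_pos()/blk_rq_sectors() accessors, which also changes the printed types (sector_t and unsigned int, hence the %u format fixes). A minimal sketch of a prep-style bounds check written against the accessors; my_prep() and the capacity argument are hypothetical:

#include <linux/blkdev.h>
#include <linux/kernel.h>

static int my_prep(struct request_queue *q, struct request *rq,
                   sector_t capacity)
{
        sector_t block = blk_rq_pos(rq);        /* start, in 512-byte sectors */
        unsigned int nr = blk_rq_sectors(rq);   /* length, in 512-byte sectors */

        if (block + nr > capacity) {
                printk(KERN_INFO "request %llu+%u runs past end of device\n",
                       (unsigned long long)block, nr);
                return BLKPREP_KILL;
        }
        return BLKPREP_OK;
}
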
@@ -971,8 +971,8 @@ static struct block_device_operations sd_fops = {
971 971
972static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) 972static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
973{ 973{
974 u64 start_lba = scmd->request->sector; 974 u64 start_lba = blk_rq_pos(scmd->request);
975 u64 end_lba = scmd->request->sector + (scsi_bufflen(scmd) / 512); 975 u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
976 u64 bad_lba; 976 u64 bad_lba;
977 int info_valid; 977 int info_valid;
978 978
@@ -1307,6 +1307,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
1307 int sense_valid = 0; 1307 int sense_valid = 0;
1308 int the_result; 1308 int the_result;
1309 int retries = 3; 1309 int retries = 3;
1310 unsigned int alignment;
1310 unsigned long long lba; 1311 unsigned long long lba;
1311 unsigned sector_size; 1312 unsigned sector_size;
1312 1313
@@ -1358,6 +1359,16 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
1358 return -EOVERFLOW; 1359 return -EOVERFLOW;
1359 } 1360 }
1360 1361
1362 /* Logical blocks per physical block exponent */
1363 sdkp->hw_sector_size = (1 << (buffer[13] & 0xf)) * sector_size;
1364
1365 /* Lowest aligned logical block */
1366 alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
1367 blk_queue_alignment_offset(sdp->request_queue, alignment);
1368 if (alignment && sdkp->first_scan)
1369 sd_printk(KERN_NOTICE, sdkp,
1370 "physical block alignment offset: %u\n", alignment);
1371
1361 sdkp->capacity = lba + 1; 1372 sdkp->capacity = lba + 1;
1362 return sector_size; 1373 return sector_size;
1363} 1374}
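
read_capacity_16() now decodes two extra pieces of the READ CAPACITY(16) parameter data: the low nibble of byte 13 is the logical-blocks-per-physical-block exponent, and bytes 14-15 (masked to 14 bits) carry the lowest aligned logical block, which is scaled to bytes and fed to blk_queue_alignment_offset(). A standalone sketch of that decode, factored into a hypothetical helper:

static void decode_rc16_topology(const unsigned char *buf,
                                 unsigned int logical_block_size,
                                 unsigned int *phys_block_size,
                                 unsigned int *alignment_bytes)
{
        /* byte 13, bits 3:0 - logical blocks per physical block exponent */
        *phys_block_size = (1u << (buf[13] & 0xf)) * logical_block_size;

        /* bytes 14-15 - lowest aligned logical block address (14 bits) */
        *alignment_bytes = (((buf[14] & 0x3f) << 8) | buf[15]) *
                           logical_block_size;
}
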
@@ -1409,6 +1420,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
1409 } 1420 }
1410 1421
1411 sdkp->capacity = lba + 1; 1422 sdkp->capacity = lba + 1;
1423 sdkp->hw_sector_size = sector_size;
1412 return sector_size; 1424 return sector_size;
1413} 1425}
1414 1426
@@ -1510,7 +1522,7 @@ got_data:
1510 */ 1522 */
1511 sector_size = 512; 1523 sector_size = 512;
1512 } 1524 }
1513 blk_queue_hardsect_size(sdp->request_queue, sector_size); 1525 blk_queue_logical_block_size(sdp->request_queue, sector_size);
1514 1526
1515 { 1527 {
1516 char cap_str_2[10], cap_str_10[10]; 1528 char cap_str_2[10], cap_str_10[10];
@@ -1521,11 +1533,17 @@ got_data:
1521 string_get_size(sz, STRING_UNITS_10, cap_str_10, 1533 string_get_size(sz, STRING_UNITS_10, cap_str_10,
1522 sizeof(cap_str_10)); 1534 sizeof(cap_str_10));
1523 1535
1524 if (sdkp->first_scan || old_capacity != sdkp->capacity) 1536 if (sdkp->first_scan || old_capacity != sdkp->capacity) {
1525 sd_printk(KERN_NOTICE, sdkp, 1537 sd_printk(KERN_NOTICE, sdkp,
1526 "%llu %d-byte hardware sectors: (%s/%s)\n", 1538 "%llu %d-byte logical blocks: (%s/%s)\n",
1527 (unsigned long long)sdkp->capacity, 1539 (unsigned long long)sdkp->capacity,
1528 sector_size, cap_str_10, cap_str_2); 1540 sector_size, cap_str_10, cap_str_2);
1541
1542 if (sdkp->hw_sector_size != sector_size)
1543 sd_printk(KERN_NOTICE, sdkp,
1544 "%u-byte physical blocks\n",
1545 sdkp->hw_sector_size);
1546 }
1529 } 1547 }
1530 1548
1531 /* Rescale capacity to 512-byte units */ 1549 /* Rescale capacity to 512-byte units */
@@ -1538,6 +1556,7 @@ got_data:
1538 else if (sector_size == 256) 1556 else if (sector_size == 256)
1539 sdkp->capacity >>= 1; 1557 sdkp->capacity >>= 1;
1540 1558
1559 blk_queue_physical_block_size(sdp->request_queue, sdkp->hw_sector_size);
1541 sdkp->device->sector_size = sector_size; 1560 sdkp->device->sector_size = sector_size;
1542} 1561}
1543 1562
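
The capacity path now distinguishes the addressing unit from the native media block: blk_queue_hardsect_size() becomes blk_queue_logical_block_size(), and the new blk_queue_physical_block_size() call exports hw_sector_size. A sketch of the calls for a hypothetical 512-byte-logical / 4096-byte-physical disk:

#include <linux/blkdev.h>

static void export_block_topology(struct request_queue *q)
{
        blk_queue_logical_block_size(q, 512);   /* addressing unit */
        blk_queue_physical_block_size(q, 4096); /* native media block */
        blk_queue_alignment_offset(q, 0);       /* LBA 0 starts a physical block */
}
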
@@ -1776,6 +1795,52 @@ void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
1776} 1795}
1777 1796
1778/** 1797/**
1798 * sd_read_block_limits - Query disk device for preferred I/O sizes.
1799 * @disk: disk to query
1800 */
1801static void sd_read_block_limits(struct scsi_disk *sdkp)
1802{
1803 unsigned int sector_sz = sdkp->device->sector_size;
1804 char *buffer;
1805
1806 /* Block Limits VPD */
1807 buffer = scsi_get_vpd_page(sdkp->device, 0xb0);
1808
1809 if (buffer == NULL)
1810 return;
1811
1812 blk_queue_io_min(sdkp->disk->queue,
1813 get_unaligned_be16(&buffer[6]) * sector_sz);
1814 blk_queue_io_opt(sdkp->disk->queue,
1815 get_unaligned_be32(&buffer[12]) * sector_sz);
1816
1817 kfree(buffer);
1818}
1819
1820/**
1821 * sd_read_block_characteristics - Query block dev. characteristics
1822 * @disk: disk to query
1823 */
1824static void sd_read_block_characteristics(struct scsi_disk *sdkp)
1825{
1826 char *buffer;
1827 u16 rot;
1828
1829 /* Block Device Characteristics VPD */
1830 buffer = scsi_get_vpd_page(sdkp->device, 0xb1);
1831
1832 if (buffer == NULL)
1833 return;
1834
1835 rot = get_unaligned_be16(&buffer[4]);
1836
1837 if (rot == 1)
1838 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
1839
1840 kfree(buffer);
1841}
1842
1843/**
1779 * sd_revalidate_disk - called the first time a new disk is seen, 1844 * sd_revalidate_disk - called the first time a new disk is seen,
1780 * performs disk spin up, read_capacity, etc. 1845 * performs disk spin up, read_capacity, etc.
1781 * @disk: struct gendisk we care about 1846 * @disk: struct gendisk we care about
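
The two new helpers read VPD pages 0xb0 (Block Limits) and 0xb1 (Block Device Characteristics): the former supplies the optimal transfer granularity and length that blk_queue_io_min()/blk_queue_io_opt() expose, the latter the medium rotation rate that drives QUEUE_FLAG_NONROT. A sketch of interpreting the rotation-rate field, following the usual SBC meaning of the value (0 = unreported, 1 = non-rotating, otherwise nominal RPM):

#include <linux/types.h>
#include <asm/unaligned.h>

static const char *classify_rotation(const unsigned char *vpd_b1)
{
        u16 rot = get_unaligned_be16(&vpd_b1[4]);

        if (rot == 0)
                return "rotation rate not reported";
        if (rot == 1)
                return "non-rotating (solid state)";
        return "rotating media";        /* value is the nominal RPM */
}
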
@@ -1812,6 +1877,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
1812 */ 1877 */
1813 if (sdkp->media_present) { 1878 if (sdkp->media_present) {
1814 sd_read_capacity(sdkp, buffer); 1879 sd_read_capacity(sdkp, buffer);
1880 sd_read_block_limits(sdkp);
1881 sd_read_block_characteristics(sdkp);
1815 sd_read_write_protect_flag(sdkp, buffer); 1882 sd_read_write_protect_flag(sdkp, buffer);
1816 sd_read_cache_type(sdkp, buffer); 1883 sd_read_cache_type(sdkp, buffer);
1817 sd_read_app_tag_own(sdkp, buffer); 1884 sd_read_app_tag_own(sdkp, buffer);
@@ -1902,24 +1969,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
1902 index = sdkp->index; 1969 index = sdkp->index;
1903 dev = &sdp->sdev_gendev; 1970 dev = &sdp->sdev_gendev;
1904 1971
1905 if (!sdp->request_queue->rq_timeout) {
1906 if (sdp->type != TYPE_MOD)
1907 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
1908 else
1909 blk_queue_rq_timeout(sdp->request_queue,
1910 SD_MOD_TIMEOUT);
1911 }
1912
1913 device_initialize(&sdkp->dev);
1914 sdkp->dev.parent = &sdp->sdev_gendev;
1915 sdkp->dev.class = &sd_disk_class;
1916 dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev));
1917
1918 if (device_add(&sdkp->dev))
1919 goto out_free_index;
1920
1921 get_device(&sdp->sdev_gendev);
1922
1923 if (index < SD_MAX_DISKS) { 1972 if (index < SD_MAX_DISKS) {
1924 gd->major = sd_major((index & 0xf0) >> 4); 1973 gd->major = sd_major((index & 0xf0) >> 4);
1925 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); 1974 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
@@ -1952,13 +2001,10 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
1952 add_disk(gd); 2001 add_disk(gd);
1953 sd_dif_config_host(sdkp); 2002 sd_dif_config_host(sdkp);
1954 2003
2004 sd_revalidate_disk(gd);
2005
1955 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 2006 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
1956 sdp->removable ? "removable " : ""); 2007 sdp->removable ? "removable " : "");
1957
1958 return;
1959
1960 out_free_index:
1961 ida_remove(&sd_index_ida, index);
1962} 2008}
1963 2009
1964/** 2010/**
@@ -2026,6 +2072,24 @@ static int sd_probe(struct device *dev)
2026 sdkp->openers = 0; 2072 sdkp->openers = 0;
2027 sdkp->previous_state = 1; 2073 sdkp->previous_state = 1;
2028 2074
2075 if (!sdp->request_queue->rq_timeout) {
2076 if (sdp->type != TYPE_MOD)
2077 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
2078 else
2079 blk_queue_rq_timeout(sdp->request_queue,
2080 SD_MOD_TIMEOUT);
2081 }
2082
2083 device_initialize(&sdkp->dev);
2084 sdkp->dev.parent = &sdp->sdev_gendev;
2085 sdkp->dev.class = &sd_disk_class;
2086 dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev));
2087
2088 if (device_add(&sdkp->dev))
2089 goto out_free_index;
2090
2091 get_device(&sdp->sdev_gendev);
2092
2029 async_schedule(sd_probe_async, sdkp); 2093 async_schedule(sd_probe_async, sdkp);
2030 2094
2031 return 0; 2095 return 0;
@@ -2055,8 +2119,11 @@ static int sd_probe(struct device *dev)
2055 **/ 2119 **/
2056static int sd_remove(struct device *dev) 2120static int sd_remove(struct device *dev)
2057{ 2121{
2058 struct scsi_disk *sdkp = dev_get_drvdata(dev); 2122 struct scsi_disk *sdkp;
2059 2123
2124 async_synchronize_full();
2125 sdkp = dev_get_drvdata(dev);
2126 blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn);
2060 device_del(&sdkp->dev); 2127 device_del(&sdkp->dev);
2061 del_gendisk(sdkp->disk); 2128 del_gendisk(sdkp->disk);
2062 sd_shutdown(dev); 2129 sd_shutdown(dev);
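
The probe path is rebalanced: device_initialize()/device_add() and the timeout setup move into sd_probe() proper, while sd_revalidate_disk() joins the async portion; sd_remove() gains async_synchronize_full() so teardown cannot race a still-running sd_probe_async(), and restores scsi_prep_fn so the queue's prep callback no longer points into sd. A minimal sketch of that async pairing with hypothetical my_* callbacks:

#include <linux/async.h>
#include <linux/device.h>

static void my_probe_async(void *data, async_cookie_t cookie)
{
        /* slow device interrogation (spin-up, capacity reads, ...) goes here */
}

static int my_probe(struct device *dev)
{
        async_schedule(my_probe_async, dev);    /* return to the driver core quickly */
        return 0;
}

static int my_remove(struct device *dev)
{
        async_synchronize_full();       /* wait until my_probe_async() has finished */
        /* per-device state can be torn down safely now */
        return 0;
}
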
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 708778cf5f06..8474b5bad3fe 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -45,6 +45,7 @@ struct scsi_disk {
45 unsigned int openers; /* protected by BKL for now, yuck */ 45 unsigned int openers; /* protected by BKL for now, yuck */
46 sector_t capacity; /* size in 512-byte sectors */ 46 sector_t capacity; /* size in 512-byte sectors */
47 u32 index; 47 u32 index;
48 unsigned short hw_sector_size;
48 u8 media_present; 49 u8 media_present;
49 u8 write_prot; 50 u8 write_prot;
50 u8 protection_type;/* Data Integrity Field */ 51 u8 protection_type;/* Data Integrity Field */
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 184dff492797..82f14a9482d0 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -507,7 +507,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
507 sector_sz = scmd->device->sector_size; 507 sector_sz = scmd->device->sector_size;
508 sectors = good_bytes / sector_sz; 508 sectors = good_bytes / sector_sz;
509 509
510 phys = scmd->request->sector & 0xffffffff; 510 phys = blk_rq_pos(scmd->request) & 0xffffffff;
511 if (sector_sz == 4096) 511 if (sector_sz == 4096)
512 phys >>= 3; 512 phys >>= 3;
513 513
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index e1716f14cd47..8201387b4daa 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -289,8 +289,8 @@ sg_open(struct inode *inode, struct file *filp)
289 if (list_empty(&sdp->sfds)) { /* no existing opens on this device */ 289 if (list_empty(&sdp->sfds)) { /* no existing opens on this device */
290 sdp->sgdebug = 0; 290 sdp->sgdebug = 0;
291 q = sdp->device->request_queue; 291 q = sdp->device->request_queue;
292 sdp->sg_tablesize = min(q->max_hw_segments, 292 sdp->sg_tablesize = min(queue_max_hw_segments(q),
293 q->max_phys_segments); 293 queue_max_phys_segments(q));
294 } 294 }
295 if ((sfp = sg_add_sfp(sdp, dev))) 295 if ((sfp = sg_add_sfp(sdp, dev)))
296 filp->private_data = sfp; 296 filp->private_data = sfp;
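
sg (and st further down) stop poking at request_queue fields directly and use the queue_max_hw_segments()/queue_max_phys_segments()/queue_max_sectors() accessors instead. A small sketch of the same limit queries behind hypothetical wrappers:

#include <linux/blkdev.h>
#include <linux/kernel.h>

static unsigned int my_sg_tablesize(struct request_queue *q)
{
        /* scatter-gather entries the queue can really take */
        return min(queue_max_hw_segments(q), queue_max_phys_segments(q));
}

static unsigned int my_reserve_bytes(struct request_queue *q)
{
        /* largest single transfer, expressed in bytes */
        return queue_max_sectors(q) * 512;
}
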
@@ -909,7 +909,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
909 if (val < 0) 909 if (val < 0)
910 return -EINVAL; 910 return -EINVAL;
911 val = min_t(int, val, 911 val = min_t(int, val,
912 sdp->device->request_queue->max_sectors * 512); 912 queue_max_sectors(sdp->device->request_queue) * 512);
913 if (val != sfp->reserve.bufflen) { 913 if (val != sfp->reserve.bufflen) {
914 if (sg_res_in_use(sfp) || sfp->mmap_called) 914 if (sg_res_in_use(sfp) || sfp->mmap_called)
915 return -EBUSY; 915 return -EBUSY;
@@ -919,7 +919,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
919 return 0; 919 return 0;
920 case SG_GET_RESERVED_SIZE: 920 case SG_GET_RESERVED_SIZE:
921 val = min_t(int, sfp->reserve.bufflen, 921 val = min_t(int, sfp->reserve.bufflen,
922 sdp->device->request_queue->max_sectors * 512); 922 queue_max_sectors(sdp->device->request_queue) * 512);
923 return put_user(val, ip); 923 return put_user(val, ip);
924 case SG_SET_COMMAND_Q: 924 case SG_SET_COMMAND_Q:
925 result = get_user(val, ip); 925 result = get_user(val, ip);
@@ -1059,12 +1059,13 @@ sg_ioctl(struct inode *inode, struct file *filp,
1059 return -ENODEV; 1059 return -ENODEV;
1060 return scsi_ioctl(sdp->device, cmd_in, p); 1060 return scsi_ioctl(sdp->device, cmd_in, p);
1061 case BLKSECTGET: 1061 case BLKSECTGET:
1062 return put_user(sdp->device->request_queue->max_sectors * 512, 1062 return put_user(queue_max_sectors(sdp->device->request_queue) * 512,
1063 ip); 1063 ip);
1064 case BLKTRACESETUP: 1064 case BLKTRACESETUP:
1065 return blk_trace_setup(sdp->device->request_queue, 1065 return blk_trace_setup(sdp->device->request_queue,
1066 sdp->disk->disk_name, 1066 sdp->disk->disk_name,
1067 MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1067 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
1068 NULL,
1068 (char *)arg); 1069 (char *)arg);
1069 case BLKTRACESTART: 1070 case BLKTRACESTART:
1070 return blk_trace_startstop(sdp->device->request_queue, 1); 1071 return blk_trace_startstop(sdp->device->request_queue, 1);
@@ -1260,7 +1261,7 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
1260 1261
1261 sense = rq->sense; 1262 sense = rq->sense;
1262 result = rq->errors; 1263 result = rq->errors;
1263 resid = rq->data_len; 1264 resid = rq->resid_len;
1264 1265
1265 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n", 1266 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
1266 sdp->disk->disk_name, srp->header.pack_id, result)); 1267 sdp->disk->disk_name, srp->header.pack_id, result));
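
The pass-through completion paths in sg and st switch from rq->data_len to rq->resid_len, the field that now carries the residual (untransferred) byte count once a request completes. A sketch of an end-of-request callback reading it; my_pc_done() is hypothetical and ownership of the request is left to the submitter:

#include <linux/blkdev.h>

static void my_pc_done(struct request *req, int uptodate)
{
        int result = req->errors;               /* SCSI result word */
        unsigned int resid = req->resid_len;    /* bytes that were not transferred */

        printk(KERN_DEBUG "pc request done: result=0x%x resid=%u\n",
               result, resid);
        /* the submitter is assumed to complete/free the request from here */
}
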
@@ -1377,7 +1378,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1377 sdp->device = scsidp; 1378 sdp->device = scsidp;
1378 INIT_LIST_HEAD(&sdp->sfds); 1379 INIT_LIST_HEAD(&sdp->sfds);
1379 init_waitqueue_head(&sdp->o_excl_wait); 1380 init_waitqueue_head(&sdp->o_excl_wait);
1380 sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments); 1381 sdp->sg_tablesize = min(queue_max_hw_segments(q),
1382 queue_max_phys_segments(q));
1381 sdp->index = k; 1383 sdp->index = k;
1382 kref_init(&sdp->d_ref); 1384 kref_init(&sdp->d_ref);
1383 1385
@@ -2055,7 +2057,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
2055 sg_big_buff = def_reserved_size; 2057 sg_big_buff = def_reserved_size;
2056 2058
2057 bufflen = min_t(int, sg_big_buff, 2059 bufflen = min_t(int, sg_big_buff,
2058 sdp->device->request_queue->max_sectors * 512); 2060 queue_max_sectors(sdp->device->request_queue) * 512);
2059 sg_build_reserve(sfp, bufflen); 2061 sg_build_reserve(sfp, bufflen);
2060 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n", 2062 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
2061 sfp->reserve.bufflen, sfp->reserve.k_use_sg)); 2063 sfp->reserve.bufflen, sfp->reserve.k_use_sg));
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0e1a0f2d2ad5..cce0fe4c8a3b 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -292,7 +292,8 @@ static int sr_done(struct scsi_cmnd *SCpnt)
292 if (cd->device->sector_size == 2048) 292 if (cd->device->sector_size == 2048)
293 error_sector <<= 2; 293 error_sector <<= 2;
294 error_sector &= ~(block_sectors - 1); 294 error_sector &= ~(block_sectors - 1);
295 good_bytes = (error_sector - SCpnt->request->sector) << 9; 295 good_bytes = (error_sector -
296 blk_rq_pos(SCpnt->request)) << 9;
296 if (good_bytes < 0 || good_bytes >= this_count) 297 if (good_bytes < 0 || good_bytes >= this_count)
297 good_bytes = 0; 298 good_bytes = 0;
298 /* 299 /*
@@ -349,8 +350,8 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
349 cd->disk->disk_name, block)); 350 cd->disk->disk_name, block));
350 351
351 if (!cd->device || !scsi_device_online(cd->device)) { 352 if (!cd->device || !scsi_device_online(cd->device)) {
352 SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", 353 SCSI_LOG_HLQUEUE(2, printk("Finishing %u sectors\n",
353 rq->nr_sectors)); 354 blk_rq_sectors(rq)));
354 SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt)); 355 SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
355 goto out; 356 goto out;
356 } 357 }
@@ -413,7 +414,7 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
413 /* 414 /*
414 * request doesn't start on hw block boundary, add scatter pads 415 * request doesn't start on hw block boundary, add scatter pads
415 */ 416 */
416 if (((unsigned int)rq->sector % (s_size >> 9)) || 417 if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) ||
417 (scsi_bufflen(SCpnt) % s_size)) { 418 (scsi_bufflen(SCpnt) % s_size)) {
418 scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n"); 419 scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n");
419 goto out; 420 goto out;
@@ -422,14 +423,14 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
422 this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9); 423 this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9);
423 424
424 425
425 SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n", 426 SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%u 512 byte blocks.\n",
426 cd->cdi.name, 427 cd->cdi.name,
427 (rq_data_dir(rq) == WRITE) ? 428 (rq_data_dir(rq) == WRITE) ?
428 "writing" : "reading", 429 "writing" : "reading",
429 this_count, rq->nr_sectors)); 430 this_count, blk_rq_sectors(rq)));
430 431
431 SCpnt->cmnd[1] = 0; 432 SCpnt->cmnd[1] = 0;
432 block = (unsigned int)rq->sector / (s_size >> 9); 433 block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9);
433 434
434 if (this_count > 0xffff) { 435 if (this_count > 0xffff) {
435 this_count = 0xffff; 436 this_count = 0xffff;
@@ -726,7 +727,7 @@ static void get_sectorsize(struct scsi_cd *cd)
726 } 727 }
727 728
728 queue = cd->device->request_queue; 729 queue = cd->device->request_queue;
729 blk_queue_hardsect_size(queue, sector_size); 730 blk_queue_logical_block_size(queue, sector_size);
730 731
731 return; 732 return;
732} 733}
@@ -880,6 +881,7 @@ static int sr_remove(struct device *dev)
880{ 881{
881 struct scsi_cd *cd = dev_get_drvdata(dev); 882 struct scsi_cd *cd = dev_get_drvdata(dev);
882 883
884 blk_queue_prep_rq(cd->device->request_queue, scsi_prep_fn);
883 del_gendisk(cd->disk); 885 del_gendisk(cd->disk);
884 886
885 mutex_lock(&sr_ref_mutex); 887 mutex_lock(&sr_ref_mutex);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index eb24efea8f14..b33d04250bbc 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -463,7 +463,7 @@ static void st_scsi_execute_end(struct request *req, int uptodate)
463 struct scsi_tape *STp = SRpnt->stp; 463 struct scsi_tape *STp = SRpnt->stp;
464 464
465 STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors; 465 STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
466 STp->buffer->cmdstat.residual = req->data_len; 466 STp->buffer->cmdstat.residual = req->resid_len;
467 467
468 if (SRpnt->waiting) 468 if (SRpnt->waiting)
469 complete(SRpnt->waiting); 469 complete(SRpnt->waiting);
@@ -2964,7 +2964,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2964 !(STp->use_pf & PF_TESTED)) { 2964 !(STp->use_pf & PF_TESTED)) {
2965 /* Try the other possible state of Page Format if not 2965 /* Try the other possible state of Page Format if not
2966 already tried */ 2966 already tried */
2967 STp->use_pf = !STp->use_pf | PF_TESTED; 2967 STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED;
2968 st_release_request(SRpnt); 2968 st_release_request(SRpnt);
2969 SRpnt = NULL; 2969 SRpnt = NULL;
2970 return st_int_ioctl(STp, cmd_in, arg); 2970 return st_int_ioctl(STp, cmd_in, arg);
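
The Page Format retry now toggles the USE_PF bit explicitly instead of relying on boolean negation, which only lines up with USE_PF when that flag happens to occupy bit 0. A worked example with illustrative flag values (not the ones st.c actually uses) showing why the XOR form is the robust spelling:

#include <stdio.h>

#define USE_PF    4     /* illustrative bit values, not those defined in st.c */
#define PF_TESTED 8

int main(void)
{
        unsigned use_pf = 0;    /* Page Format currently off, not yet retried */

        /* old form: !use_pf evaluates to 1, setting bit 0 rather than USE_PF */
        printf("old: %u\n", !use_pf | PF_TESTED);               /* prints 9  */

        /* new form: XOR toggles exactly the USE_PF bit, keeps PF_TESTED set */
        printf("new: %u\n", (use_pf ^ USE_PF) | PF_TESTED);     /* prints 12 */
        return 0;
}
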
@@ -3983,8 +3983,8 @@ static int st_probe(struct device *dev)
3983 return -ENODEV; 3983 return -ENODEV;
3984 } 3984 }
3985 3985
3986 i = min(SDp->request_queue->max_hw_segments, 3986 i = min(queue_max_hw_segments(SDp->request_queue),
3987 SDp->request_queue->max_phys_segments); 3987 queue_max_phys_segments(SDp->request_queue));
3988 if (st_max_sg_segs < i) 3988 if (st_max_sg_segs < i)
3989 i = st_max_sg_segs; 3989 i = st_max_sg_segs;
3990 buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i); 3990 buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 583966ec8266..45374d66d26a 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -737,11 +737,14 @@ static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
737 struct sym_hcb *np = sym_get_hcb(sdev->host); 737 struct sym_hcb *np = sym_get_hcb(sdev->host);
738 struct sym_tcb *tp = &np->target[sdev->id]; 738 struct sym_tcb *tp = &np->target[sdev->id];
739 struct sym_lcb *lp; 739 struct sym_lcb *lp;
740 unsigned long flags;
741 int error;
740 742
741 if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN) 743 if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN)
742 return -ENXIO; 744 return -ENXIO;
743 745
744 tp->starget = sdev->sdev_target; 746 spin_lock_irqsave(np->s.host->host_lock, flags);
747
745 /* 748 /*
746 * Fail the device init if the device is flagged NOSCAN at BOOT in 749 * Fail the device init if the device is flagged NOSCAN at BOOT in
747 * the NVRAM. This may speed up boot and maintain coherency with 750 * the NVRAM. This may speed up boot and maintain coherency with
@@ -753,26 +756,37 @@ static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
753 756
754 if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) { 757 if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) {
755 tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED; 758 tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
756 starget_printk(KERN_INFO, tp->starget, 759 starget_printk(KERN_INFO, sdev->sdev_target,
757 "Scan at boot disabled in NVRAM\n"); 760 "Scan at boot disabled in NVRAM\n");
758 return -ENXIO; 761 error = -ENXIO;
762 goto out;
759 } 763 }
760 764
761 if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) { 765 if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) {
762 if (sdev->lun != 0) 766 if (sdev->lun != 0) {
763 return -ENXIO; 767 error = -ENXIO;
764 starget_printk(KERN_INFO, tp->starget, 768 goto out;
769 }
770 starget_printk(KERN_INFO, sdev->sdev_target,
765 "Multiple LUNs disabled in NVRAM\n"); 771 "Multiple LUNs disabled in NVRAM\n");
766 } 772 }
767 773
768 lp = sym_alloc_lcb(np, sdev->id, sdev->lun); 774 lp = sym_alloc_lcb(np, sdev->id, sdev->lun);
769 if (!lp) 775 if (!lp) {
770 return -ENOMEM; 776 error = -ENOMEM;
777 goto out;
778 }
779 if (tp->nlcb == 1)
780 tp->starget = sdev->sdev_target;
771 781
772 spi_min_period(tp->starget) = tp->usr_period; 782 spi_min_period(tp->starget) = tp->usr_period;
773 spi_max_width(tp->starget) = tp->usr_width; 783 spi_max_width(tp->starget) = tp->usr_width;
774 784
775 return 0; 785 error = 0;
786out:
787 spin_unlock_irqrestore(np->s.host->host_lock, flags);
788
789 return error;
776} 790}
777 791
778/* 792/*
@@ -819,12 +833,34 @@ static int sym53c8xx_slave_configure(struct scsi_device *sdev)
819static void sym53c8xx_slave_destroy(struct scsi_device *sdev) 833static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
820{ 834{
821 struct sym_hcb *np = sym_get_hcb(sdev->host); 835 struct sym_hcb *np = sym_get_hcb(sdev->host);
822 struct sym_lcb *lp = sym_lp(&np->target[sdev->id], sdev->lun); 836 struct sym_tcb *tp = &np->target[sdev->id];
837 struct sym_lcb *lp = sym_lp(tp, sdev->lun);
838 unsigned long flags;
839
840 spin_lock_irqsave(np->s.host->host_lock, flags);
841
842 if (lp->busy_itlq || lp->busy_itl) {
843 /*
844 * This really shouldn't happen, but we can't return an error
845 * so let's try to stop all on-going I/O.
846 */
847 starget_printk(KERN_WARNING, tp->starget,
848 "Removing busy LCB (%d)\n", sdev->lun);
849 sym_reset_scsi_bus(np, 1);
850 }
823 851
824 if (lp->itlq_tbl) 852 if (sym_free_lcb(np, sdev->id, sdev->lun) == 0) {
825 sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK * 4, "ITLQ_TBL"); 853 /*
826 kfree(lp->cb_tags); 854 * It was the last unit for this target.
827 sym_mfree_dma(lp, sizeof(*lp), "LCB"); 855 */
856 tp->head.sval = 0;
857 tp->head.wval = np->rv_scntl3;
858 tp->head.uval = 0;
859 tp->tgoal.check_nego = 1;
860 tp->starget = NULL;
861 }
862
863 spin_unlock_irqrestore(np->s.host->host_lock, flags);
828} 864}
829 865
830/* 866/*
@@ -890,6 +926,8 @@ static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc)
890 if (!((uc->target >> t) & 1)) 926 if (!((uc->target >> t) & 1))
891 continue; 927 continue;
892 tp = &np->target[t]; 928 tp = &np->target[t];
929 if (!tp->nlcb)
930 continue;
893 931
894 switch (uc->cmd) { 932 switch (uc->cmd) {
895 933
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index ffa70d1ed182..297deb817a5d 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -1896,6 +1896,15 @@ void sym_start_up(struct Scsi_Host *shost, int reason)
1896 tp->head.sval = 0; 1896 tp->head.sval = 0;
1897 tp->head.wval = np->rv_scntl3; 1897 tp->head.wval = np->rv_scntl3;
1898 tp->head.uval = 0; 1898 tp->head.uval = 0;
1899 if (tp->lun0p)
1900 tp->lun0p->to_clear = 0;
1901 if (tp->lunmp) {
1902 int ln;
1903
1904 for (ln = 1; ln < SYM_CONF_MAX_LUN; ln++)
1905 if (tp->lunmp[ln])
1906 tp->lunmp[ln]->to_clear = 0;
1907 }
1899 } 1908 }
1900 1909
1901 /* 1910 /*
@@ -2312,8 +2321,9 @@ static void sym_int_par (struct sym_hcb *np, u_short sist)
2312 int phase = cmd & 7; 2321 int phase = cmd & 7;
2313 struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa); 2322 struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa);
2314 2323
2315 printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n", 2324 if (printk_ratelimit())
2316 sym_name(np), hsts, dbc, sbcl); 2325 printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n",
2326 sym_name(np), hsts, dbc, sbcl);
2317 2327
2318 /* 2328 /*
2319 * Check that the chip is connected to the SCSI BUS. 2329 * Check that the chip is connected to the SCSI BUS.
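
The parity-error report is wrapped in printk_ratelimit() so a burst of identical interrupts cannot flood the log while the information is still captured. The idiom in isolation, as a sketch:

#include <linux/kernel.h>

static void report_parity_error(const char *name, int hsts, int dbc, int sbcl)
{
        if (printk_ratelimit())
                printk(KERN_ERR "%s: SCSI parity error: SCR1=%d DBC=%x SBCL=%x\n",
                       name, hsts, dbc, sbcl);
}
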
@@ -4988,7 +4998,7 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
4988 */ 4998 */
4989 if (ln && !tp->lunmp) { 4999 if (ln && !tp->lunmp) {
4990 tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *), 5000 tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *),
4991 GFP_KERNEL); 5001 GFP_ATOMIC);
4992 if (!tp->lunmp) 5002 if (!tp->lunmp)
4993 goto fail; 5003 goto fail;
4994 } 5004 }
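
The allocation flag changes because sym_alloc_lcb() is now reached with the host lock held (see the slave_alloc hunk above), and code holding a spinlock must not sleep, so GFP_KERNEL has to become GFP_ATOMIC. The constraint in miniature:

#include <linux/slab.h>
#include <linux/spinlock.h>

static void *alloc_under_lock(spinlock_t *lock, size_t n, size_t size)
{
        void *p;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        p = kcalloc(n, size, GFP_ATOMIC);       /* GFP_KERNEL could sleep here */
        spin_unlock_irqrestore(lock, flags);
        return p;
}
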
@@ -5008,6 +5018,7 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
5008 tp->lun0p = lp; 5018 tp->lun0p = lp;
5009 tp->head.lun0_sa = cpu_to_scr(vtobus(lp)); 5019 tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
5010 } 5020 }
5021 tp->nlcb++;
5011 5022
5012 /* 5023 /*
5013 * Let the itl task point to error handling. 5024 * Let the itl task point to error handling.
@@ -5085,6 +5096,43 @@ fail:
5085} 5096}
5086 5097
5087/* 5098/*
5099 * Lun control block deallocation. Returns the number of valid remaing LCBs
5100 * for the target.
5101 */
5102int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln)
5103{
5104 struct sym_tcb *tp = &np->target[tn];
5105 struct sym_lcb *lp = sym_lp(tp, ln);
5106
5107 tp->nlcb--;
5108
5109 if (ln) {
5110 if (!tp->nlcb) {
5111 kfree(tp->lunmp);
5112 sym_mfree_dma(tp->luntbl, 256, "LUNTBL");
5113 tp->lunmp = NULL;
5114 tp->luntbl = NULL;
5115 tp->head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl));
5116 } else {
5117 tp->luntbl[ln] = cpu_to_scr(vtobus(&np->badlun_sa));
5118 tp->lunmp[ln] = NULL;
5119 }
5120 } else {
5121 tp->lun0p = NULL;
5122 tp->head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa));
5123 }
5124
5125 if (lp->itlq_tbl) {
5126 sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
5127 kfree(lp->cb_tags);
5128 }
5129
5130 sym_mfree_dma(lp, sizeof(*lp), "LCB");
5131
5132 return tp->nlcb;
5133}
5134
5135/*
5088 * Queue a SCSI IO to the controller. 5136 * Queue a SCSI IO to the controller.
5089 */ 5137 */
5090int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) 5138int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
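
sym_alloc_lcb() now counts LCBs via tp->nlcb, and the new sym_free_lcb() decrements the counter and reports how many remain, letting slave_destroy() detect the last LUN of a target and reset its negotiation state. A rough sketch of that pairing in hypothetical slave callbacks, assuming the sym_glue.c context for the types:

static int my_slave_alloc(struct sym_hcb *np, struct scsi_device *sdev)
{
        if (!sym_alloc_lcb(np, sdev->id, sdev->lun))
                return -ENOMEM;         /* on success tp->nlcb was incremented */
        return 0;
}

static void my_slave_destroy(struct sym_hcb *np, struct scsi_device *sdev)
{
        if (sym_free_lcb(np, sdev->id, sdev->lun) == 0) {
                /* last LUN of the target is gone: negotiation state can be reset */
        }
}
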
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h
index 9ebc8706b6bf..053e63c86822 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.h
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h
@@ -401,6 +401,7 @@ struct sym_tcb {
401 * An array of bus addresses is used on reselection. 401 * An array of bus addresses is used on reselection.
402 */ 402 */
403 u32 *luntbl; /* LCBs bus address table */ 403 u32 *luntbl; /* LCBs bus address table */
404 int nlcb; /* Number of valid LCBs (including LUN #0) */
404 405
405 /* 406 /*
406 * LUN table used by the C code. 407 * LUN table used by the C code.
@@ -1065,6 +1066,7 @@ int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int
1065struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order); 1066struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order);
1066void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp); 1067void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp);
1067struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln); 1068struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln);
1069int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln);
1068int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp); 1070int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp);
1069int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out); 1071int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out);
1070int sym_reset_scsi_target(struct sym_hcb *np, int target); 1072int sym_reset_scsi_target(struct sym_hcb *np, int target);
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 601e95141cbe..54023d41fd15 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1306,7 +1306,7 @@ static int u14_34f_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scs
1306 if (linked_comm && SCpnt->device->queue_depth > 2 1306 if (linked_comm && SCpnt->device->queue_depth > 2
1307 && TLDEV(SCpnt->device->type)) { 1307 && TLDEV(SCpnt->device->type)) {
1308 HD(j)->cp_stat[i] = READY; 1308 HD(j)->cp_stat[i] = READY;
1309 flush_dev(SCpnt->device, SCpnt->request->sector, j, FALSE); 1309 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, FALSE);
1310 return 0; 1310 return 0;
1311 } 1311 }
1312 1312
@@ -1610,11 +1610,13 @@ static int reorder(unsigned int j, unsigned long cursec,
1610 1610
1611 if (!(cpp->xdir == DTD_IN)) input_only = FALSE; 1611 if (!(cpp->xdir == DTD_IN)) input_only = FALSE;
1612 1612
1613 if (SCpnt->request->sector < minsec) minsec = SCpnt->request->sector; 1613 if (blk_rq_pos(SCpnt->request) < minsec)
1614 if (SCpnt->request->sector > maxsec) maxsec = SCpnt->request->sector; 1614 minsec = blk_rq_pos(SCpnt->request);
1615 if (blk_rq_pos(SCpnt->request) > maxsec)
1616 maxsec = blk_rq_pos(SCpnt->request);
1615 1617
1616 sl[n] = SCpnt->request->sector; 1618 sl[n] = blk_rq_pos(SCpnt->request);
1617 ioseek += SCpnt->request->nr_sectors; 1619 ioseek += blk_rq_sectors(SCpnt->request);
1618 1620
1619 if (!n) continue; 1621 if (!n) continue;
1620 1622
@@ -1642,7 +1644,7 @@ static int reorder(unsigned int j, unsigned long cursec,
1642 1644
1643 if (!input_only) for (n = 0; n < n_ready; n++) { 1645 if (!input_only) for (n = 0; n < n_ready; n++) {
1644 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; 1646 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
1645 ll[n] = SCpnt->request->nr_sectors; pl[n] = SCpnt->serial_number; 1647 ll[n] = blk_rq_sectors(SCpnt->request); pl[n] = SCpnt->serial_number;
1646 1648
1647 if (!n) continue; 1649 if (!n) continue;
1648 1650
@@ -1666,12 +1668,12 @@ static int reorder(unsigned int j, unsigned long cursec,
1666 if (link_statistics && (overlap || !(flushcount % link_statistics))) 1668 if (link_statistics && (overlap || !(flushcount % link_statistics)))
1667 for (n = 0; n < n_ready; n++) { 1669 for (n = 0; n < n_ready; n++) {
1668 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; 1670 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
1669 printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %ld"\ 1671 printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %u"\
1670 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", 1672 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
1671 (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target, 1673 (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
1672 SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready, 1674 SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready,
1673 SCpnt->request->sector, SCpnt->request->nr_sectors, cursec, 1675 blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request),
1674 YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only), 1676 cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
1675 YESNO(overlap), cpp->xdir); 1677 YESNO(overlap), cpp->xdir);
1676 } 1678 }
1677#endif 1679#endif
@@ -1799,7 +1801,7 @@ static irqreturn_t ihdlr(unsigned int j)
1799 1801
1800 if (linked_comm && SCpnt->device->queue_depth > 2 1802 if (linked_comm && SCpnt->device->queue_depth > 2
1801 && TLDEV(SCpnt->device->type)) 1803 && TLDEV(SCpnt->device->type))
1802 flush_dev(SCpnt->device, SCpnt->request->sector, j, TRUE); 1804 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, TRUE);
1803 1805
1804 tstatus = status_byte(spp->target_status); 1806 tstatus = status_byte(spp->target_status);
1805 1807