author	Linus Torvalds <torvalds@linux-foundation.org>	2015-06-23 18:55:44 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-06-23 18:55:44 -0400
commit	acd53127c4adbd34570b221e7ea1f7fc94aea923 (patch)
tree	5e24adc30e91db14bc47ef4287319f38eb1b2108 /drivers/scsi
parent	f9d1b5a31ab02208e29631756630739175cdaa02 (diff)
parent	c8806b6c9e824f47726f2a9b7fbbe7ebf19306fa (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This is the usual grab bag of driver updates (lpfc, hpsa,
  megaraid_sas, cxgbi, be2iscsi) plus an assortment of minor updates.

  There is also one new driver: the Cisco snic.  The advansys driver
  has been rewritten to get rid of the warning about converting it to
  the DMA API, the tape statistics patch got in, and finally, there's a
  reshuffle of SCSI header files to separate more cleanly initiator
  from target mode (and better share the common definitions)"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (156 commits)
  snic: driver for Cisco SCSI HBA
  qla2xxx: Fix indentation
  qla2xxx: Comment out unreachable code
  fusion: remove dead MTRR code
  advansys: fix compilation errors and warnings when CONFIG_PCI is not set
  mptsas: fix depth param in scsi_track_queue_full
  megaraid: fix irq setup process regression
  lpfc: Update version to 10.7.0.0 for upstream patch set.
  lpfc: Fix to drop PLOGIs from fabric node till LOGO processing completes
  lpfc: Fix scsi task management error message.
  lpfc: Fix cq_id masking problem.
  lpfc: Fix scsi prep dma buf error.
  lpfc: Add support for using block multi-queue
  lpfc: Devices are not discovered during takeaway/giveback testing
  lpfc: Fix vport deletion failure.
  lpfc: Check for active portpeerbeacon.
  lpfc: Update driver version for upstream patch set 10.6.0.1.
  lpfc: Change buffer pool empty message to miscellaneous category
  lpfc: Fix incorrect log message reported for empty FCF record.
  lpfc: Fix rport leak.
  ...
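As background for the advansys rewrite in the diff below: the heart of such a conversion is replacing virt_to_bus()/bus_to_virt() with DMA-API allocations whose bus addresses are handed to the hardware explicitly. The following is only an illustrative sketch of that general pattern, not code from this merge; my_hw, my_sgblk, my_alloc_rings and CARR_BUFSIZE are made-up names:

	#include <linux/dma-mapping.h>
	#include <linux/dmapool.h>
	#include <linux/pci.h>

	#define CARR_BUFSIZE 4096		/* illustrative size */

	struct my_sgblk { u32 sg_addr; u32 sg_count; };

	struct my_hw {
		void *carrier;			/* CPU view of the ring */
		dma_addr_t carrier_addr;	/* bus view, given to the chip */
		struct dma_pool *sgblk_pool;
	};

	static int my_alloc_rings(struct pci_dev *pdev, struct my_hw *hw)
	{
		/* One coherent block for the whole ring; the CPU pointer and
		 * the bus address come back as a pair, so no virt_to_bus(). */
		hw->carrier = dma_alloc_coherent(&pdev->dev, CARR_BUFSIZE,
						 &hw->carrier_addr, GFP_KERNEL);
		if (!hw->carrier)
			return -ENOMEM;

		/* Small, frequently allocated per-request blocks come from a
		 * dma_pool, which likewise returns a (pointer, bus address)
		 * pair for each allocation. */
		hw->sgblk_pool = dma_pool_create("my_sgblk", &pdev->dev,
						 sizeof(struct my_sgblk), 32, 0);
		if (!hw->sgblk_pool) {
			dma_free_coherent(&pdev->dev, CARR_BUFSIZE,
					  hw->carrier, hw->carrier_addr);
			return -ENOMEM;
		}
		return 0;
	}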
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/Kconfig | 20
-rw-r--r--  drivers/scsi/Makefile | 2
-rw-r--r--  drivers/scsi/NCR53c406a.c | 1
-rw-r--r--  drivers/scsi/a100u2w.c | 1
-rw-r--r--  drivers/scsi/aacraid/src.c | 2
-rw-r--r--  drivers/scsi/advansys.c | 1474
-rw-r--r--  drivers/scsi/aha152x.c | 1
-rw-r--r--  drivers/scsi/aha1542.c | 1
-rw-r--r--  drivers/scsi/aha1740.c | 1
-rw-r--r--  drivers/scsi/aha1740.h | 1
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 1
-rw-r--r--  drivers/scsi/arm/arxescsi.c | 1
-rw-r--r--  drivers/scsi/arm/cumana_2.c | 1
-rw-r--r--  drivers/scsi/arm/eesox.c | 1
-rw-r--r--  drivers/scsi/atp870u.c | 1
-rw-r--r--  drivers/scsi/atp870u.h | 1
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.c | 4
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.h | 12
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 76
-rw-r--r--  drivers/scsi/be2iscsi/be_main.h | 6
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.c | 69
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.h | 3
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c | 5
-rw-r--r--  drivers/scsi/csiostor/csio_hw.c | 1
-rw-r--r--  drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 20
-rw-r--r--  drivers/scsi/cxgbi/cxgb3i/cxgb3i.h | 2
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 52
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.h | 4
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 20
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.h | 6
-rw-r--r--  drivers/scsi/dpt_i2o.c | 1
-rw-r--r--  drivers/scsi/fdomain.c | 1
-rw-r--r--  drivers/scsi/hpsa.c | 2780
-rw-r--r--  drivers/scsi/hpsa.h | 19
-rw-r--r--  drivers/scsi/hpsa_cmd.h | 34
-rw-r--r--  drivers/scsi/imm.c | 1
-rw-r--r--  drivers/scsi/initio.c | 1
-rw-r--r--  drivers/scsi/ipr.h | 2
-rw-r--r--  drivers/scsi/ips.c | 9
-rw-r--r--  drivers/scsi/isci/init.c | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 733
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 181
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 201
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 236
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 26
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 152
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 65
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 82
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 21
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 9
-rw-r--r--  drivers/scsi/mac53c94.c | 1
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 342
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 739
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fp.c | 17
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 554
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.h | 281
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 1
-rw-r--r--  drivers/scsi/nsp32.c | 1
-rw-r--r--  drivers/scsi/pcmcia/nsp_cs.c | 1
-rw-r--r--  drivers/scsi/pcmcia/qlogic_stub.c | 1
-rw-r--r--  drivers/scsi/pcmcia/sym53c500_cs.c | 1
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 1
-rw-r--r--  drivers/scsi/ppa.c | 1
-rw-r--r--  drivers/scsi/ps3rom.c | 1
-rw-r--r--  drivers/scsi/qla1280.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx2.c | 13
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 16
-rw-r--r--  drivers/scsi/qlogicfas.c | 1
-rw-r--r--  drivers/scsi/qlogicpti.c | 1
-rw-r--r--  drivers/scsi/scsi.c | 46
-rw-r--r--  drivers/scsi/scsi_common.c | 178
-rw-r--r--  drivers/scsi/scsi_error.c | 64
-rw-r--r--  drivers/scsi/scsi_scan.c | 65
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 2
-rw-r--r--  drivers/scsi/sd.c | 3
-rw-r--r--  drivers/scsi/snic/Makefile | 17
-rw-r--r--  drivers/scsi/snic/cq_desc.h | 77
-rw-r--r--  drivers/scsi/snic/cq_enet_desc.h | 38
-rw-r--r--  drivers/scsi/snic/snic.h | 414
-rw-r--r--  drivers/scsi/snic/snic_attrs.c | 77
-rw-r--r--  drivers/scsi/snic/snic_ctl.c | 279
-rw-r--r--  drivers/scsi/snic/snic_debugfs.c | 560
-rw-r--r--  drivers/scsi/snic/snic_disc.c | 551
-rw-r--r--  drivers/scsi/snic/snic_disc.h | 124
-rw-r--r--  drivers/scsi/snic/snic_fwint.h | 525
-rw-r--r--  drivers/scsi/snic/snic_io.c | 518
-rw-r--r--  drivers/scsi/snic/snic_io.h | 118
-rw-r--r--  drivers/scsi/snic/snic_isr.c | 204
-rw-r--r--  drivers/scsi/snic/snic_main.c | 1044
-rw-r--r--  drivers/scsi/snic/snic_res.c | 295
-rw-r--r--  drivers/scsi/snic/snic_res.h | 97
-rw-r--r--  drivers/scsi/snic/snic_scsi.c | 2632
-rw-r--r--  drivers/scsi/snic/snic_stats.h | 123
-rw-r--r--  drivers/scsi/snic/snic_trc.c | 181
-rw-r--r--  drivers/scsi/snic/snic_trc.h | 121
-rw-r--r--  drivers/scsi/snic/vnic_cq.c | 86
-rw-r--r--  drivers/scsi/snic/vnic_cq.h | 110
-rw-r--r--  drivers/scsi/snic/vnic_cq_fw.h | 62
-rw-r--r--  drivers/scsi/snic/vnic_dev.c | 748
-rw-r--r--  drivers/scsi/snic/vnic_dev.h | 110
-rw-r--r--  drivers/scsi/snic/vnic_devcmd.h | 270
-rw-r--r--  drivers/scsi/snic/vnic_intr.c | 59
-rw-r--r--  drivers/scsi/snic/vnic_intr.h | 105
-rw-r--r--  drivers/scsi/snic/vnic_resource.h | 68
-rw-r--r--  drivers/scsi/snic/vnic_snic.h | 54
-rw-r--r--  drivers/scsi/snic/vnic_stats.h | 68
-rw-r--r--  drivers/scsi/snic/vnic_wq.c | 237
-rw-r--r--  drivers/scsi/snic/vnic_wq.h | 170
-rw-r--r--  drivers/scsi/snic/wq_enet_desc.h | 96
-rw-r--r--  drivers/scsi/st.c | 272
-rw-r--r--  drivers/scsi/st.h | 22
-rw-r--r--  drivers/scsi/sym53c416.c | 1
-rw-r--r--  drivers/scsi/ufs/Kconfig | 2
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 39
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 108
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 53
-rw-r--r--  drivers/scsi/ufs/ufshci.h | 8
-rw-r--r--  drivers/scsi/virtio_scsi.c | 11
-rw-r--r--  drivers/scsi/wd719x.c | 1
-rw-r--r--  drivers/scsi/wd719x.h | 2
131 files changed, 16199 insertions, 3281 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index b94c217a09ae..456e1567841c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -503,7 +503,7 @@ config SCSI_DPT_I2O
 
 config SCSI_ADVANSYS
 	tristate "AdvanSys SCSI support"
-	depends on SCSI && VIRT_TO_BUS && !ARM
+	depends on SCSI
 	depends on ISA || EISA || PCI
 	help
 	  This is a driver for all SCSI host adapters manufactured by
@@ -634,6 +634,23 @@ config FCOE_FNIC
 	  <file:Documentation/scsi/scsi.txt>.
 	  The module will be called fnic.
 
+config SCSI_SNIC
+	tristate "Cisco SNIC Driver"
+	depends on PCI && SCSI
+	help
+	  This is support for the Cisco PCI-Express SCSI HBA.
+
+	  To compile this driver as a module, choose M here and read
+	  <file:Documentation/scsi/scsi.txt>.
+	  The module will be called snic.
+
+config SCSI_SNIC_DEBUG_FS
+	bool "Cisco SNIC Driver Debugfs Support"
+	depends on SCSI_SNIC && DEBUG_FS
+	help
+	  This enables to list debugging information from SNIC Driver
+	  available via debugfs file system
+
 config SCSI_DMX3191D
 	tristate "DMX3191D SCSI support"
 	depends on PCI && SCSI
@@ -1743,7 +1760,6 @@ config SCSI_BFA_FC
 config SCSI_VIRTIO
 	tristate "virtio-scsi support"
 	depends on VIRTIO
-	select BLK_DEV_INTEGRITY
 	help
 	  This is the virtual HBA driver for virtio.  If the kernel will
 	  be used in a virtual machine, say Y or M.
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index dee160a4f163..91209e3d27e3 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_LIBFC) += libfc/
 obj-$(CONFIG_LIBFCOE)		+= fcoe/
 obj-$(CONFIG_FCOE)		+= fcoe/
 obj-$(CONFIG_FCOE_FNIC)		+= fnic/
+obj-$(CONFIG_SCSI_SNIC)		+= snic/
 obj-$(CONFIG_SCSI_BNX2X_FCOE)	+= libfc/ fcoe/ bnx2fc/
 obj-$(CONFIG_ISCSI_TCP)		+= libiscsi.o	libiscsi_tcp.o iscsi_tcp.o
 obj-$(CONFIG_INFINIBAND_ISER)	+= libiscsi.o
@@ -161,6 +162,7 @@ obj-$(CONFIG_SCSI_OSD_INITIATOR) += osd/
 obj-$(CONFIG_SCSI_DEBUG)	+= scsi_debug.o
 scsi_mod-y			+= scsi.o hosts.o scsi_ioctl.o \
 				   scsicam.o scsi_error.o scsi_lib.o
+scsi_mod-y			+= scsi_common.o
 scsi_mod-$(CONFIG_SCSI_CONSTANTS)	+= constants.o
 scsi_mod-$(CONFIG_SCSI_DMA)	+= scsi_lib_dma.o
 scsi_mod-y			+= scsi_scan.o scsi_sysfs.o scsi_devinfo.o
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c
index 42c7161474f7..6e110c630d2c 100644
--- a/drivers/scsi/NCR53c406a.c
+++ b/drivers/scsi/NCR53c406a.c
@@ -1064,7 +1064,6 @@ static struct scsi_host_template driver_template =
 	.can_queue		= 1	/* can_queue */,
 	.this_id		= 7	/* SCSI ID of the chip */,
 	.sg_tablesize		= 32	/*SG_ALL*/ /*SG_NONE*/,
-	.cmd_per_lun		= 1	/* commands per lun */,
 	.unchecked_isa_dma	= 1	/* unchecked_isa_dma */,
 	.use_clustering		= ENABLE_CLUSTERING,
 };
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 7e33a61c1ba4..cac6b37d7b1b 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1078,7 +1078,6 @@ static struct scsi_host_template inia100_template = {
 	.can_queue		= 1,
 	.this_id		= 1,
 	.sg_tablesize		= SG_ALL,
-	.cmd_per_lun		= 1,
 	.use_clustering		= ENABLE_CLUSTERING,
 };
 
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 4596e9dd757c..e63cf9f22f36 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -46,7 +46,7 @@
 
 static int aac_src_get_sync_status(struct aac_dev *dev);
 
-irqreturn_t aac_src_intr_message(int irq, void *dev_id)
+static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
 {
 	struct aac_msix_ctx *ctx;
 	struct aac_dev *dev;
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index ae95e347f37d..4305178e4e01 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -1,12 +1,10 @@
-#define DRV_NAME "advansys"
-#define ASC_VERSION "3.4"	/* AdvanSys Driver Version */
-
 /*
  * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
  *
  * Copyright (c) 1995-2000 Advanced System Products, Inc.
  * Copyright (c) 2000-2001 ConnectCom Solutions, Inc.
  * Copyright (c) 2007 Matthew Wilcox <matthew@wil.cx>
+ * Copyright (c) 2014 Hannes Reinecke <hare@suse.de>
  * All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -39,6 +37,7 @@
 #include <linux/spinlock.h>
 #include <linux/dma-mapping.h>
 #include <linux/firmware.h>
+#include <linux/dmapool.h>
 
 #include <asm/io.h>
 #include <asm/dma.h>
@@ -49,26 +48,15 @@
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
 
+#define DRV_NAME "advansys"
+#define ASC_VERSION "3.5"	/* AdvanSys Driver Version */
+
 /* FIXME:
  *
- *  1. Although all of the necessary command mapping places have the
- *     appropriate dma_map.. APIs, the driver still processes its internal
- *     queue using bus_to_virt() and virt_to_bus() which are illegal under
- *     the API.  The entire queue processing structure will need to be
- *     altered to fix this.
- *  2. Need to add memory mapping workaround. Test the memory mapping.
- *     If it doesn't work revert to I/O port access. Can a test be done
- *     safely?
- *  3. Handle an interrupt not working. Keep an interrupt counter in
- *     the interrupt handler. In the timeout function if the interrupt
- *     has not occurred then print a message and run in polled mode.
- *  4. Need to add support for target mode commands, cf. CAM XPT.
- *  5. check DMA mapping functions for failure
- *  6. Use scsi_transport_spi
- *  7. advansys_info is not safe against multiple simultaneous callers
- *  8. Add module_param to override ISA/VLB ioport array
+ *  1. Use scsi_transport_spi
+ *  2. advansys_info is not safe against multiple simultaneous callers
+ *  3. Add module_param to override ISA/VLB ioport array
  */
-#warning this driver is still not properly converted to the DMA API
 
 /* Enable driver /proc statistics. */
 #define ADVANSYS_STATS
@@ -76,31 +64,8 @@
 /* Enable driver tracing. */
 #undef ADVANSYS_DEBUG
 
-/*
- * Portable Data Types
- *
- * Any instance where a 32-bit long or pointer type is assumed
- * for precision or HW defined structures, the following define
- * types must be used. In Linux the char, short, and int types
- * are all consistent at 8, 16, and 32 bits respectively. Pointers
- * and long types are 64 bits on Alpha and UltraSPARC.
- */
-#define ASC_PADDR __u32		/* Physical/Bus address data type. */
-#define ASC_VADDR __u32		/* Virtual address data type. */
-#define ASC_DCNT  __u32		/* Unsigned Data count type. */
-#define ASC_SDCNT __s32		/* Signed Data count type. */
-
 typedef unsigned char uchar;
 
-#ifndef TRUE
-#define TRUE     (1)
-#endif
-#ifndef FALSE
-#define FALSE    (0)
-#endif
-
-#define ERR      (-1)
-#define UW_ERR   (uint)(0xFFFF)
 #define isodd_word(val)   ((((uint)val) & (uint)0x0001) != 0)
 
 #define PCI_VENDOR_ID_ASP		0x10cd
@@ -111,15 +76,6 @@ typedef unsigned char uchar;
 #define PCI_DEVICE_ID_38C0800_REV1	0x2500
 #define PCI_DEVICE_ID_38C1600_REV1	0x2700
 
-/*
- * Enable CC_VERY_LONG_SG_LIST to support up to 64K element SG lists.
- * The SRB structure will have to be changed and the ASC_SRB2SCSIQ()
- * macro re-defined to be able to obtain a ASC_SCSI_Q pointer from the
- * SRB structure.
- */
-#define CC_VERY_LONG_SG_LIST 0
-#define ASC_SRB2SCSIQ(srb_ptr)  (srb_ptr)
-
 #define PortAddr		unsigned int	/* port address size */
 #define inp(port)		inb(port)
 #define outp(port, byte)	outb((byte), (port))
@@ -307,15 +263,15 @@ typedef struct asc_scsiq_1 {
 	uchar sg_queue_cnt;
 	uchar target_id;
 	uchar target_lun;
-	ASC_PADDR data_addr;
-	ASC_DCNT data_cnt;
-	ASC_PADDR sense_addr;
+	__le32 data_addr;
+	__le32 data_cnt;
+	__le32 sense_addr;
 	uchar sense_len;
 	uchar extra_bytes;
 } ASC_SCSIQ_1;
 
 typedef struct asc_scsiq_2 {
-	ASC_VADDR srb_ptr;
+	u32 srb_tag;
 	uchar target_ix;
 	uchar flag;
 	uchar cdb_len;
@@ -338,8 +294,8 @@ typedef struct asc_scsiq_4 {
 	uchar y_res;
 	ushort x_req_count;
 	ushort x_reconnect_rtn;
-	ASC_PADDR x_saved_data_addr;
-	ASC_DCNT x_saved_data_cnt;
+	__le32 x_saved_data_addr;
+	__le32 x_saved_data_cnt;
 } ASC_SCSIQ_4;
 
345typedef struct asc_q_done_info { 301typedef struct asc_q_done_info {
@@ -351,12 +307,12 @@ typedef struct asc_q_done_info {
 	uchar sense_len;
 	uchar extra_bytes;
 	uchar res;
-	ASC_DCNT remain_bytes;
+	u32 remain_bytes;
 } ASC_QDONE_INFO;
 
 typedef struct asc_sg_list {
-	ASC_PADDR addr;
-	ASC_DCNT bytes;
+	__le32 addr;
+	__le32 bytes;
 } ASC_SG_LIST;
 
 typedef struct asc_sg_head {
@@ -376,17 +332,6 @@ typedef struct asc_scsi_q {
 	ushort next_sg_index;
 } ASC_SCSI_Q;
 
-typedef struct asc_scsi_req_q {
-	ASC_SCSIQ_1 r1;
-	ASC_SCSIQ_2 r2;
-	uchar *cdbptr;
-	ASC_SG_HEAD *sg_head;
-	uchar *sense_ptr;
-	ASC_SCSIQ_3 r3;
-	uchar cdb[ASC_MAX_CDB_LEN];
-	uchar sense[ASC_MIN_SENSE_LEN];
-} ASC_SCSI_REQ_Q;
-
 typedef struct asc_scsi_bios_req_q {
 	ASC_SCSIQ_1 r1;
 	ASC_SCSIQ_2 r2;
@@ -570,7 +515,7 @@ typedef struct asc_dvc_var {
 	dma_addr_t overrun_dma;
 	uchar scsi_reset_wait;
 	uchar chip_no;
-	char is_in_int;
+	bool is_in_int;
 	uchar max_total_qng;
 	uchar cur_total_qng;
 	uchar in_critical_cnt;
@@ -586,15 +531,13 @@ typedef struct asc_dvc_var {
 	char redo_scam;
 	ushort res2;
 	uchar dos_int13_table[ASC_MAX_TID + 1];
-	ASC_DCNT max_dma_count;
+	unsigned int max_dma_count;
 	ASC_SCSI_BIT_ID_TYPE no_scam;
 	ASC_SCSI_BIT_ID_TYPE pci_fix_asyn_xfer;
 	uchar min_sdtr_index;
 	uchar max_sdtr_index;
 	struct asc_board *drv_ptr;
-	int ptr_map_count;
-	void **ptr_map;
-	ASC_DCNT uc_break;
+	unsigned int uc_break;
 } ASC_DVC_VAR;
 
 typedef struct asc_dvc_inq_info {
@@ -602,8 +545,8 @@ typedef struct asc_dvc_inq_info {
 } ASC_DVC_INQ_INFO;
 
 typedef struct asc_cap_info {
-	ASC_DCNT lba;
-	ASC_DCNT blk_size;
+	u32 lba;
+	u32 blk_size;
 } ASC_CAP_INFO;
 
 typedef struct asc_cap_info_array {
@@ -929,31 +872,6 @@ typedef struct asc_mc_saved {
 #define AscReadChipDvcID(port)		(uchar)inp((port)+IOP_REG_ID)
 #define AscWriteChipDvcID(port, data)	outp((port)+IOP_REG_ID, data)
 
-/*
- * Portable Data Types
- *
- * Any instance where a 32-bit long or pointer type is assumed
- * for precision or HW defined structures, the following define
- * types must be used. In Linux the char, short, and int types
- * are all consistent at 8, 16, and 32 bits respectively. Pointers
- * and long types are 64 bits on Alpha and UltraSPARC.
- */
-#define ADV_PADDR __u32		/* Physical address data type. */
-#define ADV_VADDR __u32		/* Virtual address data type. */
-#define ADV_DCNT  __u32		/* Unsigned Data count type. */
-#define ADV_SDCNT __s32		/* Signed Data count type. */
-
-/*
- * These macros are used to convert a virtual address to a
- * 32-bit value. This currently can be used on Linux Alpha
- * which uses 64-bit virtual address but a 32-bit bus address.
- * This is likely to break in the future, but doing this now
- * will give us time to change the HW and FW to handle 64-bit
- * addresses.
- */
-#define ADV_VADDR_TO_U32   virt_to_bus
-#define ADV_U32_TO_VADDR   bus_to_virt
-
 #define AdvPortAddr  void __iomem *	/* Virtual memory address size */
 
 /*
@@ -965,8 +883,6 @@ typedef struct asc_mc_saved {
 #define ADV_MEM_WRITEW(addr, word) writew(word, addr)
 #define ADV_MEM_WRITEDW(addr, dword) writel(dword, addr)
 
-#define ADV_CARRIER_COUNT (ASC_DEF_MAX_HOST_QNG + 15)
-
 /*
  * Define total number of simultaneous maximum element scatter-gather
  * request blocks per wide adapter. ASC_DEF_MAX_HOST_QNG (253) is the
@@ -1747,44 +1663,37 @@ typedef struct adveep_38C1600_config {
  * little-endian.
  */
 typedef struct adv_carr_t {
-	ADV_VADDR carr_va;	/* Carrier Virtual Address */
-	ADV_PADDR carr_pa;	/* Carrier Physical Address */
-	ADV_VADDR areq_vpa;	/* ASC_SCSI_REQ_Q Virtual or Physical Address */
+	__le32 carr_va;		/* Carrier Virtual Address */
+	__le32 carr_pa;		/* Carrier Physical Address */
+	__le32 areq_vpa;	/* ADV_SCSI_REQ_Q Virtual or Physical Address */
 	/*
 	 * next_vpa [31:4]	Carrier Virtual or Physical Next Pointer
 	 *
 	 * next_vpa [3:1]	Reserved Bits
 	 * next_vpa [0]		Done Flag set in Response Queue.
 	 */
-	ADV_VADDR next_vpa;
+	__le32 next_vpa;
 } ADV_CARR_T;
 
 /*
  * Mask used to eliminate low 4 bits of carrier 'next_vpa' field.
  */
-#define ASC_NEXT_VPA_MASK	0xFFFFFFF0
-
-#define ASC_RQ_DONE		0x00000001
-#define ASC_RQ_GOOD		0x00000002
-#define ASC_CQ_STOPPER		0x00000000
-
-#define ASC_GET_CARRP(carrp) ((carrp) & ASC_NEXT_VPA_MASK)
+#define ADV_NEXT_VPA_MASK	0xFFFFFFF0
 
-#define ADV_CARRIER_NUM_PAGE_CROSSING \
-	(((ADV_CARRIER_COUNT * sizeof(ADV_CARR_T)) + (PAGE_SIZE - 1))/PAGE_SIZE)
+#define ADV_RQ_DONE		0x00000001
+#define ADV_RQ_GOOD		0x00000002
+#define ADV_CQ_STOPPER		0x00000000
 
-#define ADV_CARRIER_BUFSIZE \
-	((ADV_CARRIER_COUNT + ADV_CARRIER_NUM_PAGE_CROSSING) * sizeof(ADV_CARR_T))
+#define ADV_GET_CARRP(carrp) ((carrp) & ADV_NEXT_VPA_MASK)
 
 /*
- * ASC_SCSI_REQ_Q 'a_flag' definitions
- *
- * The Adv Library should limit use to the lower nibble (4 bits) of
- * a_flag. Drivers are free to use the upper nibble (4 bits) of a_flag.
+ * Each carrier is 64 bytes, and we need three additional
+ * carrier for icq, irq, and the termination carrier.
  */
-#define ADV_POLL_REQUEST	0x01	/* poll for request completion */
-#define ADV_SCSIQ_DONE		0x02	/* request done */
-#define ADV_DONT_RETRY		0x08	/* don't do retry */
+#define ADV_CARRIER_COUNT (ASC_DEF_MAX_HOST_QNG + 3)
+
+#define ADV_CARRIER_BUFSIZE \
+	(ADV_CARRIER_COUNT * sizeof(ADV_CARR_T))
 
 #define ADV_CHIP_ASC3550	0x01	/* Ultra-Wide IC */
 #define ADV_CHIP_ASC38C0800	0x02	/* Ultra2-Wide/LVD IC */
@@ -1816,15 +1725,15 @@ typedef struct adv_dvc_cfg {
 struct adv_dvc_var;
 struct adv_scsi_req_q;
 
-typedef struct asc_sg_block {
+typedef struct adv_sg_block {
 	uchar reserved1;
 	uchar reserved2;
 	uchar reserved3;
 	uchar sg_cnt;		/* Valid entries in block. */
-	ADV_PADDR sg_ptr;	/* Pointer to next sg block. */
+	__le32 sg_ptr;		/* Pointer to next sg block. */
 	struct {
-		ADV_PADDR sg_addr;	/* SG element address. */
-		ADV_DCNT sg_count;	/* SG element count. */
+		__le32 sg_addr;	/* SG element address. */
+		__le32 sg_count;	/* SG element count. */
 	} sg_list[NO_OF_SG_PER_BLOCK];
 } ADV_SG_BLOCK;
 
@@ -1844,10 +1753,10 @@ typedef struct adv_scsi_req_q {
 	uchar target_cmd;
 	uchar target_id;	/* Device target identifier. */
 	uchar target_lun;	/* Device target logical unit number. */
-	ADV_PADDR data_addr;	/* Data buffer physical address. */
-	ADV_DCNT data_cnt;	/* Data count. Ucode sets to residual. */
-	ADV_PADDR sense_addr;
-	ADV_PADDR carr_pa;
+	__le32 data_addr;	/* Data buffer physical address. */
+	__le32 data_cnt;	/* Data count. Ucode sets to residual. */
+	__le32 sense_addr;
+	__le32 carr_pa;
 	uchar mflag;
 	uchar sense_len;
 	uchar cdb_len;		/* SCSI CDB length. Must <= 16 bytes. */
@@ -1857,29 +1766,26 @@ typedef struct adv_scsi_req_q {
 	uchar host_status;	/* Ucode host status. */
 	uchar sg_working_ix;
 	uchar cdb[12];		/* SCSI CDB bytes 0-11. */
-	ADV_PADDR sg_real_addr;	/* SG list physical address. */
-	ADV_PADDR scsiq_rptr;
+	__le32 sg_real_addr;	/* SG list physical address. */
+	__le32 scsiq_rptr;
 	uchar cdb16[4];		/* SCSI CDB bytes 12-15. */
-	ADV_VADDR scsiq_ptr;
-	ADV_VADDR carr_va;
+	__le32 scsiq_ptr;
+	__le32 carr_va;
 	/*
 	 * End of microcode structure - 60 bytes. The rest of the structure
 	 * is used by the Adv Library and ignored by the microcode.
 	 */
-	ADV_VADDR srb_ptr;
+	u32 srb_tag;
 	ADV_SG_BLOCK *sg_list_ptr;	/* SG list virtual address. */
-	char *vdata_addr;	/* Data buffer virtual address. */
-	uchar a_flag;
-	uchar pad[2];		/* Pad out to a word boundary. */
 } ADV_SCSI_REQ_Q;
 
 /*
  * The following two structures are used to process Wide Board requests.
  *
- * The ADV_SCSI_REQ_Q structure in adv_req_t is passed to the Adv Library
- * and microcode with the ADV_SCSI_REQ_Q field 'srb_ptr' pointing to the
- * adv_req_t. The adv_req_t structure 'cmndp' field in turn points to the
- * Mid-Level SCSI request structure.
+ * The ADV_SCSI_REQ_Q structure in adv_req_t is passed to the Adv Library
+ * and microcode with the ADV_SCSI_REQ_Q field 'srb_tag' set to the
+ * SCSI request tag. The adv_req_t structure 'cmndp' field in turn points
+ * to the Mid-Level SCSI request structure.
  *
  * Zero or more ADV_SG_BLOCK are used with each ADV_SCSI_REQ_Q. Each
  * ADV_SG_BLOCK structure holds 15 scatter-gather elements. Under Linux
@@ -1890,17 +1796,17 @@ typedef struct adv_scsi_req_q {
  */
 typedef struct adv_sgblk {
 	ADV_SG_BLOCK sg_block;	/* Sgblock structure. */
-	uchar align[32];	/* Sgblock structure padding. */
+	dma_addr_t sg_addr;	/* Physical address */
 	struct adv_sgblk *next_sgblkp;	/* Next scatter-gather structure. */
 } adv_sgblk_t;
 
 typedef struct adv_req {
 	ADV_SCSI_REQ_Q scsi_req_q;	/* Adv Library request structure. */
-	uchar align[32];	/* Request structure padding. */
+	uchar align[24];	/* Request structure padding. */
 	struct scsi_cmnd *cmndp;	/* Mid-Level SCSI command pointer. */
+	dma_addr_t req_addr;
 	adv_sgblk_t *sgblkp;	/* Adv Library scatter-gather pointer. */
-	struct adv_req *next_reqp;	/* Next Request Structure. */
-} adv_req_t;
+} adv_req_t __aligned(32);
 
 /*
  * Adapter operation variable structure.
@@ -1937,12 +1843,12 @@ typedef struct adv_dvc_var {
 	uchar chip_scsi_id;	/* chip SCSI target ID */
 	uchar chip_type;
 	uchar bist_err_code;
-	ADV_CARR_T *carrier_buf;
+	ADV_CARR_T *carrier;
 	ADV_CARR_T *carr_freelist;	/* Carrier free list. */
+	dma_addr_t carrier_addr;
 	ADV_CARR_T *icq_sp;	/* Initiator command queue stopper pointer. */
 	ADV_CARR_T *irq_sp;	/* Initiator response queue stopper pointer. */
 	ushort carr_pending_cnt;	/* Count of pending carriers. */
-	struct adv_req *orig_reqp;	/* adv_req_t memory block. */
 	/*
 	 * Note: The following fields will not be used after initialization. The
 	 * driver may discard the buffer after initialization is done.
@@ -2068,8 +1974,8 @@ do { \
 	AdvReadByteRegister((iop_base), IOPB_CHIP_TYPE_REV)
 
 /*
- * Abort an SRB in the chip's RISC Memory. The 'srb_ptr' argument must
- * match the ASC_SCSI_REQ_Q 'srb_ptr' field.
+ * Abort an SRB in the chip's RISC Memory. The 'srb_tag' argument must
+ * match the ADV_SCSI_REQ_Q 'srb_tag' field.
  *
  * If the request has not yet been sent to the device it will simply be
  * aborted from RISC memory. If the request is disconnected it will be
@@ -2079,9 +1985,9 @@ do { \
  * ADV_TRUE(1) - Queue was successfully aborted.
  * ADV_FALSE(0) - Queue was not found on the active queue list.
  */
-#define AdvAbortQueue(asc_dvc, scsiq) \
-	AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_ABORT, \
-		       (ADV_DCNT) (scsiq))
+#define AdvAbortQueue(asc_dvc, srb_tag) \
+	AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_ABORT, \
+		       (ADV_DCNT) (srb_tag))
 
 /*
  * Send a Bus Device Reset Message to the specified target ID.
@@ -2095,8 +2001,8 @@ do { \
  * are not purged.
  */
 #define AdvResetDevice(asc_dvc, target_id) \
 	AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_DEVICE_RESET, \
		       (ADV_DCNT) (target_id))
 
 /*
  * SCSI Wide Type definition.
@@ -2115,7 +2021,7 @@ do { \
 #define ADV_TID_TO_TIDMASK(tid)   (0x01 << ((tid) & ADV_MAX_TID))
 
 /*
- * ASC_SCSI_REQ_Q 'done_status' and 'host_status' return values.
+ * ADV_SCSI_REQ_Q 'done_status' and 'host_status' return values.
  */
 
 #define QD_NO_STATUS         0x00	/* Request not completed yet. */
@@ -2153,8 +2059,6 @@ do { \
 #define QHSTA_M_SGBACKUP_ERROR      0x47	/* Scatter-Gather backup error */
 
 /* Return the address that is aligned at the next doubleword >= to 'addr'. */
-#define ADV_8BALIGN(addr)       (((ulong) (addr) + 0x7) & ~0x7)
-#define ADV_16BALIGN(addr)      (((ulong) (addr) + 0xF) & ~0xF)
 #define ADV_32BALIGN(addr)      (((ulong) (addr) + 0x1F) & ~0x1F)
 
 /*
@@ -2315,24 +2219,24 @@ do { \
 /* Per board statistics structure */
 struct asc_stats {
 	/* Driver Entrypoint Statistics */
-	ADV_DCNT queuecommand;	/* # calls to advansys_queuecommand() */
-	ADV_DCNT reset;		/* # calls to advansys_eh_bus_reset() */
-	ADV_DCNT biosparam;	/* # calls to advansys_biosparam() */
-	ADV_DCNT interrupt;	/* # advansys_interrupt() calls */
-	ADV_DCNT callback;	/* # calls to asc/adv_isr_callback() */
-	ADV_DCNT done;		/* # calls to request's scsi_done function */
-	ADV_DCNT build_error;	/* # asc/adv_build_req() ASC_ERROR returns. */
-	ADV_DCNT adv_build_noreq;	/* # adv_build_req() adv_req_t alloc. fail. */
-	ADV_DCNT adv_build_nosg;	/* # adv_build_req() adv_sgblk_t alloc. fail. */
+	unsigned int queuecommand;	/* # calls to advansys_queuecommand() */
+	unsigned int reset;		/* # calls to advansys_eh_bus_reset() */
+	unsigned int biosparam;	/* # calls to advansys_biosparam() */
+	unsigned int interrupt;	/* # advansys_interrupt() calls */
+	unsigned int callback;	/* # calls to asc/adv_isr_callback() */
+	unsigned int done;		/* # calls to request's scsi_done function */
+	unsigned int build_error;	/* # asc/adv_build_req() ASC_ERROR returns. */
+	unsigned int adv_build_noreq;	/* # adv_build_req() adv_req_t alloc. fail. */
+	unsigned int adv_build_nosg;	/* # adv_build_req() adv_sgblk_t alloc. fail. */
 	/* AscExeScsiQueue()/AdvExeScsiQueue() Statistics */
-	ADV_DCNT exe_noerror;	/* # ASC_NOERROR returns. */
-	ADV_DCNT exe_busy;	/* # ASC_BUSY returns. */
-	ADV_DCNT exe_error;	/* # ASC_ERROR returns. */
-	ADV_DCNT exe_unknown;	/* # unknown returns. */
+	unsigned int exe_noerror;	/* # ASC_NOERROR returns. */
+	unsigned int exe_busy;	/* # ASC_BUSY returns. */
+	unsigned int exe_error;	/* # ASC_ERROR returns. */
+	unsigned int exe_unknown;	/* # unknown returns. */
 	/* Data Transfer Statistics */
-	ADV_DCNT xfer_cnt;	/* # I/O requests received */
-	ADV_DCNT xfer_elem;	/* # scatter-gather elements */
-	ADV_DCNT xfer_sect;	/* # 512-byte blocks */
+	unsigned int xfer_cnt;	/* # I/O requests received */
+	unsigned int xfer_elem;	/* # scatter-gather elements */
+	unsigned int xfer_sect;	/* # 512-byte blocks */
 };
 #endif /* ADVANSYS_STATS */
 
@@ -2345,6 +2249,7 @@ struct asc_stats {
  */
 struct asc_board {
 	struct device *dev;
+	struct Scsi_Host *shost;
 	uint flags;		/* Board flags */
 	unsigned int irq;
 	union {
@@ -2366,7 +2271,6 @@ struct asc_board {
 	ADVEEP_38C0800_CONFIG adv_38C0800_eep;	/* 38C0800 EEPROM config. */
 	ADVEEP_38C1600_CONFIG adv_38C1600_eep;	/* 38C1600 EEPROM config. */
 	} eep_config;
-	ulong last_reset;	/* Saved last reset time */
 	/* /proc/scsi/advansys/[0...] */
 #ifdef ADVANSYS_STATS
 	struct asc_stats asc_stats;	/* Board statistics */
@@ -2381,7 +2285,9 @@ struct asc_board {
 	void __iomem *ioremap_addr;	/* I/O Memory remap address. */
 	ushort ioport;		/* I/O Port address. */
 	adv_req_t *adv_reqp;	/* Request structures. */
-	adv_sgblk_t *adv_sgblkp;	/* Scatter-gather structures. */
+	dma_addr_t adv_reqp_addr;
+	size_t adv_reqp_size;
+	struct dma_pool *adv_sgblk_pool;	/* Scatter-gather structures. */
 	ushort bios_signature;	/* BIOS Signature. */
 	ushort bios_version;	/* BIOS Version. */
 	ushort bios_codeseg;	/* BIOS Code Segment. */
@@ -2470,12 +2376,11 @@ static void asc_prt_adv_dvc_var(ADV_DVC_VAR *h)
 	printk(" start_motor 0x%x, scsi_reset_wait 0x%x\n",
 	       (unsigned)h->start_motor, (unsigned)h->scsi_reset_wait);
 
-	printk(" max_host_qng %u, max_dvc_qng %u, carr_freelist 0x%lxn\n",
+	printk(" max_host_qng %u, max_dvc_qng %u, carr_freelist 0x%p\n",
 	       (unsigned)h->max_host_qng, (unsigned)h->max_dvc_qng,
-	       (ulong)h->carr_freelist);
+	       h->carr_freelist);
 
-	printk(" icq_sp 0x%lx, irq_sp 0x%lx\n",
-	       (ulong)h->icq_sp, (ulong)h->irq_sp);
+	printk(" icq_sp 0x%p, irq_sp 0x%p\n", h->icq_sp, h->irq_sp);
 
 	printk(" no_scam 0x%x, tagqng_able 0x%x\n",
 	       (unsigned)h->no_scam, (unsigned)h->tagqng_able);
@@ -2600,8 +2505,8 @@ static void asc_prt_asc_scsi_q(ASC_SCSI_Q *q)
 	printk("ASC_SCSI_Q at addr 0x%lx\n", (ulong)q);
 
 	printk
-	    (" target_ix 0x%x, target_lun %u, srb_ptr 0x%lx, tag_code 0x%x,\n",
-	     q->q2.target_ix, q->q1.target_lun, (ulong)q->q2.srb_ptr,
+	    (" target_ix 0x%x, target_lun %u, srb_tag 0x%x, tag_code 0x%x,\n",
+	     q->q2.target_ix, q->q1.target_lun, q->q2.srb_tag,
 	     q->q2.tag_code);
 
 	printk
@@ -2634,8 +2539,8 @@ static void asc_prt_asc_scsi_q(ASC_SCSI_Q *q)
 static void asc_prt_asc_qdone_info(ASC_QDONE_INFO *q)
 {
 	printk("ASC_QDONE_INFO at addr 0x%lx\n", (ulong)q);
-	printk(" srb_ptr 0x%lx, target_ix %u, cdb_len %u, tag_code %u,\n",
-	       (ulong)q->d2.srb_ptr, q->d2.target_ix, q->d2.cdb_len,
+	printk(" srb_tag 0x%x, target_ix %u, cdb_len %u, tag_code %u,\n",
+	       q->d2.srb_tag, q->d2.target_ix, q->d2.cdb_len,
 	       q->d2.tag_code);
 	printk
 	    (" done_stat 0x%x, host_stat 0x%x, scsi_stat 0x%x, scsi_msg 0x%x\n",
@@ -2651,17 +2556,17 @@ static void asc_prt_adv_sgblock(int sgblockno, ADV_SG_BLOCK *b)
 {
 	int i;
 
-	printk(" ASC_SG_BLOCK at addr 0x%lx (sgblockno %d)\n",
+	printk(" ADV_SG_BLOCK at addr 0x%lx (sgblockno %d)\n",
 	       (ulong)b, sgblockno);
-	printk("  sg_cnt %u, sg_ptr 0x%lx\n",
-	       b->sg_cnt, (ulong)le32_to_cpu(b->sg_ptr));
+	printk("  sg_cnt %u, sg_ptr 0x%x\n",
+	       b->sg_cnt, (u32)le32_to_cpu(b->sg_ptr));
 	BUG_ON(b->sg_cnt > NO_OF_SG_PER_BLOCK);
 	if (b->sg_ptr != 0)
 		BUG_ON(b->sg_cnt != NO_OF_SG_PER_BLOCK);
 	for (i = 0; i < b->sg_cnt; i++) {
-		printk("  [%u]: sg_addr 0x%lx, sg_count 0x%lx\n",
-		       i, (ulong)b->sg_list[i].sg_addr,
-		       (ulong)b->sg_list[i].sg_count);
+		printk("  [%u]: sg_addr 0x%x, sg_count 0x%x\n",
+		       i, (u32)le32_to_cpu(b->sg_list[i].sg_addr),
+		       (u32)le32_to_cpu(b->sg_list[i].sg_count));
 	}
 }
 
@@ -2673,15 +2578,16 @@ static void asc_prt_adv_sgblock(int sgblockno, ADV_SG_BLOCK *b)
 static void asc_prt_adv_scsi_req_q(ADV_SCSI_REQ_Q *q)
 {
 	int sg_blk_cnt;
-	struct asc_sg_block *sg_ptr;
+	struct adv_sg_block *sg_ptr;
+	adv_sgblk_t *sgblkp;
 
 	printk("ADV_SCSI_REQ_Q at addr 0x%lx\n", (ulong)q);
 
-	printk("  target_id %u, target_lun %u, srb_ptr 0x%lx, a_flag 0x%x\n",
-	       q->target_id, q->target_lun, (ulong)q->srb_ptr, q->a_flag);
+	printk("  target_id %u, target_lun %u, srb_tag 0x%x\n",
+	       q->target_id, q->target_lun, q->srb_tag);
 
-	printk("  cntl 0x%x, data_addr 0x%lx, vdata_addr 0x%lx\n",
-	       q->cntl, (ulong)le32_to_cpu(q->data_addr), (ulong)q->vdata_addr);
+	printk("  cntl 0x%x, data_addr 0x%lx\n",
+	       q->cntl, (ulong)le32_to_cpu(q->data_addr));
 
 	printk("  data_cnt %lu, sense_addr 0x%lx, sense_len %u,\n",
 	       (ulong)le32_to_cpu(q->data_cnt),
@@ -2700,21 +2606,15 @@ static void asc_prt_adv_scsi_req_q(ADV_SCSI_REQ_Q *q)
 
 	/* Display the request's ADV_SG_BLOCK structures. */
 	if (q->sg_list_ptr != NULL) {
+		sgblkp = container_of(q->sg_list_ptr, adv_sgblk_t, sg_block);
 		sg_blk_cnt = 0;
-		while (1) {
-			/*
-			 * 'sg_ptr' is a physical address. Convert it to a virtual
-			 * address by indexing 'sg_blk_cnt' into the virtual address
-			 * array 'sg_list_ptr'.
-			 *
-			 * XXX - Assumes all SG physical blocks are virtually contiguous.
-			 */
-			sg_ptr =
-			    &(((ADV_SG_BLOCK *)(q->sg_list_ptr))[sg_blk_cnt]);
+		while (sgblkp) {
+			sg_ptr = &sgblkp->sg_block;
 			asc_prt_adv_sgblock(sg_blk_cnt, sg_ptr);
 			if (sg_ptr->sg_ptr == 0) {
 				break;
 			}
+			sgblkp = sgblkp->next_sgblkp;
 			sg_blk_cnt++;
 		}
 	}
@@ -2722,59 +2622,6 @@ static void asc_prt_adv_scsi_req_q(ADV_SCSI_REQ_Q *q)
 #endif /* ADVANSYS_DEBUG */
 
 /*
- * The advansys chip/microcode contains a 32-bit identifier for each command
- * known as the 'srb'.  I don't know what it stands for.  The driver used
- * to encode the scsi_cmnd pointer by calling virt_to_bus and retrieve it
- * with bus_to_virt.  Now the driver keeps a per-host map of integers to
- * pointers.  It auto-expands when full, unless it can't allocate memory.
- * Note that an srb of 0 is treated specially by the chip/firmware, hence
- * the return of i+1 in this routine, and the corresponding subtraction in
- * the inverse routine.
- */
-#define BAD_SRB 0
-static u32 advansys_ptr_to_srb(struct asc_dvc_var *asc_dvc, void *ptr)
-{
-	int i;
-	void **new_ptr;
-
-	for (i = 0; i < asc_dvc->ptr_map_count; i++) {
-		if (!asc_dvc->ptr_map[i])
-			goto out;
-	}
-
-	if (asc_dvc->ptr_map_count == 0)
-		asc_dvc->ptr_map_count = 1;
-	else
-		asc_dvc->ptr_map_count *= 2;
-
-	new_ptr = krealloc(asc_dvc->ptr_map,
-			asc_dvc->ptr_map_count * sizeof(void *), GFP_ATOMIC);
-	if (!new_ptr)
-		return BAD_SRB;
-	asc_dvc->ptr_map = new_ptr;
- out:
-	ASC_DBG(3, "Putting ptr %p into array offset %d\n", ptr, i);
-	asc_dvc->ptr_map[i] = ptr;
-	return i + 1;
-}
-
-static void * advansys_srb_to_ptr(struct asc_dvc_var *asc_dvc, u32 srb)
-{
-	void *ptr;
-
-	srb--;
-	if (srb >= asc_dvc->ptr_map_count) {
-		printk("advansys: bad SRB %u, max %u\n", srb,
-			asc_dvc->ptr_map_count);
-		return NULL;
-	}
-	ptr = asc_dvc->ptr_map[srb];
-	asc_dvc->ptr_map[srb] = NULL;
-	ASC_DBG(3, "Returning ptr %p from array offset %d\n", ptr, srb);
-	return ptr;
-}
-
-/*
  * advansys_info()
  *
  * Return suitable for printing on the console with the argument
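The pointer map removed above existed only to squeeze a scsi_cmnd reference into the chip's 32-bit 'srb' slot. The rewrite's replacement, visible in the srb_tag hunks elsewhere in this diff, stores the midlayer request tag instead. A hedged sketch of the reverse lookup (my_srb_to_cmnd is an illustrative name, not a function added by this patch):

	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_host.h>
	#include <scsi/scsi_tcq.h>

	/* Resolve the 32-bit srb_tag the chip hands back into the original
	 * command.  scsi_host_find_tag() does the per-host tag -> scsi_cmnd
	 * mapping, so no driver-private pointer table is needed. */
	static struct scsi_cmnd *my_srb_to_cmnd(struct Scsi_Host *shost,
						u32 srb_tag)
	{
		return scsi_host_find_tag(shost, srb_tag);
	}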
@@ -3350,7 +3197,7 @@ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)
 
 	seq_printf(m,
 		   " flags 0x%x, last_reset 0x%lx, jiffies 0x%lx, asc_n_io_port 0x%x\n",
-		   boardp->flags, boardp->last_reset, jiffies,
+		   boardp->flags, shost->last_reset, jiffies,
 		   boardp->asc_n_io_port);
 
 	seq_printf(m, " io_port 0x%lx\n", shost->io_port);
@@ -3844,7 +3691,7 @@ static int AscStartChip(PortAddr iop_base)
 	return (1);
 }
 
-static int AscStopChip(PortAddr iop_base)
+static bool AscStopChip(PortAddr iop_base)
 {
 	uchar cc_val;
 
@@ -3855,22 +3702,22 @@ static int AscStopChip(PortAddr iop_base)
 	AscSetChipIH(iop_base, INS_HALT);
 	AscSetChipIH(iop_base, INS_RFLAG_WTM);
 	if ((AscGetChipStatus(iop_base) & CSW_HALTED) == 0) {
-		return (0);
+		return false;
 	}
-	return (1);
+	return true;
 }
 
-static int AscIsChipHalted(PortAddr iop_base)
+static bool AscIsChipHalted(PortAddr iop_base)
 {
 	if ((AscGetChipStatus(iop_base) & CSW_HALTED) != 0) {
 		if ((AscGetChipControl(iop_base) & CC_HALT) != 0) {
-			return (1);
+			return true;
 		}
 	}
-	return (0);
+	return false;
 }
 
-static int AscResetChipAndScsiBus(ASC_DVC_VAR *asc_dvc)
+static bool AscResetChipAndScsiBus(ASC_DVC_VAR *asc_dvc)
 {
 	PortAddr iop_base;
 	int i = 10;
@@ -3953,20 +3800,6 @@ static ushort AscReadLramWord(PortAddr iop_base, ushort addr)
 	return (word_data);
 }
 
-#if CC_VERY_LONG_SG_LIST
-static ASC_DCNT AscReadLramDWord(PortAddr iop_base, ushort addr)
-{
-	ushort val_low, val_high;
-	ASC_DCNT dword_data;
-
-	AscSetChipLramAddr(iop_base, addr);
-	val_low = AscGetChipLramData(iop_base);
-	val_high = AscGetChipLramData(iop_base);
-	dword_data = ((ASC_DCNT) val_high << 16) | (ASC_DCNT) val_low;
-	return (dword_data);
-}
-#endif /* CC_VERY_LONG_SG_LIST */
-
 static void
 AscMemWordSetLram(PortAddr iop_base, ushort s_addr, ushort set_wval, int words)
3972{ 3805{
@@ -4068,27 +3901,24 @@ AscMemWordCopyPtrFromLram(PortAddr iop_base,
 	}
 }
 
-static ASC_DCNT AscMemSumLramWord(PortAddr iop_base, ushort s_addr, int words)
+static u32 AscMemSumLramWord(PortAddr iop_base, ushort s_addr, int words)
 {
-	ASC_DCNT sum;
+	u32 sum = 0;
 	int i;
 
-	sum = 0L;
 	for (i = 0; i < words; i++, s_addr += 2) {
 		sum += AscReadLramWord(iop_base, s_addr);
 	}
 	return (sum);
 }
 
-static ushort AscInitLram(ASC_DVC_VAR *asc_dvc)
+static void AscInitLram(ASC_DVC_VAR *asc_dvc)
 {
 	uchar i;
 	ushort s_addr;
 	PortAddr iop_base;
-	ushort warn_code;
 
 	iop_base = asc_dvc->iop_base;
-	warn_code = 0;
 	AscMemWordSetLram(iop_base, ASC_QADR_BEG, 0,
 			  (ushort)(((int)(asc_dvc->max_total_qng + 2 + 1) *
 				    64) >> 1));
@@ -4127,14 +3957,13 @@ static ushort AscInitLram(ASC_DVC_VAR *asc_dvc)
 		AscWriteLramByte(iop_base,
 				 (ushort)(s_addr + (ushort)ASC_SCSIQ_B_QNO), i);
 	}
-	return warn_code;
 }
 
-static ASC_DCNT
+static u32
 AscLoadMicroCode(PortAddr iop_base, ushort s_addr,
 		 const uchar *mcode_buf, ushort mcode_size)
 {
-	ASC_DCNT chksum;
+	u32 chksum;
 	ushort mcode_word_size;
 	ushort mcode_chksum;
 
@@ -4186,13 +4015,13 @@ static void AscInitQLinkVar(ASC_DVC_VAR *asc_dvc)
 	}
 }
 
-static ushort AscInitMicroCodeVar(ASC_DVC_VAR *asc_dvc)
+static int AscInitMicroCodeVar(ASC_DVC_VAR *asc_dvc)
 {
 	int i;
-	ushort warn_code;
+	int warn_code;
 	PortAddr iop_base;
-	ASC_PADDR phy_addr;
-	ASC_DCNT phy_size;
+	__le32 phy_addr;
+	__le32 phy_size;
 	struct asc_board *board = asc_dvc_to_board(asc_dvc);
 
 	iop_base = asc_dvc->iop_base;
@@ -4231,12 +4060,12 @@ static ushort AscInitMicroCodeVar(ASC_DVC_VAR *asc_dvc)
 	AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR);
 	if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) {
 		asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR;
-		warn_code = UW_ERR;
+		warn_code = -EINVAL;
 		goto err_mcode_start;
 	}
 	if (AscStartChip(iop_base) != 1) {
 		asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP;
-		warn_code = UW_ERR;
+		warn_code = -EIO;
 		goto err_mcode_start;
 	}
 
@@ -4250,13 +4079,13 @@ err_dma_map:
 	return warn_code;
 }
 
-static ushort AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc)
+static int AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc)
 {
 	const struct firmware *fw;
 	const char fwname[] = "advansys/mcode.bin";
 	int err;
 	unsigned long chksum;
-	ushort warn_code;
+	int warn_code;
 	PortAddr iop_base;
 
 	iop_base = asc_dvc->iop_base;
@@ -4268,15 +4097,13 @@ static ushort AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc)
 	}
 	asc_dvc->init_state |= ASC_INIT_STATE_BEG_LOAD_MC;
 	if (asc_dvc->err_code != 0)
-		return UW_ERR;
+		return ASC_ERROR;
 	if (!AscFindSignature(asc_dvc->iop_base)) {
 		asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
 		return warn_code;
 	}
 	AscDisableInterrupt(iop_base);
-	warn_code |= AscInitLram(asc_dvc);
-	if (asc_dvc->err_code != 0)
-		return UW_ERR;
+	AscInitLram(asc_dvc);
 
 	err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev);
 	if (err) {
@@ -4336,7 +4163,7 @@ static int AdvLoadMicrocode(AdvPortAddr iop_base, const unsigned char *buf,
 			     int size, int memsize, int chksum)
 {
 	int i, j, end, len = 0;
-	ADV_DCNT sum;
+	u32 sum;
 
 	AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, 0);
 
@@ -4382,38 +4209,72 @@ static int AdvLoadMicrocode(AdvPortAddr iop_base, const unsigned char *buf,
4382 return 0; 4209 return 0;
4383} 4210}
4384 4211
4385static void AdvBuildCarrierFreelist(struct adv_dvc_var *asc_dvc) 4212static void AdvBuildCarrierFreelist(struct adv_dvc_var *adv_dvc)
4386{ 4213{
4387 ADV_CARR_T *carrp; 4214 off_t carr_offset = 0, next_offset;
4388 ADV_SDCNT buf_size; 4215 dma_addr_t carr_paddr;
4389 ADV_PADDR carr_paddr; 4216 int carr_num = ADV_CARRIER_BUFSIZE / sizeof(ADV_CARR_T), i;
4390 4217
4391 carrp = (ADV_CARR_T *) ADV_16BALIGN(asc_dvc->carrier_buf); 4218 for (i = 0; i < carr_num; i++) {
4392 asc_dvc->carr_freelist = NULL; 4219 carr_offset = i * sizeof(ADV_CARR_T);
4393 if (carrp == asc_dvc->carrier_buf) { 4220 /* Get physical address of the carrier 'carrp'. */
4394 buf_size = ADV_CARRIER_BUFSIZE; 4221 carr_paddr = adv_dvc->carrier_addr + carr_offset;
4395 } else { 4222
4396 buf_size = ADV_CARRIER_BUFSIZE - sizeof(ADV_CARR_T); 4223 adv_dvc->carrier[i].carr_pa = cpu_to_le32(carr_paddr);
4224 adv_dvc->carrier[i].carr_va = cpu_to_le32(carr_offset);
4225 adv_dvc->carrier[i].areq_vpa = 0;
4226 next_offset = carr_offset + sizeof(ADV_CARR_T);
4227 if (i == carr_num)
4228 next_offset = ~0;
4229 adv_dvc->carrier[i].next_vpa = cpu_to_le32(next_offset);
4397 } 4230 }
4231 /*
4232 * We cannot have a carrier with 'carr_va' of '0', as
4233 * a reference to this carrier would be interpreted as
4234 * list termination.
4235 * So start at carrier 1 with the freelist.
4236 */
4237 adv_dvc->carr_freelist = &adv_dvc->carrier[1];
4238}
4398 4239
4399 do { 4240static ADV_CARR_T *adv_get_carrier(struct adv_dvc_var *adv_dvc, u32 offset)
4400 /* Get physical address of the carrier 'carrp'. */ 4241{
4401 carr_paddr = cpu_to_le32(virt_to_bus(carrp)); 4242 int index;
4402 4243
4403 buf_size -= sizeof(ADV_CARR_T); 4244 BUG_ON(offset > ADV_CARRIER_BUFSIZE);
4404 4245
4405 carrp->carr_pa = carr_paddr; 4246 index = offset / sizeof(ADV_CARR_T);
4406 carrp->carr_va = cpu_to_le32(ADV_VADDR_TO_U32(carrp)); 4247 return &adv_dvc->carrier[index];
4248}
4407 4249
4408 /* 4250static ADV_CARR_T *adv_get_next_carrier(struct adv_dvc_var *adv_dvc)
4409 * Insert the carrier at the beginning of the freelist. 4251{
4410 */ 4252 ADV_CARR_T *carrp = adv_dvc->carr_freelist;
4411 carrp->next_vpa = 4253 u32 next_vpa = le32_to_cpu(carrp->next_vpa);
4412 cpu_to_le32(ADV_VADDR_TO_U32(asc_dvc->carr_freelist)); 4254
4413 asc_dvc->carr_freelist = carrp; 4255 if (next_vpa == 0 || next_vpa == ~0) {
4256 ASC_DBG(1, "invalid vpa offset 0x%x\n", next_vpa);
4257 return NULL;
4258 }
4259
4260 adv_dvc->carr_freelist = adv_get_carrier(adv_dvc, next_vpa);
4261 /*
4262 * insert stopper carrier to terminate list
4263 */
4264 carrp->next_vpa = cpu_to_le32(ADV_CQ_STOPPER);
4265
4266 return carrp;
4267}
4268
4269/*
4270 * 'offset' is the index in the request pointer array
4271 */
4272static adv_req_t * adv_get_reqp(struct adv_dvc_var *adv_dvc, u32 offset)
4273{
4274 struct asc_board *boardp = adv_dvc->drv_ptr;
4414 4275
4415 carrp++; 4276 BUG_ON(offset > adv_dvc->max_host_qng);
4416 } while (buf_size > 0); 4277 return &boardp->adv_reqp[offset];
4417} 4278}
4418 4279
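The rewritten carrier code replaces the virt_to_bus() arithmetic above with a single coherent DMA block in which a carrier's "virtual pointer" (carr_va, next_vpa) is just its byte offset into that block, so adv_get_carrier() can map a firmware-supplied offset back to a host pointer with one divide, and the freelist starts at carrier 1 because an offset of 0 doubles as the list terminator. A minimal user-space sketch of the same bookkeeping; the structure and function names mirror the driver, while the pool size, bus addresses and stopper value are invented for the example:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CARR_STOPPER 0xFFFFFFFFu        /* models ADV_CQ_STOPPER (~0) */

struct carrier {
    uint32_t pa;        /* bus address of this carrier (faked below) */
    uint32_t va;        /* byte offset into the carrier block */
    uint32_t areq_vpa;  /* request handle, filled in by the "firmware" */
    uint32_t next_vpa;  /* offset of next free carrier, or stopper */
};

struct dvc {
    struct carrier *carrier;    /* one coherent DMA block in the driver */
    struct carrier *freelist;
    uint32_t carrier_addr;      /* dma_addr_t of the block (faked) */
    int carr_num;
};

static void build_freelist(struct dvc *d)
{
    for (int i = 0; i < d->carr_num; i++) {
        uint32_t off = i * (uint32_t)sizeof(struct carrier);

        d->carrier[i].pa = d->carrier_addr + off;
        d->carrier[i].va = off;
        d->carrier[i].areq_vpa = 0;
        /* the final carrier terminates the chain */
        d->carrier[i].next_vpa = (i == d->carr_num - 1) ?
            CARR_STOPPER : off + (uint32_t)sizeof(struct carrier);
    }
    /* va == 0 would read as list termination, so skip carrier 0 */
    d->freelist = &d->carrier[1];
}

static struct carrier *get_carrier(struct dvc *d, uint32_t off)
{
    assert(off < d->carr_num * sizeof(struct carrier));
    return &d->carrier[off / sizeof(struct carrier)];
}

/* pop the freelist head and turn it into the new stopper */
static struct carrier *get_next_carrier(struct dvc *d)
{
    struct carrier *carrp = d->freelist;
    uint32_t next = carrp->next_vpa;

    if (next == 0 || next == CARR_STOPPER)
        return NULL;                    /* pool exhausted */
    d->freelist = get_carrier(d, next);
    carrp->next_vpa = CARR_STOPPER;
    return carrp;
}

int main(void)
{
    struct dvc d = { .carrier_addr = 0x10000, .carr_num = 16 };

    d.carrier = calloc(d.carr_num, sizeof(*d.carrier));
    build_freelist(&d);

    struct carrier *icq = get_next_carrier(&d);
    struct carrier *irq = get_next_carrier(&d);
    printf("icq offset %u, irq offset %u\n",
           (unsigned)icq->va, (unsigned)irq->va);
    free(d.carrier);
    return 0;
}

With this layout the ICQ and IRQ setup paths for all three chip families below reduce to one adv_get_next_carrier() call plus a NULL check, in place of the open-coded freelist pop and stopper assignment they each used to repeat.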
4419/* 4280/*
@@ -4432,10 +4293,9 @@ static void AdvBuildCarrierFreelist(struct adv_dvc_var *asc_dvc)
4432 */ 4293 */
4433static int 4294static int
4434AdvSendIdleCmd(ADV_DVC_VAR *asc_dvc, 4295AdvSendIdleCmd(ADV_DVC_VAR *asc_dvc,
4435 ushort idle_cmd, ADV_DCNT idle_cmd_parameter) 4296 ushort idle_cmd, u32 idle_cmd_parameter)
4436{ 4297{
4437 int result; 4298 int result, i, j;
4438 ADV_DCNT i, j;
4439 AdvPortAddr iop_base; 4299 AdvPortAddr iop_base;
4440 4300
4441 iop_base = asc_dvc->iop_base; 4301 iop_base = asc_dvc->iop_base;
@@ -4902,17 +4762,11 @@ static int AdvInitAsc3550Driver(ADV_DVC_VAR *asc_dvc)
4902 * Set-up the Host->RISC Initiator Command Queue (ICQ). 4762 * Set-up the Host->RISC Initiator Command Queue (ICQ).
4903 */ 4763 */
4904 4764
4905 if ((asc_dvc->icq_sp = asc_dvc->carr_freelist) == NULL) { 4765 asc_dvc->icq_sp = adv_get_next_carrier(asc_dvc);
4766 if (!asc_dvc->icq_sp) {
4906 asc_dvc->err_code |= ASC_IERR_NO_CARRIER; 4767 asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
4907 return ADV_ERROR; 4768 return ADV_ERROR;
4908 } 4769 }
4909 asc_dvc->carr_freelist = (ADV_CARR_T *)
4910 ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->icq_sp->next_vpa));
4911
4912 /*
4913 * The first command issued will be placed in the stopper carrier.
4914 */
4915 asc_dvc->icq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);
4916 4770
4917 /* 4771 /*
4918 * Set RISC ICQ physical address start value. 4772 * Set RISC ICQ physical address start value.
@@ -4922,21 +4776,11 @@ static int AdvInitAsc3550Driver(ADV_DVC_VAR *asc_dvc)
4922 /* 4776 /*
4923 * Set-up the RISC->Host Initiator Response Queue (IRQ). 4777 * Set-up the RISC->Host Initiator Response Queue (IRQ).
4924 */ 4778 */
4925 if ((asc_dvc->irq_sp = asc_dvc->carr_freelist) == NULL) { 4779 asc_dvc->irq_sp = adv_get_next_carrier(asc_dvc);
4780 if (!asc_dvc->irq_sp) {
4926 asc_dvc->err_code |= ASC_IERR_NO_CARRIER; 4781 asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
4927 return ADV_ERROR; 4782 return ADV_ERROR;
4928 } 4783 }
4929 asc_dvc->carr_freelist = (ADV_CARR_T *)
4930 ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->next_vpa));
4931
4932 /*
4933 * The first command completed by the RISC will be placed in
4934 * the stopper.
4935 *
4936 * Note: Set 'next_vpa' to ASC_CQ_STOPPER. When the request is
4937 * completed the RISC will set the ASC_RQ_STOPPER bit.
4938 */
4939 asc_dvc->irq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);
4940 4784
4941 /* 4785 /*
4942 * Set RISC IRQ physical address start value. 4786 * Set RISC IRQ physical address start value.
@@ -5399,17 +5243,12 @@ static int AdvInitAsc38C0800Driver(ADV_DVC_VAR *asc_dvc)
5399 * Set-up the Host->RISC Initiator Command Queue (ICQ). 5243 * Set-up the Host->RISC Initiator Command Queue (ICQ).
5400 */ 5244 */
5401 5245
5402 if ((asc_dvc->icq_sp = asc_dvc->carr_freelist) == NULL) { 5246 asc_dvc->icq_sp = adv_get_next_carrier(asc_dvc);
5247 if (!asc_dvc->icq_sp) {
5248 ASC_DBG(0, "Failed to get ICQ carrier\n");
5403 asc_dvc->err_code |= ASC_IERR_NO_CARRIER; 5249 asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
5404 return ADV_ERROR; 5250 return ADV_ERROR;
5405 } 5251 }
5406 asc_dvc->carr_freelist = (ADV_CARR_T *)
5407 ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->icq_sp->next_vpa));
5408
5409 /*
5410 * The first command issued will be placed in the stopper carrier.
5411 */
5412 asc_dvc->icq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);
5413 5252
5414 /* 5253 /*
5415 * Set RISC ICQ physical address start value. 5254 * Set RISC ICQ physical address start value.
@@ -5420,21 +5259,12 @@ static int AdvInitAsc38C0800Driver(ADV_DVC_VAR *asc_dvc)
5420 /* 5259 /*
5421 * Set-up the RISC->Host Initiator Response Queue (IRQ). 5260 * Set-up the RISC->Host Initiator Response Queue (IRQ).
5422 */ 5261 */
5423 if ((asc_dvc->irq_sp = asc_dvc->carr_freelist) == NULL) { 5262 asc_dvc->irq_sp = adv_get_next_carrier(asc_dvc);
5263 if (!asc_dvc->irq_sp) {
5264 ASC_DBG(0, "Failed to get IRQ carrier\n");
5424 asc_dvc->err_code |= ASC_IERR_NO_CARRIER; 5265 asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
5425 return ADV_ERROR; 5266 return ADV_ERROR;
5426 } 5267 }
5427 asc_dvc->carr_freelist = (ADV_CARR_T *)
5428 ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->next_vpa));
5429
5430 /*
5431 * The first command completed by the RISC will be placed in
5432 * the stopper.
5433 *
5434 * Note: Set 'next_vpa' to ASC_CQ_STOPPER. When the request is
5435 * completed the RISC will set the ASC_RQ_STOPPER bit.
5436 */
5437 asc_dvc->irq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);
5438 5268
5439 /* 5269 /*
5440 * Set RISC IRQ physical address start value. 5270 * Set RISC IRQ physical address start value.
@@ -5909,17 +5739,11 @@ static int AdvInitAsc38C1600Driver(ADV_DVC_VAR *asc_dvc)
5909 /* 5739 /*
5910 * Set-up the Host->RISC Initiator Command Queue (ICQ). 5740 * Set-up the Host->RISC Initiator Command Queue (ICQ).
5911 */ 5741 */
5912 if ((asc_dvc->icq_sp = asc_dvc->carr_freelist) == NULL) { 5742 asc_dvc->icq_sp = adv_get_next_carrier(asc_dvc);
5743 if (!asc_dvc->icq_sp) {
5913 asc_dvc->err_code |= ASC_IERR_NO_CARRIER; 5744 asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
5914 return ADV_ERROR; 5745 return ADV_ERROR;
5915 } 5746 }
5916 asc_dvc->carr_freelist = (ADV_CARR_T *)
5917 ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->icq_sp->next_vpa));
5918
5919 /*
5920 * The first command issued will be placed in the stopper carrier.
5921 */
5922 asc_dvc->icq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);
5923 5747
5924 /* 5748 /*
5925 * Set RISC ICQ physical address start value. Initialize the 5749 * Set RISC ICQ physical address start value. Initialize the
@@ -5933,21 +5757,11 @@ static int AdvInitAsc38C1600Driver(ADV_DVC_VAR *asc_dvc)
5933 /* 5757 /*
5934 * Set-up the RISC->Host Initiator Response Queue (IRQ). 5758 * Set-up the RISC->Host Initiator Response Queue (IRQ).
5935 */ 5759 */
5936 if ((asc_dvc->irq_sp = asc_dvc->carr_freelist) == NULL) { 5760 asc_dvc->irq_sp = adv_get_next_carrier(asc_dvc);
5761 if (!asc_dvc->irq_sp) {
5937 asc_dvc->err_code |= ASC_IERR_NO_CARRIER; 5762 asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
5938 return ADV_ERROR; 5763 return ADV_ERROR;
5939 } 5764 }
5940 asc_dvc->carr_freelist = (ADV_CARR_T *)
5941 ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->next_vpa));
5942
5943 /*
5944 * The first command completed by the RISC will be placed in
5945 * the stopper.
5946 *
5947 * Note: Set 'next_vpa' to ASC_CQ_STOPPER. When the request is
5948 * completed the RISC will set the ASC_RQ_STOPPER bit.
5949 */
5950 asc_dvc->irq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);
5951 5765
5952 /* 5766 /*
5953 * Set RISC IRQ physical address start value. 5767 * Set RISC IRQ physical address start value.
@@ -6134,15 +5948,16 @@ static void adv_async_callback(ADV_DVC_VAR *adv_dvc_varp, uchar code)
6134 */ 5948 */
6135static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp) 5949static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
6136{ 5950{
6137 struct asc_board *boardp; 5951 struct asc_board *boardp = adv_dvc_varp->drv_ptr;
5952 u32 srb_tag;
6138 adv_req_t *reqp; 5953 adv_req_t *reqp;
6139 adv_sgblk_t *sgblkp; 5954 adv_sgblk_t *sgblkp;
6140 struct scsi_cmnd *scp; 5955 struct scsi_cmnd *scp;
6141 struct Scsi_Host *shost; 5956 u32 resid_cnt;
6142 ADV_DCNT resid_cnt; 5957 dma_addr_t sense_addr;
6143 5958
6144 ASC_DBG(1, "adv_dvc_varp 0x%lx, scsiqp 0x%lx\n", 5959 ASC_DBG(1, "adv_dvc_varp 0x%p, scsiqp 0x%p\n",
6145 (ulong)adv_dvc_varp, (ulong)scsiqp); 5960 adv_dvc_varp, scsiqp);
6146 ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp); 5961 ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp);
6147 5962
6148 /* 5963 /*
@@ -6150,22 +5965,9 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
6150 * completed. The adv_req_t structure actually contains the 5965 * completed. The adv_req_t structure actually contains the
6151 * completed ADV_SCSI_REQ_Q structure. 5966 * completed ADV_SCSI_REQ_Q structure.
6152 */ 5967 */
6153 reqp = (adv_req_t *)ADV_U32_TO_VADDR(scsiqp->srb_ptr); 5968 srb_tag = le32_to_cpu(scsiqp->srb_tag);
6154 ASC_DBG(1, "reqp 0x%lx\n", (ulong)reqp); 5969 scp = scsi_host_find_tag(boardp->shost, scsiqp->srb_tag);
6155 if (reqp == NULL) {
6156 ASC_PRINT("adv_isr_callback: reqp is NULL\n");
6157 return;
6158 }
6159 5970
6160 /*
6161 * Get the struct scsi_cmnd structure and Scsi_Host structure for the
6162 * command that has been completed.
6163 *
6164 * Note: The adv_req_t request structure and adv_sgblk_t structure,
6165 * if any, are dropped, because a board structure pointer can not be
6166 * determined.
6167 */
6168 scp = reqp->cmndp;
6169 ASC_DBG(1, "scp 0x%p\n", scp); 5971 ASC_DBG(1, "scp 0x%p\n", scp);
6170 if (scp == NULL) { 5972 if (scp == NULL) {
6171 ASC_PRINT 5973 ASC_PRINT
@@ -6174,12 +5976,25 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
6174 } 5976 }
6175 ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len); 5977 ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len);
6176 5978
6177 shost = scp->device->host; 5979 reqp = (adv_req_t *)scp->host_scribble;
6178 ASC_STATS(shost, callback); 5980 ASC_DBG(1, "reqp 0x%lx\n", (ulong)reqp);
6179 ASC_DBG(1, "shost 0x%p\n", shost); 5981 if (reqp == NULL) {
5982 ASC_PRINT("adv_isr_callback: reqp is NULL\n");
5983 return;
5984 }
5985 /*
5986 * Remove backreferences to avoid duplicate
5987 * command completions.
5988 */
5989 scp->host_scribble = NULL;
5990 reqp->cmndp = NULL;
5991
5992 ASC_STATS(boardp->shost, callback);
5993 ASC_DBG(1, "shost 0x%p\n", boardp->shost);
6180 5994
6181 boardp = shost_priv(shost); 5995 sense_addr = le32_to_cpu(scsiqp->sense_addr);
6182 BUG_ON(adv_dvc_varp != &boardp->dvc_var.adv_dvc_var); 5996 dma_unmap_single(boardp->dev, sense_addr,
5997 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
6183 5998
6184 /* 5999 /*
6185 * 'done_status' contains the command's ending status. 6000 * 'done_status' contains the command's ending status.
@@ -6272,18 +6087,10 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
6272 /* Remove 'sgblkp' from the request list. */ 6087 /* Remove 'sgblkp' from the request list. */
6273 reqp->sgblkp = sgblkp->next_sgblkp; 6088 reqp->sgblkp = sgblkp->next_sgblkp;
6274 6089
6275 /* Add 'sgblkp' to the board free list. */ 6090 dma_pool_free(boardp->adv_sgblk_pool, sgblkp,
6276 sgblkp->next_sgblkp = boardp->adv_sgblkp; 6091 sgblkp->sg_addr);
6277 boardp->adv_sgblkp = sgblkp;
6278 } 6092 }
6279 6093
6280 /*
6281 * Free the adv_req_t structure used with the command by adding
6282 * it back to the board free list.
6283 */
6284 reqp->next_reqp = boardp->adv_reqp;
6285 boardp->adv_reqp = reqp;
6286
6287 ASC_DBG(1, "done\n"); 6094 ASC_DBG(1, "done\n");
6288} 6095}
6289 6096
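In the reworked completion path the command is recovered from the block-layer tag via scsi_host_find_tag() rather than from a raw pointer round-tripped through the firmware, and the scp->host_scribble / reqp->cmndp cross-links are cleared before status processing so a stray second completion of the same tag is dropped instead of completing a recycled command twice. A toy model of that hand-off, with a plain array standing in for the midlayer's tag table:

#include <stdio.h>

struct cmnd { void *host_scribble; int done; };
struct req  { struct cmnd *cmndp; };

#define MAX_TAGS 4
static struct cmnd *tag_table[MAX_TAGS];    /* models scsi_host_find_tag() */
static struct req reqs[MAX_TAGS];           /* models boardp->adv_reqp[] */

static void complete_cmd(unsigned int tag)
{
    struct cmnd *scp = tag < MAX_TAGS ? tag_table[tag] : NULL;

    if (!scp) {
        puts("completion: scp is NULL, dropped");
        return;
    }
    struct req *reqp = scp->host_scribble;
    if (!reqp) {
        puts("completion: reqp is NULL, dropped");
        return;
    }
    /* remove backreferences to avoid duplicate command completions */
    scp->host_scribble = NULL;
    reqp->cmndp = NULL;
    scp->done = 1;
    printf("tag %u completed\n", tag);
}

int main(void)
{
    struct cmnd c = { 0 };

    tag_table[1] = &c;          /* submit: midlayer assigned tag 1 */
    c.host_scribble = &reqs[1]; /* driver links command and request */
    reqs[1].cmndp = &c;

    complete_cmd(1);            /* normal completion */
    complete_cmd(1);            /* duplicate is caught and dropped */
    return 0;
}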
@@ -6312,8 +6119,9 @@ static int AdvISR(ADV_DVC_VAR *asc_dvc)
6312 uchar int_stat; 6119 uchar int_stat;
6313 ushort target_bit; 6120 ushort target_bit;
6314 ADV_CARR_T *free_carrp; 6121 ADV_CARR_T *free_carrp;
6315 ADV_VADDR irq_next_vpa; 6122 __le32 irq_next_vpa;
6316 ADV_SCSI_REQ_Q *scsiq; 6123 ADV_SCSI_REQ_Q *scsiq;
6124 adv_req_t *reqp;
6317 6125
6318 iop_base = asc_dvc->iop_base; 6126 iop_base = asc_dvc->iop_base;
6319 6127
@@ -6356,25 +6164,28 @@ static int AdvISR(ADV_DVC_VAR *asc_dvc)
6356 * Check if the IRQ stopper carrier contains a completed request. 6164 * Check if the IRQ stopper carrier contains a completed request.
6357 */ 6165 */
6358 while (((irq_next_vpa = 6166 while (((irq_next_vpa =
6359 le32_to_cpu(asc_dvc->irq_sp->next_vpa)) & ASC_RQ_DONE) != 0) { 6167 le32_to_cpu(asc_dvc->irq_sp->next_vpa)) & ADV_RQ_DONE) != 0) {
6360 /* 6168 /*
6361 * Get a pointer to the newly completed ADV_SCSI_REQ_Q structure. 6169 * Get a pointer to the newly completed ADV_SCSI_REQ_Q structure.
6362 * The RISC will have set 'areq_vpa' to a virtual address. 6170 * The RISC will have set 'areq_vpa' to a virtual address.
6363 * 6171 *
6364 * The firmware will have copied the ASC_SCSI_REQ_Q.scsiq_ptr 6172 * The firmware will have copied the ADV_SCSI_REQ_Q.scsiq_ptr
6365 * field to the carrier ADV_CARR_T.areq_vpa field. The conversion 6173 * field to the carrier ADV_CARR_T.areq_vpa field. The conversion
6366 * below complements the conversion of ASC_SCSI_REQ_Q.scsiq_ptr' 6174 * below complements the conversion of ADV_SCSI_REQ_Q.scsiq_ptr'
6367 * in AdvExeScsiQueue(). 6175 * in AdvExeScsiQueue().
6368 */ 6176 */
6369 scsiq = (ADV_SCSI_REQ_Q *) 6177 u32 pa_offset = le32_to_cpu(asc_dvc->irq_sp->areq_vpa);
6370 ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->areq_vpa)); 6178 ASC_DBG(1, "irq_sp %p areq_vpa %u\n",
6179 asc_dvc->irq_sp, pa_offset);
6180 reqp = adv_get_reqp(asc_dvc, pa_offset);
6181 scsiq = &reqp->scsi_req_q;
6371 6182
6372 /* 6183 /*
6373 * Request finished with good status and the queue was not 6184 * Request finished with good status and the queue was not
6374 * DMAed to host memory by the firmware. Set all status fields 6185 * DMAed to host memory by the firmware. Set all status fields
6375 * to indicate good status. 6186 * to indicate good status.
6376 */ 6187 */
6377 if ((irq_next_vpa & ASC_RQ_GOOD) != 0) { 6188 if ((irq_next_vpa & ADV_RQ_GOOD) != 0) {
6378 scsiq->done_status = QD_NO_ERROR; 6189 scsiq->done_status = QD_NO_ERROR;
6379 scsiq->host_status = scsiq->scsi_status = 0; 6190 scsiq->host_status = scsiq->scsi_status = 0;
6380 scsiq->data_cnt = 0L; 6191 scsiq->data_cnt = 0L;
@@ -6386,11 +6197,10 @@ static int AdvISR(ADV_DVC_VAR *asc_dvc)
6386 * stopper carrier. 6197 * stopper carrier.
6387 */ 6198 */
6388 free_carrp = asc_dvc->irq_sp; 6199 free_carrp = asc_dvc->irq_sp;
6389 asc_dvc->irq_sp = (ADV_CARR_T *) 6200 asc_dvc->irq_sp = adv_get_carrier(asc_dvc,
6390 ADV_U32_TO_VADDR(ASC_GET_CARRP(irq_next_vpa)); 6201 ADV_GET_CARRP(irq_next_vpa));
6391 6202
6392 free_carrp->next_vpa = 6203 free_carrp->next_vpa = asc_dvc->carr_freelist->carr_va;
6393 cpu_to_le32(ADV_VADDR_TO_U32(asc_dvc->carr_freelist));
6394 asc_dvc->carr_freelist = free_carrp; 6204 asc_dvc->carr_freelist = free_carrp;
6395 asc_dvc->carr_pending_cnt--; 6205 asc_dvc->carr_pending_cnt--;
6396 6206
@@ -6405,7 +6215,6 @@ static int AdvISR(ADV_DVC_VAR *asc_dvc)
6405 * Notify the driver of the completed request by passing 6215 * Notify the driver of the completed request by passing
6406 * the ADV_SCSI_REQ_Q pointer to its callback function. 6216 * the ADV_SCSI_REQ_Q pointer to its callback function.
6407 */ 6217 */
6408 scsiq->a_flag |= ADV_SCSIQ_DONE;
6409 adv_isr_callback(asc_dvc, scsiq); 6218 adv_isr_callback(asc_dvc, scsiq);
6410 /* 6219 /*
6411 * Note: After the driver callback function is called, 'scsiq' 6220 * Note: After the driver callback function is called, 'scsiq'
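The service loop depends on the firmware packing status into the low bits of next_vpa: a "request done" bit, an optional "good status" bit, and the offset of the next carrier recovered by masking those bits off. A sketch of decoding one entry; the bit and mask values are written to match the driver's ADV_RQ_DONE / ADV_RQ_GOOD / ADV_NEXT_VPA_MASK definitions as best recalled, so treat them as illustrative:

#include <stdint.h>
#include <stdio.h>

#define RQ_DONE        0x00000001u
#define RQ_GOOD        0x00000002u
#define NEXT_VPA_MASK  0xFFFFFFF0u
#define GET_CARRP(vpa) ((vpa) & NEXT_VPA_MASK)

int main(void)
{
    /* firmware wrote: next carrier at offset 0x40, request done and good */
    uint32_t irq_next_vpa = 0x40 | RQ_DONE | RQ_GOOD;

    if (irq_next_vpa & RQ_DONE) {
        printf("request done, %s, next carrier offset 0x%x\n",
               (irq_next_vpa & RQ_GOOD) ? "good status" : "check status",
               (unsigned)GET_CARRP(irq_next_vpa));
        /*
         * The serviced carrier then goes back on the freelist:
         *   free_carrp->next_vpa = freelist->carr_va;
         *   freelist = free_carrp;
         */
    }
    return 0;
}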
@@ -6521,11 +6330,11 @@ AscCalSDTRData(ASC_DVC_VAR *asc_dvc, uchar sdtr_period, uchar syn_offset)
6521 return byte; 6330 return byte;
6522} 6331}
6523 6332
6524static int AscSetChipSynRegAtID(PortAddr iop_base, uchar id, uchar sdtr_data) 6333static bool AscSetChipSynRegAtID(PortAddr iop_base, uchar id, uchar sdtr_data)
6525{ 6334{
6526 ASC_SCSI_BIT_ID_TYPE org_id; 6335 ASC_SCSI_BIT_ID_TYPE org_id;
6527 int i; 6336 int i;
6528 int sta = TRUE; 6337 bool sta = true;
6529 6338
6530 AscSetBank(iop_base, 1); 6339 AscSetBank(iop_base, 1);
6531 org_id = AscReadChipDvcID(iop_base); 6340 org_id = AscReadChipDvcID(iop_base);
@@ -6539,10 +6348,10 @@ static int AscSetChipSynRegAtID(PortAddr iop_base, uchar id, uchar sdtr_data)
6539 AscSetBank(iop_base, 0); 6348 AscSetBank(iop_base, 0);
6540 AscSetChipSyn(iop_base, sdtr_data); 6349 AscSetChipSyn(iop_base, sdtr_data);
6541 if (AscGetChipSyn(iop_base) != sdtr_data) { 6350 if (AscGetChipSyn(iop_base) != sdtr_data) {
6542 sta = FALSE; 6351 sta = false;
6543 } 6352 }
6544 } else { 6353 } else {
6545 sta = FALSE; 6354 sta = false;
6546 } 6355 }
6547 AscSetBank(iop_base, 1); 6356 AscSetBank(iop_base, 1);
6548 AscWriteChipDvcID(iop_base, org_id); 6357 AscWriteChipDvcID(iop_base, org_id);
@@ -6556,12 +6365,12 @@ static void AscSetChipSDTR(PortAddr iop_base, uchar sdtr_data, uchar tid_no)
6556 AscPutMCodeSDTRDoneAtID(iop_base, tid_no, sdtr_data); 6365 AscPutMCodeSDTRDoneAtID(iop_base, tid_no, sdtr_data);
6557} 6366}
6558 6367
6559static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc) 6368static void AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
6560{ 6369{
6561 EXT_MSG ext_msg; 6370 EXT_MSG ext_msg;
6562 EXT_MSG out_msg; 6371 EXT_MSG out_msg;
6563 ushort halt_q_addr; 6372 ushort halt_q_addr;
6564 int sdtr_accept; 6373 bool sdtr_accept;
6565 ushort int_halt_code; 6374 ushort int_halt_code;
6566 ASC_SCSI_BIT_ID_TYPE scsi_busy; 6375 ASC_SCSI_BIT_ID_TYPE scsi_busy;
6567 ASC_SCSI_BIT_ID_TYPE target_id; 6376 ASC_SCSI_BIT_ID_TYPE target_id;
@@ -6603,14 +6412,14 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
6603 boardp->sdtr_data[tid_no] = 0; 6412 boardp->sdtr_data[tid_no] = 0;
6604 } 6413 }
6605 AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); 6414 AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
6606 return (0); 6415 return;
6607 } else if (int_halt_code == ASC_HALT_ENABLE_ASYN_USE_SYN_FIX) { 6416 } else if (int_halt_code == ASC_HALT_ENABLE_ASYN_USE_SYN_FIX) {
6608 if (asc_dvc->pci_fix_asyn_xfer & target_id) { 6417 if (asc_dvc->pci_fix_asyn_xfer & target_id) {
6609 AscSetChipSDTR(iop_base, asyn_sdtr, tid_no); 6418 AscSetChipSDTR(iop_base, asyn_sdtr, tid_no);
6610 boardp->sdtr_data[tid_no] = asyn_sdtr; 6419 boardp->sdtr_data[tid_no] = asyn_sdtr;
6611 } 6420 }
6612 AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); 6421 AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
6613 return (0); 6422 return;
6614 } else if (int_halt_code == ASC_HALT_EXTMSG_IN) { 6423 } else if (int_halt_code == ASC_HALT_EXTMSG_IN) {
6615 AscMemWordCopyPtrFromLram(iop_base, 6424 AscMemWordCopyPtrFromLram(iop_base,
6616 ASCV_MSGIN_BEG, 6425 ASCV_MSGIN_BEG,
@@ -6620,10 +6429,10 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
6620 if (ext_msg.msg_type == EXTENDED_MESSAGE && 6429 if (ext_msg.msg_type == EXTENDED_MESSAGE &&
6621 ext_msg.msg_req == EXTENDED_SDTR && 6430 ext_msg.msg_req == EXTENDED_SDTR &&
6622 ext_msg.msg_len == MS_SDTR_LEN) { 6431 ext_msg.msg_len == MS_SDTR_LEN) {
6623 sdtr_accept = TRUE; 6432 sdtr_accept = true;
6624 if ((ext_msg.req_ack_offset > ASC_SYN_MAX_OFFSET)) { 6433 if ((ext_msg.req_ack_offset > ASC_SYN_MAX_OFFSET)) {
6625 6434
6626 sdtr_accept = FALSE; 6435 sdtr_accept = false;
6627 ext_msg.req_ack_offset = ASC_SYN_MAX_OFFSET; 6436 ext_msg.req_ack_offset = ASC_SYN_MAX_OFFSET;
6628 } 6437 }
6629 if ((ext_msg.xfer_period < 6438 if ((ext_msg.xfer_period <
@@ -6631,7 +6440,7 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
6631 || (ext_msg.xfer_period > 6440 || (ext_msg.xfer_period >
6632 asc_dvc->sdtr_period_tbl[asc_dvc-> 6441 asc_dvc->sdtr_period_tbl[asc_dvc->
6633 max_sdtr_index])) { 6442 max_sdtr_index])) {
6634 sdtr_accept = FALSE; 6443 sdtr_accept = false;
6635 ext_msg.xfer_period = 6444 ext_msg.xfer_period =
6636 asc_dvc->sdtr_period_tbl[asc_dvc-> 6445 asc_dvc->sdtr_period_tbl[asc_dvc->
6637 min_sdtr_index]; 6446 min_sdtr_index];
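The SDTR branch is the usual negotiation clamp: if the target requests a REQ/ACK offset or a transfer period outside what the chip supports, sdtr_accept is cleared and the out-of-range field is overwritten, so the reply message carries the adjusted values back to the target. The rule in isolation; the limit and the period table here are made up for the example:

#include <stdbool.h>
#include <stdio.h>

#define SYN_MAX_OFFSET 15   /* illustrative, stands in for ASC_SYN_MAX_OFFSET */

/* illustrative period table, fastest entry first */
static const unsigned char period_tbl[] = { 25, 30, 35, 50, 62, 68, 75, 85 };
#define MIN_IDX 0
#define MAX_IDX 7

static bool clamp_sdtr(unsigned char *offset, unsigned char *period)
{
    bool accept = true;

    if (*offset > SYN_MAX_OFFSET) {
        accept = false;
        *offset = SYN_MAX_OFFSET;
    }
    if (*period < period_tbl[MIN_IDX] || *period > period_tbl[MAX_IDX]) {
        accept = false;
        *period = period_tbl[MIN_IDX];  /* reset to the first table entry */
    }
    return accept;
}

int main(void)
{
    unsigned char off = 32, per = 10;   /* target asks beyond the limits */
    bool ok = clamp_sdtr(&off, &per);

    printf("accept=%d offset=%d period=%d\n", ok, off, per);
    return 0;
}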
@@ -6696,7 +6505,7 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
6696 (ushort)ASC_SCSIQ_B_CNTL), 6505 (ushort)ASC_SCSIQ_B_CNTL),
6697 q_cntl); 6506 q_cntl);
6698 AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); 6507 AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
6699 return (0); 6508 return;
6700 } else if (ext_msg.msg_type == EXTENDED_MESSAGE && 6509 } else if (ext_msg.msg_type == EXTENDED_MESSAGE &&
6701 ext_msg.msg_req == EXTENDED_WDTR && 6510 ext_msg.msg_req == EXTENDED_WDTR &&
6702 ext_msg.msg_len == MS_WDTR_LEN) { 6511 ext_msg.msg_len == MS_WDTR_LEN) {
@@ -6712,7 +6521,7 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
6712 (ushort)ASC_SCSIQ_B_CNTL), 6521 (ushort)ASC_SCSIQ_B_CNTL),
6713 q_cntl); 6522 q_cntl);
6714 AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); 6523 AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
6715 return (0); 6524 return;
6716 } else { 6525 } else {
6717 6526
6718 ext_msg.msg_type = MESSAGE_REJECT; 6527 ext_msg.msg_type = MESSAGE_REJECT;
@@ -6726,7 +6535,7 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
6726 (ushort)ASC_SCSIQ_B_CNTL), 6535 (ushort)ASC_SCSIQ_B_CNTL),
6727 q_cntl); 6536 q_cntl);
6728 AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); 6537 AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
6729 return (0); 6538 return;
6730 } 6539 }
6731 } else if (int_halt_code == ASC_HALT_CHK_CONDITION) { 6540 } else if (int_halt_code == ASC_HALT_CHK_CONDITION) {
6732 6541
@@ -6783,7 +6592,7 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
6783 AscWriteLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B, scsi_busy); 6592 AscWriteLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B, scsi_busy);
6784 6593
6785 AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); 6594 AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
6786 return (0); 6595 return;
6787 } else if (int_halt_code == ASC_HALT_SDTR_REJECTED) { 6596 } else if (int_halt_code == ASC_HALT_SDTR_REJECTED) {
6788 6597
6789 AscMemWordCopyPtrFromLram(iop_base, 6598 AscMemWordCopyPtrFromLram(iop_base,
@@ -6805,7 +6614,7 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
6805 (ushort)(halt_q_addr + 6614 (ushort)(halt_q_addr +
6806 (ushort)ASC_SCSIQ_B_CNTL), q_cntl); 6615 (ushort)ASC_SCSIQ_B_CNTL), q_cntl);
6807 AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); 6616 AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
6808 return (0); 6617 return;
6809 } else if (int_halt_code == ASC_HALT_SS_QUEUE_FULL) { 6618 } else if (int_halt_code == ASC_HALT_SS_QUEUE_FULL) {
6810 6619
6811 scsi_status = AscReadLramByte(iop_base, 6620 scsi_status = AscReadLramByte(iop_base,
@@ -6850,166 +6659,9 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
6850 } 6659 }
6851 } 6660 }
6852 AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); 6661 AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
6853 return (0); 6662 return;
6854 }
6855#if CC_VERY_LONG_SG_LIST
6856 else if (int_halt_code == ASC_HALT_HOST_COPY_SG_LIST_TO_RISC) {
6857 uchar q_no;
6858 ushort q_addr;
6859 uchar sg_wk_q_no;
6860 uchar first_sg_wk_q_no;
6861 ASC_SCSI_Q *scsiq; /* Ptr to driver request. */
6862 ASC_SG_HEAD *sg_head; /* Ptr to driver SG request. */
6863 ASC_SG_LIST_Q scsi_sg_q; /* Structure written to queue. */
6864 ushort sg_list_dwords;
6865 ushort sg_entry_cnt;
6866 uchar next_qp;
6867 int i;
6868
6869 q_no = AscReadLramByte(iop_base, (ushort)ASCV_REQ_SG_LIST_QP);
6870 if (q_no == ASC_QLINK_END)
6871 return 0;
6872
6873 q_addr = ASC_QNO_TO_QADDR(q_no);
6874
6875 /*
6876 * Convert the request's SRB pointer to a host ASC_SCSI_REQ
6877 * structure pointer using a macro provided by the driver.
6878 * The ASC_SCSI_REQ pointer provides a pointer to the
6879 * host ASC_SG_HEAD structure.
6880 */
6881 /* Read request's SRB pointer. */
6882 scsiq = (ASC_SCSI_Q *)
6883 ASC_SRB2SCSIQ(ASC_U32_TO_VADDR(AscReadLramDWord(iop_base,
6884 (ushort)
6885 (q_addr +
6886 ASC_SCSIQ_D_SRBPTR))));
6887
6888 /*
6889 * Get request's first and working SG queue.
6890 */
6891 sg_wk_q_no = AscReadLramByte(iop_base,
6892 (ushort)(q_addr +
6893 ASC_SCSIQ_B_SG_WK_QP));
6894
6895 first_sg_wk_q_no = AscReadLramByte(iop_base,
6896 (ushort)(q_addr +
6897 ASC_SCSIQ_B_FIRST_SG_WK_QP));
6898
6899 /*
6900 * Reset request's working SG queue back to the
6901 * first SG queue.
6902 */
6903 AscWriteLramByte(iop_base,
6904 (ushort)(q_addr +
6905 (ushort)ASC_SCSIQ_B_SG_WK_QP),
6906 first_sg_wk_q_no);
6907
6908 sg_head = scsiq->sg_head;
6909
6910 /*
6911 * Set sg_entry_cnt to the number of SG elements
6912 * that will be completed on this interrupt.
6913 *
6914 * Note: The allocated SG queues contain ASC_MAX_SG_LIST - 1
6915 * SG elements. The data_cnt and data_addr fields which
6916 * add 1 to the SG element capacity are not used when
6917 * restarting SG handling after a halt.
6918 */
6919 if (scsiq->remain_sg_entry_cnt > (ASC_MAX_SG_LIST - 1)) {
6920 sg_entry_cnt = ASC_MAX_SG_LIST - 1;
6921
6922 /*
6923 * Keep track of remaining number of SG elements that
6924 * will need to be handled on the next interrupt.
6925 */
6926 scsiq->remain_sg_entry_cnt -= (ASC_MAX_SG_LIST - 1);
6927 } else {
6928 sg_entry_cnt = scsiq->remain_sg_entry_cnt;
6929 scsiq->remain_sg_entry_cnt = 0;
6930 }
6931
6932 /*
6933 * Copy SG elements into the list of allocated SG queues.
6934 *
6935 * Last index completed is saved in scsiq->next_sg_index.
6936 */
6937 next_qp = first_sg_wk_q_no;
6938 q_addr = ASC_QNO_TO_QADDR(next_qp);
6939 scsi_sg_q.sg_head_qp = q_no;
6940 scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
6941 for (i = 0; i < sg_head->queue_cnt; i++) {
6942 scsi_sg_q.seq_no = i + 1;
6943 if (sg_entry_cnt > ASC_SG_LIST_PER_Q) {
6944 sg_list_dwords = (uchar)(ASC_SG_LIST_PER_Q * 2);
6945 sg_entry_cnt -= ASC_SG_LIST_PER_Q;
6946 /*
6947 * After very first SG queue RISC FW uses next
6948 * SG queue first element then checks sg_list_cnt
6949 * against zero and then decrements, so set
6950 * sg_list_cnt 1 less than number of SG elements
6951 * in each SG queue.
6952 */
6953 scsi_sg_q.sg_list_cnt = ASC_SG_LIST_PER_Q - 1;
6954 scsi_sg_q.sg_cur_list_cnt =
6955 ASC_SG_LIST_PER_Q - 1;
6956 } else {
6957 /*
6958 * This is the last SG queue in the list of
6959 * allocated SG queues. If there are more
6960 * SG elements than will fit in the allocated
6961 * queues, then set the QCSG_SG_XFER_MORE flag.
6962 */
6963 if (scsiq->remain_sg_entry_cnt != 0) {
6964 scsi_sg_q.cntl |= QCSG_SG_XFER_MORE;
6965 } else {
6966 scsi_sg_q.cntl |= QCSG_SG_XFER_END;
6967 }
6968 /* equals sg_entry_cnt * 2 */
6969 sg_list_dwords = sg_entry_cnt << 1;
6970 scsi_sg_q.sg_list_cnt = sg_entry_cnt - 1;
6971 scsi_sg_q.sg_cur_list_cnt = sg_entry_cnt - 1;
6972 sg_entry_cnt = 0;
6973 }
6974
6975 scsi_sg_q.q_no = next_qp;
6976 AscMemWordCopyPtrToLram(iop_base,
6977 q_addr + ASC_SCSIQ_SGHD_CPY_BEG,
6978 (uchar *)&scsi_sg_q,
6979 sizeof(ASC_SG_LIST_Q) >> 1);
6980
6981 AscMemDWordCopyPtrToLram(iop_base,
6982 q_addr + ASC_SGQ_LIST_BEG,
6983 (uchar *)&sg_head->
6984 sg_list[scsiq->next_sg_index],
6985 sg_list_dwords);
6986
6987 scsiq->next_sg_index += ASC_SG_LIST_PER_Q;
6988
6989 /*
6990 * If the just completed SG queue contained the
6991 * last SG element, then no more SG queues need
6992 * to be written.
6993 */
6994 if (scsi_sg_q.cntl & QCSG_SG_XFER_END) {
6995 break;
6996 }
6997
6998 next_qp = AscReadLramByte(iop_base,
6999 (ushort)(q_addr +
7000 ASC_SCSIQ_B_FWD));
7001 q_addr = ASC_QNO_TO_QADDR(next_qp);
7002 }
7003
7004 /*
7005 * Clear the halt condition so the RISC will be restarted
7006 * after the return.
7007 */
7008 AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
7009 return (0);
7010 } 6663 }
7011#endif /* CC_VERY_LONG_SG_LIST */ 6664 return;
7012 return (0);
7013} 6665}
7014 6666
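The deleted CC_VERY_LONG_SG_LIST block was never compiled in, but the mechanism it implemented, streaming a long scatter-gather list to the firmware as fixed-size queues each flagged "more follows" or "end of transfer", is the generic pattern. A heavily simplified model that collapses the driver's per-halt restart into one loop; the queue size and flag names are invented:

#include <stdio.h>

#define SG_PER_Q 7      /* illustrative, stands in for ASC_SG_LIST_PER_Q */
#define F_MORE   0x1    /* models QCSG_SG_XFER_MORE */
#define F_END    0x2    /* models QCSG_SG_XFER_END */

int main(void)
{
    int remain = 23;    /* total scatter-gather elements to stream */
    int seq = 0;

    while (remain > 0) {
        int cnt = remain > SG_PER_Q ? SG_PER_Q : remain;
        int flags = remain > SG_PER_Q ? F_MORE : F_END;

        remain -= cnt;
        printf("queue %d: %d elements, %s\n",
               ++seq, cnt, (flags & F_END) ? "END" : "MORE");
    }
    return 0;
}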
7015/* 6667/*
@@ -7043,7 +6695,7 @@ DvcGetQinfo(PortAddr iop_base, ushort s_addr, uchar *inbuf, int words)
7043static uchar 6695static uchar
7044_AscCopyLramScsiDoneQ(PortAddr iop_base, 6696_AscCopyLramScsiDoneQ(PortAddr iop_base,
7045 ushort q_addr, 6697 ushort q_addr,
7046 ASC_QDONE_INFO *scsiq, ASC_DCNT max_dma_count) 6698 ASC_QDONE_INFO *scsiq, unsigned int max_dma_count)
7047{ 6699{
7048 ushort _val; 6700 ushort _val;
7049 uchar sg_queue_cnt; 6701 uchar sg_queue_cnt;
@@ -7070,10 +6722,10 @@ _AscCopyLramScsiDoneQ(PortAddr iop_base,
7070 /* 6722 /*
7071 * Read high word of remain bytes from alternate location. 6723 * Read high word of remain bytes from alternate location.
7072 */ 6724 */
7073 scsiq->remain_bytes = (((ADV_DCNT)AscReadLramWord(iop_base, 6725 scsiq->remain_bytes = (((u32)AscReadLramWord(iop_base,
7074 (ushort)(q_addr + 6726 (ushort)(q_addr +
7075 (ushort) 6727 (ushort)
7076 ASC_SCSIQ_W_ALT_DC1))) 6728 ASC_SCSIQ_W_ALT_DC1)))
7077 << 16); 6729 << 16);
7078 /* 6730 /*
7079 * Read low word of remain bytes from original location. 6731 * Read low word of remain bytes from original location.
@@ -7093,25 +6745,24 @@ _AscCopyLramScsiDoneQ(PortAddr iop_base,
7093 */ 6745 */
7094static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep) 6746static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
7095{ 6747{
7096 struct asc_board *boardp; 6748 struct asc_board *boardp = asc_dvc_varp->drv_ptr;
6749 u32 srb_tag;
7097 struct scsi_cmnd *scp; 6750 struct scsi_cmnd *scp;
7098 struct Scsi_Host *shost;
7099 6751
7100 ASC_DBG(1, "asc_dvc_varp 0x%p, qdonep 0x%p\n", asc_dvc_varp, qdonep); 6752 ASC_DBG(1, "asc_dvc_varp 0x%p, qdonep 0x%p\n", asc_dvc_varp, qdonep);
7101 ASC_DBG_PRT_ASC_QDONE_INFO(2, qdonep); 6753 ASC_DBG_PRT_ASC_QDONE_INFO(2, qdonep);
7102 6754
7103 scp = advansys_srb_to_ptr(asc_dvc_varp, qdonep->d2.srb_ptr); 6755 /*
6756 * Decrease the srb_tag by 1 to find the SCSI command
6757 */
6758 srb_tag = qdonep->d2.srb_tag - 1;
6759 scp = scsi_host_find_tag(boardp->shost, srb_tag);
7104 if (!scp) 6760 if (!scp)
7105 return; 6761 return;
7106 6762
7107 ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len); 6763 ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len);
7108 6764
7109 shost = scp->device->host; 6765 ASC_STATS(boardp->shost, callback);
7110 ASC_STATS(shost, callback);
7111 ASC_DBG(1, "shost 0x%p\n", shost);
7112
7113 boardp = shost_priv(shost);
7114 BUG_ON(asc_dvc_varp != &boardp->dvc_var.asc_dvc_var);
7115 6766
7116 dma_unmap_single(boardp->dev, scp->SCp.dma_handle, 6767 dma_unmap_single(boardp->dev, scp->SCp.dma_handle,
7117 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); 6768 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
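For the narrow boards the microcode reserves srb_tag 0, so asc_build_req() later in this patch stores the block-layer tag plus one and this completion path subtracts one again before calling scsi_host_find_tag(); the bias has to be mirrored exactly in both directions:

#include <stdio.h>

/* models the +1 bias: the microcode treats srb_tag 0 as "no request" */
static unsigned int tag_to_srb(unsigned int blk_tag) { return blk_tag + 1; }
static unsigned int srb_to_tag(unsigned int srb_tag) { return srb_tag - 1; }

int main(void)
{
    for (unsigned int tag = 0; tag < 3; tag++) {
        unsigned int srb = tag_to_srb(tag);

        printf("blk tag %u -> srb_tag %u -> blk tag %u\n",
               tag, srb, srb_to_tag(srb));
    }
    return 0;
}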
@@ -7220,7 +6871,7 @@ static int AscIsrQDone(ASC_DVC_VAR *asc_dvc)
7220 uchar cur_target_qng; 6871 uchar cur_target_qng;
7221 ASC_QDONE_INFO scsiq_buf; 6872 ASC_QDONE_INFO scsiq_buf;
7222 ASC_QDONE_INFO *scsiq; 6873 ASC_QDONE_INFO *scsiq;
7223 int false_overrun; 6874 bool false_overrun;
7224 6875
7225 iop_base = asc_dvc->iop_base; 6876 iop_base = asc_dvc->iop_base;
7226 n_q_used = 1; 6877 n_q_used = 1;
@@ -7294,14 +6945,17 @@ static int AscIsrQDone(ASC_DVC_VAR *asc_dvc)
7294 scsiq->d3.done_stat = QD_WITH_ERROR; 6945 scsiq->d3.done_stat = QD_WITH_ERROR;
7295 goto FATAL_ERR_QDONE; 6946 goto FATAL_ERR_QDONE;
7296 } 6947 }
7297 if ((scsiq->d2.srb_ptr == 0UL) || 6948 if ((scsiq->d2.srb_tag == 0UL) ||
7298 ((scsiq->q_status & QS_ABORTED) != 0)) { 6949 ((scsiq->q_status & QS_ABORTED) != 0)) {
7299 return (0x11); 6950 return (0x11);
7300 } else if (scsiq->q_status == QS_DONE) { 6951 } else if (scsiq->q_status == QS_DONE) {
7301 false_overrun = FALSE; 6952 /*
6953 * This is also curious.
6954 * false_overrun will _always_ be set to 'false'
6955 */
6956 false_overrun = false;
7302 if (scsiq->extra_bytes != 0) { 6957 if (scsiq->extra_bytes != 0) {
7303 scsiq->remain_bytes += 6958 scsiq->remain_bytes += scsiq->extra_bytes;
7304 (ADV_DCNT)scsiq->extra_bytes;
7305 } 6959 }
7306 if (scsiq->d3.done_stat == QD_WITH_ERROR) { 6960 if (scsiq->d3.done_stat == QD_WITH_ERROR) {
7307 if (scsiq->d3.host_stat == 6961 if (scsiq->d3.host_stat ==
@@ -7372,23 +7026,23 @@ static int AscISR(ASC_DVC_VAR *asc_dvc)
7372 uchar host_flag; 7026 uchar host_flag;
7373 7027
7374 iop_base = asc_dvc->iop_base; 7028 iop_base = asc_dvc->iop_base;
7375 int_pending = FALSE; 7029 int_pending = ASC_FALSE;
7376 7030
7377 if (AscIsIntPending(iop_base) == 0) 7031 if (AscIsIntPending(iop_base) == 0)
7378 return int_pending; 7032 return int_pending;
7379 7033
7380 if ((asc_dvc->init_state & ASC_INIT_STATE_END_LOAD_MC) == 0) { 7034 if ((asc_dvc->init_state & ASC_INIT_STATE_END_LOAD_MC) == 0) {
7381 return ERR; 7035 return ASC_ERROR;
7382 } 7036 }
7383 if (asc_dvc->in_critical_cnt != 0) { 7037 if (asc_dvc->in_critical_cnt != 0) {
7384 AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_ON_CRITICAL); 7038 AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_ON_CRITICAL);
7385 return ERR; 7039 return ASC_ERROR;
7386 } 7040 }
7387 if (asc_dvc->is_in_int) { 7041 if (asc_dvc->is_in_int) {
7388 AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_RE_ENTRY); 7042 AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_RE_ENTRY);
7389 return ERR; 7043 return ASC_ERROR;
7390 } 7044 }
7391 asc_dvc->is_in_int = TRUE; 7045 asc_dvc->is_in_int = true;
7392 ctrl_reg = AscGetChipControl(iop_base); 7046 ctrl_reg = AscGetChipControl(iop_base);
7393 saved_ctrl_reg = ctrl_reg & (~(CC_SCSI_RESET | CC_CHIP_RESET | 7047 saved_ctrl_reg = ctrl_reg & (~(CC_SCSI_RESET | CC_CHIP_RESET |
7394 CC_SINGLE_STEP | CC_DIAG | CC_TEST)); 7048 CC_SINGLE_STEP | CC_DIAG | CC_TEST));
@@ -7396,7 +7050,7 @@ static int AscISR(ASC_DVC_VAR *asc_dvc)
7396 if (chipstat & CSW_SCSI_RESET_LATCH) { 7050 if (chipstat & CSW_SCSI_RESET_LATCH) {
7397 if (!(asc_dvc->bus_type & (ASC_IS_VL | ASC_IS_EISA))) { 7051 if (!(asc_dvc->bus_type & (ASC_IS_VL | ASC_IS_EISA))) {
7398 int i = 10; 7052 int i = 10;
7399 int_pending = TRUE; 7053 int_pending = ASC_TRUE;
7400 asc_dvc->sdtr_done = 0; 7054 asc_dvc->sdtr_done = 0;
7401 saved_ctrl_reg &= (uchar)(~CC_HALT); 7055 saved_ctrl_reg &= (uchar)(~CC_HALT);
7402 while ((AscGetChipStatus(iop_base) & 7056 while ((AscGetChipStatus(iop_base) &
@@ -7418,15 +7072,11 @@ static int AscISR(ASC_DVC_VAR *asc_dvc)
7418 (uchar)(host_flag | (uchar)ASC_HOST_FLAG_IN_ISR)); 7072 (uchar)(host_flag | (uchar)ASC_HOST_FLAG_IN_ISR));
7419 if ((chipstat & CSW_INT_PENDING) || (int_pending)) { 7073 if ((chipstat & CSW_INT_PENDING) || (int_pending)) {
7420 AscAckInterrupt(iop_base); 7074 AscAckInterrupt(iop_base);
7421 int_pending = TRUE; 7075 int_pending = ASC_TRUE;
7422 if ((chipstat & CSW_HALTED) && (ctrl_reg & CC_SINGLE_STEP)) { 7076 if ((chipstat & CSW_HALTED) && (ctrl_reg & CC_SINGLE_STEP)) {
7423 if (AscIsrChipHalted(asc_dvc) == ERR) { 7077 AscIsrChipHalted(asc_dvc);
7424 goto ISR_REPORT_QDONE_FATAL_ERROR; 7078 saved_ctrl_reg &= (uchar)(~CC_HALT);
7425 } else {
7426 saved_ctrl_reg &= (uchar)(~CC_HALT);
7427 }
7428 } else { 7079 } else {
7429 ISR_REPORT_QDONE_FATAL_ERROR:
7430 if ((asc_dvc->dvc_cntl & ASC_CNTL_INT_MULTI_Q) != 0) { 7080 if ((asc_dvc->dvc_cntl & ASC_CNTL_INT_MULTI_Q) != 0) {
7431 while (((status = 7081 while (((status =
7432 AscIsrQDone(asc_dvc)) & 0x01) != 0) { 7082 AscIsrQDone(asc_dvc)) & 0x01) != 0) {
@@ -7440,20 +7090,20 @@ static int AscISR(ASC_DVC_VAR *asc_dvc)
7440 } while (status == 0x11); 7090 } while (status == 0x11);
7441 } 7091 }
7442 if ((status & 0x80) != 0) 7092 if ((status & 0x80) != 0)
7443 int_pending = ERR; 7093 int_pending = ASC_ERROR;
7444 } 7094 }
7445 } 7095 }
7446 AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, host_flag); 7096 AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, host_flag);
7447 AscSetChipLramAddr(iop_base, saved_ram_addr); 7097 AscSetChipLramAddr(iop_base, saved_ram_addr);
7448 AscSetChipControl(iop_base, saved_ctrl_reg); 7098 AscSetChipControl(iop_base, saved_ctrl_reg);
7449 asc_dvc->is_in_int = FALSE; 7099 asc_dvc->is_in_int = false;
7450 return int_pending; 7100 return int_pending;
7451} 7101}
7452 7102
7453/* 7103/*
7454 * advansys_reset() 7104 * advansys_reset()
7455 * 7105 *
7456 * Reset the bus associated with the command 'scp'. 7106 * Reset the host associated with the command 'scp'.
7457 * 7107 *
7458 * This function runs its own thread. Interrupts must be blocked but 7108 * This function runs its own thread. Interrupts must be blocked but
7459 * sleeping is allowed and no locking other than for host structures is 7109 * sleeping is allowed and no locking other than for host structures is
@@ -7471,7 +7121,7 @@ static int advansys_reset(struct scsi_cmnd *scp)
7471 7121
7472 ASC_STATS(shost, reset); 7122 ASC_STATS(shost, reset);
7473 7123
7474 scmd_printk(KERN_INFO, scp, "SCSI bus reset started...\n"); 7124 scmd_printk(KERN_INFO, scp, "SCSI host reset started...\n");
7475 7125
7476 if (ASC_NARROW_BOARD(boardp)) { 7126 if (ASC_NARROW_BOARD(boardp)) {
7477 ASC_DVC_VAR *asc_dvc = &boardp->dvc_var.asc_dvc_var; 7127 ASC_DVC_VAR *asc_dvc = &boardp->dvc_var.asc_dvc_var;
@@ -7482,20 +7132,19 @@ static int advansys_reset(struct scsi_cmnd *scp)
7482 7132
7483 /* Refer to ASC_IERR_* definitions for meaning of 'err_code'. */ 7133 /* Refer to ASC_IERR_* definitions for meaning of 'err_code'. */
7484 if (asc_dvc->err_code || !asc_dvc->overrun_dma) { 7134 if (asc_dvc->err_code || !asc_dvc->overrun_dma) {
7485 scmd_printk(KERN_INFO, scp, "SCSI bus reset error: " 7135 scmd_printk(KERN_INFO, scp, "SCSI host reset error: "
7486 "0x%x, status: 0x%x\n", asc_dvc->err_code, 7136 "0x%x, status: 0x%x\n", asc_dvc->err_code,
7487 status); 7137 status);
7488 ret = FAILED; 7138 ret = FAILED;
7489 } else if (status) { 7139 } else if (status) {
7490 scmd_printk(KERN_INFO, scp, "SCSI bus reset warning: " 7140 scmd_printk(KERN_INFO, scp, "SCSI host reset warning: "
7491 "0x%x\n", status); 7141 "0x%x\n", status);
7492 } else { 7142 } else {
7493 scmd_printk(KERN_INFO, scp, "SCSI bus reset " 7143 scmd_printk(KERN_INFO, scp, "SCSI host reset "
7494 "successful\n"); 7144 "successful\n");
7495 } 7145 }
7496 7146
7497 ASC_DBG(1, "after AscInitAsc1000Driver()\n"); 7147 ASC_DBG(1, "after AscInitAsc1000Driver()\n");
7498 spin_lock_irqsave(shost->host_lock, flags);
7499 } else { 7148 } else {
7500 /* 7149 /*
7501 * If the suggest reset bus flags are set, then reset the bus. 7150 * If the suggest reset bus flags are set, then reset the bus.
@@ -7504,28 +7153,25 @@ static int advansys_reset(struct scsi_cmnd *scp)
7504 ADV_DVC_VAR *adv_dvc = &boardp->dvc_var.adv_dvc_var; 7153 ADV_DVC_VAR *adv_dvc = &boardp->dvc_var.adv_dvc_var;
7505 7154
7506 /* 7155 /*
7507 * Reset the target's SCSI bus. 7156 * Reset the chip and SCSI bus.
7508 */ 7157 */
7509 ASC_DBG(1, "before AdvResetChipAndSB()\n"); 7158 ASC_DBG(1, "before AdvResetChipAndSB()\n");
7510 switch (AdvResetChipAndSB(adv_dvc)) { 7159 switch (AdvResetChipAndSB(adv_dvc)) {
7511 case ASC_TRUE: 7160 case ASC_TRUE:
7512 scmd_printk(KERN_INFO, scp, "SCSI bus reset " 7161 scmd_printk(KERN_INFO, scp, "SCSI host reset "
7513 "successful\n"); 7162 "successful\n");
7514 break; 7163 break;
7515 case ASC_FALSE: 7164 case ASC_FALSE:
7516 default: 7165 default:
7517 scmd_printk(KERN_INFO, scp, "SCSI bus reset error\n"); 7166 scmd_printk(KERN_INFO, scp, "SCSI host reset error\n");
7518 ret = FAILED; 7167 ret = FAILED;
7519 break; 7168 break;
7520 } 7169 }
7521 spin_lock_irqsave(shost->host_lock, flags); 7170 spin_lock_irqsave(shost->host_lock, flags);
7522 AdvISR(adv_dvc); 7171 AdvISR(adv_dvc);
7172 spin_unlock_irqrestore(shost->host_lock, flags);
7523 } 7173 }
7524 7174
7525 /* Save the time of the most recently completed reset. */
7526 boardp->last_reset = jiffies;
7527 spin_unlock_irqrestore(shost->host_lock, flags);
7528
7529 ASC_DBG(1, "ret %d\n", ret); 7175 ASC_DBG(1, "ret %d\n", ret);
7530 7176
7531 return ret; 7177 return ret;
@@ -7584,9 +7230,10 @@ static irqreturn_t advansys_interrupt(int irq, void *dev_id)
7584 struct Scsi_Host *shost = dev_id; 7230 struct Scsi_Host *shost = dev_id;
7585 struct asc_board *boardp = shost_priv(shost); 7231 struct asc_board *boardp = shost_priv(shost);
7586 irqreturn_t result = IRQ_NONE; 7232 irqreturn_t result = IRQ_NONE;
7233 unsigned long flags;
7587 7234
7588 ASC_DBG(2, "boardp 0x%p\n", boardp); 7235 ASC_DBG(2, "boardp 0x%p\n", boardp);
7589 spin_lock(shost->host_lock); 7236 spin_lock_irqsave(shost->host_lock, flags);
7590 if (ASC_NARROW_BOARD(boardp)) { 7237 if (ASC_NARROW_BOARD(boardp)) {
7591 if (AscIsIntPending(shost->io_port)) { 7238 if (AscIsIntPending(shost->io_port)) {
7592 result = IRQ_HANDLED; 7239 result = IRQ_HANDLED;
@@ -7601,38 +7248,38 @@ static irqreturn_t advansys_interrupt(int irq, void *dev_id)
7601 ASC_STATS(shost, interrupt); 7248 ASC_STATS(shost, interrupt);
7602 } 7249 }
7603 } 7250 }
7604 spin_unlock(shost->host_lock); 7251 spin_unlock_irqrestore(shost->host_lock, flags);
7605 7252
7606 ASC_DBG(1, "end\n"); 7253 ASC_DBG(1, "end\n");
7607 return result; 7254 return result;
7608} 7255}
7609 7256
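advansys_interrupt() switches to the irqsave/irqrestore lock variants, which record the caller's interrupt state instead of assuming interrupts were enabled on entry; one plausible reading is that the handler must remain safe when invoked from a context that already runs with interrupts off. A toy model of the save/restore contract, with a global flag standing in for the CPU interrupt state:

#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled = true;    /* toy stand-in for the CPU IRQ flag */

static void lock_irqsave(bool *flags)
{
    *flags = irqs_enabled;          /* remember the caller's state */
    irqs_enabled = false;           /* disabled while the lock is held */
}

static void unlock_irqrestore(bool flags)
{
    irqs_enabled = flags;           /* restore, never blindly re-enable */
}

int main(void)
{
    bool flags;

    irqs_enabled = false;           /* caller already has IRQs disabled */
    lock_irqsave(&flags);
    /* ... critical section: the handler body ... */
    unlock_irqrestore(flags);

    printf("caller's IRQ state preserved: %s\n",
           irqs_enabled ? "on (wrong)" : "off (correct)");
    return 0;
}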
7610static int AscHostReqRiscHalt(PortAddr iop_base) 7257static bool AscHostReqRiscHalt(PortAddr iop_base)
7611{ 7258{
7612 int count = 0; 7259 int count = 0;
7613 int sta = 0; 7260 bool sta = false;
7614 uchar saved_stop_code; 7261 uchar saved_stop_code;
7615 7262
7616 if (AscIsChipHalted(iop_base)) 7263 if (AscIsChipHalted(iop_base))
7617 return (1); 7264 return true;
7618 saved_stop_code = AscReadLramByte(iop_base, ASCV_STOP_CODE_B); 7265 saved_stop_code = AscReadLramByte(iop_base, ASCV_STOP_CODE_B);
7619 AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, 7266 AscWriteLramByte(iop_base, ASCV_STOP_CODE_B,
7620 ASC_STOP_HOST_REQ_RISC_HALT | ASC_STOP_REQ_RISC_STOP); 7267 ASC_STOP_HOST_REQ_RISC_HALT | ASC_STOP_REQ_RISC_STOP);
7621 do { 7268 do {
7622 if (AscIsChipHalted(iop_base)) { 7269 if (AscIsChipHalted(iop_base)) {
7623 sta = 1; 7270 sta = true;
7624 break; 7271 break;
7625 } 7272 }
7626 mdelay(100); 7273 mdelay(100);
7627 } while (count++ < 20); 7274 } while (count++ < 20);
7628 AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, saved_stop_code); 7275 AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, saved_stop_code);
7629 return (sta); 7276 return sta;
7630} 7277}
7631 7278
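AscHostReqRiscHalt() is the standard poll-until-condition-or-timeout shape: write the halt request, then recheck up to 20 times with a 100 ms delay before giving up, restoring the saved stop code either way. The same shape in plain C, with the chip condition stubbed and usleep() standing in for mdelay():

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int polls_needed = 3;    /* stub: "chip halts" on the third check */

static bool chip_halted(void)
{
    return --polls_needed <= 0;
}

static bool wait_for_halt(void)
{
    int count = 0;

    do {
        if (chip_halted())
            return true;
        usleep(100 * 1000);     /* models mdelay(100) */
    } while (count++ < 20);
    return false;
}

int main(void)
{
    printf("halted: %s\n", wait_for_halt() ? "yes" : "no");
    return 0;
}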
7632static int 7279static bool
7633AscSetRunChipSynRegAtID(PortAddr iop_base, uchar tid_no, uchar sdtr_data) 7280AscSetRunChipSynRegAtID(PortAddr iop_base, uchar tid_no, uchar sdtr_data)
7634{ 7281{
7635 int sta = FALSE; 7282 bool sta = false;
7636 7283
7637 if (AscHostReqRiscHalt(iop_base)) { 7284 if (AscHostReqRiscHalt(iop_base)) {
7638 sta = AscSetChipSynRegAtID(iop_base, tid_no, sdtr_data); 7285 sta = AscSetChipSynRegAtID(iop_base, tid_no, sdtr_data);
@@ -7851,13 +7498,17 @@ static int advansys_slave_configure(struct scsi_device *sdev)
7851 return 0; 7498 return 0;
7852} 7499}
7853 7500
7854static __le32 advansys_get_sense_buffer_dma(struct scsi_cmnd *scp) 7501static __le32 asc_get_sense_buffer_dma(struct scsi_cmnd *scp)
7855{ 7502{
7856 struct asc_board *board = shost_priv(scp->device->host); 7503 struct asc_board *board = shost_priv(scp->device->host);
7504
7857 scp->SCp.dma_handle = dma_map_single(board->dev, scp->sense_buffer, 7505 scp->SCp.dma_handle = dma_map_single(board->dev, scp->sense_buffer,
7858 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); 7506 SCSI_SENSE_BUFFERSIZE,
7859 dma_cache_sync(board->dev, scp->sense_buffer, 7507 DMA_FROM_DEVICE);
7860 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); 7508 if (dma_mapping_error(board->dev, scp->SCp.dma_handle)) {
7509 ASC_DBG(1, "failed to map sense buffer\n");
7510 return 0;
7511 }
7861 return cpu_to_le32(scp->SCp.dma_handle); 7512 return cpu_to_le32(scp->SCp.dma_handle);
7862} 7513}
7863 7514
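Where the old helper called dma_map_single() and the long-gone dma_cache_sync() without checking for failure, the new one tests dma_mapping_error() and returns 0 so asc_build_req() can fail the command with ASC_BUSY and let the midlayer retry. The shape of that check with the DMA API stubbed out; the _stub helpers are stand-ins, not kernel functions:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_ERROR ((uint64_t)-1)

/* stub: a real dma_map_single() can fail, e.g. when IOMMU space runs out */
static uint64_t dma_map_single_stub(void *buf, size_t len, int fail)
{
    (void)buf; (void)len;
    return fail ? DMA_ERROR : 0x1000;
}

static int dma_mapping_error_stub(uint64_t addr)
{
    return addr == DMA_ERROR;
}

static uint32_t map_sense_buffer(void *sense, int fail)
{
    uint64_t handle = dma_map_single_stub(sense, 96, fail);

    if (dma_mapping_error_stub(handle)) {
        puts("failed to map sense buffer");
        return 0;               /* caller turns this into ASC_BUSY */
    }
    return (uint32_t)handle;
}

int main(void)
{
    char sense[96];

    printf("ok path:   0x%x\n", (unsigned)map_sense_buffer(sense, 0));
    printf("fail path: 0x%x\n", (unsigned)map_sense_buffer(sense, 1));
    return 0;
}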
@@ -7866,17 +7517,16 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
7866{ 7517{
7867 struct asc_dvc_var *asc_dvc = &boardp->dvc_var.asc_dvc_var; 7518 struct asc_dvc_var *asc_dvc = &boardp->dvc_var.asc_dvc_var;
7868 int use_sg; 7519 int use_sg;
7520 u32 srb_tag;
7869 7521
7870 memset(asc_scsi_q, 0, sizeof(*asc_scsi_q)); 7522 memset(asc_scsi_q, 0, sizeof(*asc_scsi_q));
7871 7523
7872 /* 7524 /*
7873 * Point the ASC_SCSI_Q to the 'struct scsi_cmnd'. 7525 * Set the srb_tag to the command tag + 1, as
7526 * srb_tag '0' is used internally by the chip.
7874 */ 7527 */
7875 asc_scsi_q->q2.srb_ptr = advansys_ptr_to_srb(asc_dvc, scp); 7528 srb_tag = scp->request->tag + 1;
7876 if (asc_scsi_q->q2.srb_ptr == BAD_SRB) { 7529 asc_scsi_q->q2.srb_tag = srb_tag;
7877 scp->result = HOST_BYTE(DID_SOFT_ERROR);
7878 return ASC_ERROR;
7879 }
7880 7530
7881 /* 7531 /*
7882 * Build the ASC_SCSI_Q request. 7532 * Build the ASC_SCSI_Q request.
@@ -7887,8 +7537,10 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
7887 asc_scsi_q->q1.target_lun = scp->device->lun; 7537 asc_scsi_q->q1.target_lun = scp->device->lun;
7888 asc_scsi_q->q2.target_ix = 7538 asc_scsi_q->q2.target_ix =
7889 ASC_TIDLUN_TO_IX(scp->device->id, scp->device->lun); 7539 ASC_TIDLUN_TO_IX(scp->device->id, scp->device->lun);
7890 asc_scsi_q->q1.sense_addr = advansys_get_sense_buffer_dma(scp); 7540 asc_scsi_q->q1.sense_addr = asc_get_sense_buffer_dma(scp);
7891 asc_scsi_q->q1.sense_len = SCSI_SENSE_BUFFERSIZE; 7541 asc_scsi_q->q1.sense_len = SCSI_SENSE_BUFFERSIZE;
7542 if (!asc_scsi_q->q1.sense_addr)
7543 return ASC_BUSY;
7892 7544
7893 /* 7545 /*
7894 * If there are any outstanding requests for the current target, 7546 * If there are any outstanding requests for the current target,
@@ -7910,7 +7562,10 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
7910 7562
7911 /* Build ASC_SCSI_Q */ 7563 /* Build ASC_SCSI_Q */
7912 use_sg = scsi_dma_map(scp); 7564 use_sg = scsi_dma_map(scp);
7913 if (use_sg != 0) { 7565 if (use_sg < 0) {
7566 ASC_DBG(1, "failed to map sglist\n");
7567 return ASC_BUSY;
7568 } else if (use_sg > 0) {
7914 int sgcnt; 7569 int sgcnt;
7915 struct scatterlist *slp; 7570 struct scatterlist *slp;
7916 struct asc_sg_head *asc_sg_head; 7571 struct asc_sg_head *asc_sg_head;
@@ -7975,20 +7630,19 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
7975 * ADV_ERROR(-1) - SG List creation failed 7630 * ADV_ERROR(-1) - SG List creation failed
7976 */ 7631 */
7977static int 7632static int
7978adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, struct scsi_cmnd *scp, 7633adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp,
7979 int use_sg) 7634 ADV_SCSI_REQ_Q *scsiqp, struct scsi_cmnd *scp, int use_sg)
7980{ 7635{
7981 adv_sgblk_t *sgblkp; 7636 adv_sgblk_t *sgblkp, *prev_sgblkp;
7982 ADV_SCSI_REQ_Q *scsiqp;
7983 struct scatterlist *slp; 7637 struct scatterlist *slp;
7984 int sg_elem_cnt; 7638 int sg_elem_cnt;
7985 ADV_SG_BLOCK *sg_block, *prev_sg_block; 7639 ADV_SG_BLOCK *sg_block, *prev_sg_block;
7986 ADV_PADDR sg_block_paddr; 7640 dma_addr_t sgblk_paddr;
7987 int i; 7641 int i;
7988 7642
7989 scsiqp = (ADV_SCSI_REQ_Q *)ADV_32BALIGN(&reqp->scsi_req_q);
7990 slp = scsi_sglist(scp); 7643 slp = scsi_sglist(scp);
7991 sg_elem_cnt = use_sg; 7644 sg_elem_cnt = use_sg;
7645 prev_sgblkp = NULL;
7992 prev_sg_block = NULL; 7646 prev_sg_block = NULL;
7993 reqp->sgblkp = NULL; 7647 reqp->sgblkp = NULL;
7994 7648
@@ -7998,7 +7652,9 @@ adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, struct scsi_cmnd *scp,
7998 * list. One 'adv_sgblk_t' structure holds NO_OF_SG_PER_BLOCK 7652 * list. One 'adv_sgblk_t' structure holds NO_OF_SG_PER_BLOCK
7999 * (15) scatter-gather elements. 7653 * (15) scatter-gather elements.
8000 */ 7654 */
8001 if ((sgblkp = boardp->adv_sgblkp) == NULL) { 7655 sgblkp = dma_pool_alloc(boardp->adv_sgblk_pool, GFP_ATOMIC,
7656 &sgblk_paddr);
7657 if (!sgblkp) {
8002 ASC_DBG(1, "no free adv_sgblk_t\n"); 7658 ASC_DBG(1, "no free adv_sgblk_t\n");
8003 ASC_STATS(scp->device->host, adv_build_nosg); 7659 ASC_STATS(scp->device->host, adv_build_nosg);
8004 7660
@@ -8009,24 +7665,16 @@ adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, struct scsi_cmnd *scp,
8009 while ((sgblkp = reqp->sgblkp) != NULL) { 7665 while ((sgblkp = reqp->sgblkp) != NULL) {
8010 /* Remove 'sgblkp' from the request list. */ 7666 /* Remove 'sgblkp' from the request list. */
8011 reqp->sgblkp = sgblkp->next_sgblkp; 7667 reqp->sgblkp = sgblkp->next_sgblkp;
8012 7668 sgblkp->next_sgblkp = NULL;
8013 /* Add 'sgblkp' to the board free list. */ 7669 dma_pool_free(boardp->adv_sgblk_pool, sgblkp,
8014 sgblkp->next_sgblkp = boardp->adv_sgblkp; 7670 sgblkp->sg_addr);
8015 boardp->adv_sgblkp = sgblkp;
8016 } 7671 }
8017 return ASC_BUSY; 7672 return ASC_BUSY;
8018 } 7673 }
8019
8020 /* Complete 'adv_sgblk_t' board allocation. */ 7674 /* Complete 'adv_sgblk_t' board allocation. */
8021 boardp->adv_sgblkp = sgblkp->next_sgblkp; 7675 sgblkp->sg_addr = sgblk_paddr;
8022 sgblkp->next_sgblkp = NULL; 7676 sgblkp->next_sgblkp = NULL;
8023 7677 sg_block = &sgblkp->sg_block;
8024 /*
8025 * Get 8 byte aligned virtual and physical addresses
8026 * for the allocated ADV_SG_BLOCK structure.
8027 */
8028 sg_block = (ADV_SG_BLOCK *)ADV_8BALIGN(&sgblkp->sg_block);
8029 sg_block_paddr = virt_to_bus(sg_block);
8030 7678
8031 /* 7679 /*
8032 * Check if this is the first 'adv_sgblk_t' for the 7680 * Check if this is the first 'adv_sgblk_t' for the
@@ -8041,17 +7689,16 @@ adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, struct scsi_cmnd *scp,
8041 * address pointers. 7689 * address pointers.
8042 */ 7690 */
8043 scsiqp->sg_list_ptr = sg_block; 7691 scsiqp->sg_list_ptr = sg_block;
8044 scsiqp->sg_real_addr = cpu_to_le32(sg_block_paddr); 7692 scsiqp->sg_real_addr = cpu_to_le32(sgblk_paddr);
8045 } else { 7693 } else {
8046 /* Request's second or later scatter-gather block. */ 7694 /* Request's second or later scatter-gather block. */
8047 sgblkp->next_sgblkp = reqp->sgblkp; 7695 prev_sgblkp->next_sgblkp = sgblkp;
8048 reqp->sgblkp = sgblkp;
8049 7696
8050 /* 7697 /*
8051 * Point the previous ADV_SG_BLOCK structure to 7698 * Point the previous ADV_SG_BLOCK structure to
8052 * the newly allocated ADV_SG_BLOCK structure. 7699 * the newly allocated ADV_SG_BLOCK structure.
8053 */ 7700 */
8054 prev_sg_block->sg_ptr = cpu_to_le32(sg_block_paddr); 7701 prev_sg_block->sg_ptr = cpu_to_le32(sgblk_paddr);
8055 } 7702 }
8056 7703
8057 for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) { 7704 for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
@@ -8062,15 +7709,19 @@ adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, struct scsi_cmnd *scp,
8062 ASC_STATS_ADD(scp->device->host, xfer_sect, 7709 ASC_STATS_ADD(scp->device->host, xfer_sect,
8063 DIV_ROUND_UP(sg_dma_len(slp), 512)); 7710 DIV_ROUND_UP(sg_dma_len(slp), 512));
8064 7711
8065 if (--sg_elem_cnt == 0) { /* Last ADV_SG_BLOCK and scatter-gather entry. */ 7712 if (--sg_elem_cnt == 0) {
7713 /*
7714 * Last ADV_SG_BLOCK and scatter-gather entry.
7715 */
8066 sg_block->sg_cnt = i + 1; 7716 sg_block->sg_cnt = i + 1;
8067 sg_block->sg_ptr = 0L; /* Last ADV_SG_BLOCK in list. */ 7717 sg_block->sg_ptr = 0L; /* Last ADV_SG_BLOCK in list. */
8068 return ADV_SUCCESS; 7718 return ADV_SUCCESS;
8069 } 7719 }
8070 slp++; 7720 slp++;
8071 } 7721 }
8072 sg_block->sg_cnt = NO_OF_SG_PER_BLOCK; 7722 sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
8073 prev_sg_block = sg_block; 7723 prev_sg_block = sg_block;
7724 prev_sgblkp = sgblkp;
8074 } 7725 }
8075} 7726}
8076 7727
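adv_get_sglist() now takes each 15-entry ADV_SG_BLOCK from a dma_pool, which hands back the CPU pointer and the bus address together and already aligned, replacing the ADV_8BALIGN()/virt_to_bus() dance, and it builds two chains at once: a host-side next pointer kept for freeing, and a hardware-side chain written through the previous block's sg_ptr. A host-side model of that construction, with the pool faked by malloc and invented bus addresses:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SG_PER_BLOCK 15         /* NO_OF_SG_PER_BLOCK */

struct sg_block {
    int sg_cnt;
    uint32_t sg_ptr;            /* bus address of the next block, 0 = last */
};

struct sgblk {
    struct sg_block block;
    uint32_t bus_addr;          /* returned by the (faked) dma_pool */
    struct sgblk *next;         /* host-side chain, used when freeing */
};

/* fake pool: malloc plus an invented bus address */
static struct sgblk *pool_alloc(uint32_t *bus)
{
    static uint32_t next_bus = 0x8000;
    struct sgblk *p = calloc(1, sizeof(*p));

    *bus = next_bus;
    next_bus += 0x100;
    return p;
}

int main(void)
{
    int sg_left = 40;           /* 40 elements -> three blocks */
    struct sgblk *head = NULL, *prev = NULL;

    while (sg_left > 0) {
        uint32_t bus;
        struct sgblk *b = pool_alloc(&bus);

        b->bus_addr = bus;
        if (prev) {
            prev->next = b;             /* host chain */
            prev->block.sg_ptr = bus;   /* hardware chain */
        } else {
            head = b;
        }
        b->block.sg_cnt = sg_left > SG_PER_BLOCK ? SG_PER_BLOCK : sg_left;
        sg_left -= b->block.sg_cnt;
        prev = b;
    }

    for (struct sgblk *b = head; b; b = b->next)
        printf("block at bus 0x%x: %d entries, next 0x%x\n",
               (unsigned)b->bus_addr, b->block.sg_cnt,
               (unsigned)b->block.sg_ptr);

    while (head) {              /* models the dma_pool_free() unwind */
        struct sgblk *b = head;
        head = head->next;
        free(b);
    }
    return 0;
}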
@@ -8080,38 +7731,35 @@ adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, struct scsi_cmnd *scp,
8080 * If an adv_req_t can not be allocated to issue the request, 7731 * If an adv_req_t can not be allocated to issue the request,
8081 * then return ASC_BUSY. If an error occurs, then return ASC_ERROR. 7732 * then return ASC_BUSY. If an error occurs, then return ASC_ERROR.
8082 * 7733 *
8083 * Multi-byte fields in the ASC_SCSI_REQ_Q that are used by the 7734 * Multi-byte fields in the ADV_SCSI_REQ_Q that are used by the
8084 * microcode for DMA addresses or math operations are byte swapped 7735 * microcode for DMA addresses or math operations are byte swapped
8085 * to little-endian order. 7736 * to little-endian order.
8086 */ 7737 */
8087static int 7738static int
8088adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp, 7739adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
8089 ADV_SCSI_REQ_Q **adv_scsiqpp) 7740 adv_req_t **adv_reqpp)
8090{ 7741{
7742 u32 srb_tag = scp->request->tag;
8091 adv_req_t *reqp; 7743 adv_req_t *reqp;
8092 ADV_SCSI_REQ_Q *scsiqp; 7744 ADV_SCSI_REQ_Q *scsiqp;
8093 int i;
8094 int ret; 7745 int ret;
8095 int use_sg; 7746 int use_sg;
7747 dma_addr_t sense_addr;
8096 7748
8097 /* 7749 /*
8098 * Allocate an adv_req_t structure from the board to execute 7750 * Allocate an adv_req_t structure from the board to execute
8099 * the command. 7751 * the command.
8100 */ 7752 */
8101 if (boardp->adv_reqp == NULL) { 7753 reqp = &boardp->adv_reqp[srb_tag];
7754 if (reqp->cmndp && reqp->cmndp != scp ) {
8102 ASC_DBG(1, "no free adv_req_t\n"); 7755 ASC_DBG(1, "no free adv_req_t\n");
8103 ASC_STATS(scp->device->host, adv_build_noreq); 7756 ASC_STATS(scp->device->host, adv_build_noreq);
8104 return ASC_BUSY; 7757 return ASC_BUSY;
8105 } else {
8106 reqp = boardp->adv_reqp;
8107 boardp->adv_reqp = reqp->next_reqp;
8108 reqp->next_reqp = NULL;
8109 } 7758 }
8110 7759
8111 /* 7760 reqp->req_addr = boardp->adv_reqp_addr + (srb_tag * sizeof(adv_req_t));
8112 * Get 32-byte aligned ADV_SCSI_REQ_Q and ADV_SG_BLOCK pointers. 7761
8113 */ 7762 scsiqp = &reqp->scsi_req_q;
8114 scsiqp = (ADV_SCSI_REQ_Q *)ADV_32BALIGN(&reqp->scsi_req_q);
8115 7763
8116 /* 7764 /*
8117 * Initialize the structure. 7765 * Initialize the structure.
@@ -8119,14 +7767,15 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
8119 scsiqp->cntl = scsiqp->scsi_cntl = scsiqp->done_status = 0; 7767 scsiqp->cntl = scsiqp->scsi_cntl = scsiqp->done_status = 0;
8120 7768
8121 /* 7769 /*
8122 * Set the ADV_SCSI_REQ_Q 'srb_ptr' to point to the adv_req_t structure. 7770 * Set the srb_tag to the command tag.
8123 */ 7771 */
8124 scsiqp->srb_ptr = ADV_VADDR_TO_U32(reqp); 7772 scsiqp->srb_tag = srb_tag;
8125 7773
8126 /* 7774 /*
8127 * Set the adv_req_t 'cmndp' to point to the struct scsi_cmnd structure. 7775 * Set 'host_scribble' to point to the adv_req_t structure.
8128 */ 7776 */
8129 reqp->cmndp = scp; 7777 reqp->cmndp = scp;
7778 scp->host_scribble = (void *)reqp;
8130 7779
8131 /* 7780 /*
8132 * Build the ADV_SCSI_REQ_Q request. 7781 * Build the ADV_SCSI_REQ_Q request.
@@ -8135,28 +7784,38 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
     /* Set CDB length and copy it to the request structure. */
     scsiqp->cdb_len = scp->cmd_len;
     /* Copy first 12 CDB bytes to cdb[]. */
-    for (i = 0; i < scp->cmd_len && i < 12; i++) {
-        scsiqp->cdb[i] = scp->cmnd[i];
-    }
+    memcpy(scsiqp->cdb, scp->cmnd, scp->cmd_len < 12 ? scp->cmd_len : 12);
     /* Copy last 4 CDB bytes, if present, to cdb16[]. */
-    for (; i < scp->cmd_len; i++) {
-        scsiqp->cdb16[i - 12] = scp->cmnd[i];
+    if (scp->cmd_len > 12) {
+        int cdb16_len = scp->cmd_len - 12;
+
+        memcpy(scsiqp->cdb16, &scp->cmnd[12], cdb16_len);
     }
 
     scsiqp->target_id = scp->device->id;
     scsiqp->target_lun = scp->device->lun;
 
-    scsiqp->sense_addr = cpu_to_le32(virt_to_bus(&scp->sense_buffer[0]));
-    scsiqp->sense_len = SCSI_SENSE_BUFFERSIZE;
+    sense_addr = dma_map_single(boardp->dev, scp->sense_buffer,
+                                SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+    if (dma_mapping_error(boardp->dev, sense_addr)) {
+        ASC_DBG(1, "failed to map sense buffer\n");
+        ASC_STATS(scp->device->host, adv_build_noreq);
+        return ASC_BUSY;
+    }
+    scsiqp->sense_addr = cpu_to_le32(sense_addr);
+    scsiqp->sense_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
 
     /* Build ADV_SCSI_REQ_Q */
 
     use_sg = scsi_dma_map(scp);
-    if (use_sg == 0) {
+    if (use_sg < 0) {
+        ASC_DBG(1, "failed to map SG list\n");
+        ASC_STATS(scp->device->host, adv_build_noreq);
+        return ASC_BUSY;
+    } else if (use_sg == 0) {
         /* Zero-length transfer */
         reqp->sgblkp = NULL;
         scsiqp->data_cnt = 0;
-        scsiqp->vdata_addr = NULL;
 
         scsiqp->data_addr = 0;
         scsiqp->sg_list_ptr = NULL;
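[note] The sense-buffer hunk above replaces the legacy virt_to_bus() translation with a streaming DMA mapping that is validated before use. A minimal sketch of that pattern, assuming a generic 'dev'; the helper name my_map_sense() and the error code are illustrative, not advansys code:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <scsi/scsi_cmnd.h>

    static int my_map_sense(struct device *dev, struct scsi_cmnd *scp,
                            dma_addr_t *out)
    {
        dma_addr_t addr = dma_map_single(dev, scp->sense_buffer,
                                         SCSI_SENSE_BUFFERSIZE,
                                         DMA_FROM_DEVICE);

        /* dma_map_single() can fail; the handle must be checked. */
        if (dma_mapping_error(dev, addr))
            return -EIO;    /* caller reports busy so the midlayer retries */
        *out = addr;
        return 0;
    }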
@@ -8168,27 +7827,20 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
                          scp->device->host->sg_tablesize);
         scsi_dma_unmap(scp);
         scp->result = HOST_BYTE(DID_ERROR);
-
-        /*
-         * Free the 'adv_req_t' structure by adding it back
-         * to the board free list.
-         */
-        reqp->next_reqp = boardp->adv_reqp;
-        boardp->adv_reqp = reqp;
+        reqp->cmndp = NULL;
+        scp->host_scribble = NULL;
 
         return ASC_ERROR;
     }
 
     scsiqp->data_cnt = cpu_to_le32(scsi_bufflen(scp));
 
-    ret = adv_get_sglist(boardp, reqp, scp, use_sg);
+    ret = adv_get_sglist(boardp, reqp, scsiqp, scp, use_sg);
     if (ret != ADV_SUCCESS) {
-        /*
-         * Free the adv_req_t structure by adding it back to
-         * the board free list.
-         */
-        reqp->next_reqp = boardp->adv_reqp;
-        boardp->adv_reqp = reqp;
+        scsi_dma_unmap(scp);
+        scp->result = HOST_BYTE(DID_ERROR);
+        reqp->cmndp = NULL;
+        scp->host_scribble = NULL;
 
         return ret;
     }
@@ -8201,7 +7853,7 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
     ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp);
     ASC_DBG_PRT_CDB(1, scp->cmnd, scp->cmd_len);
 
-    *adv_scsiqpp = scsiqp;
+    *adv_reqpp = reqp;
 
     return ASC_NOERROR;
 }
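[note] With .use_blk_tags = 1 (added further down in this patch), scp->request->tag is unique among in-flight commands, so adv_build_req() above can index a preallocated per-board array instead of maintaining its own free list. A toy userspace model of that lookup, with invented names (my_req, my_find_req):

    /* build: cc -o tagdemo tagdemo.c */
    #include <stdio.h>

    struct my_req {             /* stands in for adv_req_t */
        void *cmndp;            /* owning command, NULL when the slot is idle */
    };

    /* Look up the per-command slot by its block-layer tag. */
    static struct my_req *my_find_req(struct my_req *pool, unsigned tag,
                                      void *cmnd)
    {
        struct my_req *req = &pool[tag];

        if (req->cmndp && req->cmndp != cmnd)
            return NULL;        /* slot still owned by another command */
        req->cmndp = cmnd;
        return req;
    }

    int main(void)
    {
        struct my_req pool[4] = { { 0 } };
        int a, b;

        a = my_find_req(pool, 2, &a) != NULL;   /* claims slot 2 */
        b = my_find_req(pool, 2, &b) != NULL;   /* same tag, other owner */
        printf("first=%d second=%d\n", a, b);   /* first=1 second=0 */
        return 0;
    }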
@@ -8358,8 +8010,8 @@ AscPutReadySgListQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no)
     int i;
     ASC_SG_HEAD *sg_head;
     ASC_SG_LIST_Q scsi_sg_q;
-    ASC_DCNT saved_data_addr;
-    ASC_DCNT saved_data_cnt;
+    __le32 saved_data_addr;
+    __le32 saved_data_cnt;
     PortAddr iop_base;
     ushort sg_list_dwords;
     ushort sg_index;
@@ -8371,42 +8023,15 @@ AscPutReadySgListQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no)
     sg_head = scsiq->sg_head;
     saved_data_addr = scsiq->q1.data_addr;
     saved_data_cnt = scsiq->q1.data_cnt;
-    scsiq->q1.data_addr = (ASC_PADDR) sg_head->sg_list[0].addr;
-    scsiq->q1.data_cnt = (ASC_DCNT) sg_head->sg_list[0].bytes;
-#if CC_VERY_LONG_SG_LIST
+    scsiq->q1.data_addr = cpu_to_le32(sg_head->sg_list[0].addr);
+    scsiq->q1.data_cnt = cpu_to_le32(sg_head->sg_list[0].bytes);
     /*
-     * If sg_head->entry_cnt is greater than ASC_MAX_SG_LIST
-     * then not all SG elements will fit in the allocated queues.
-     * The rest of the SG elements will be copied when the RISC
-     * completes the SG elements that fit and halts.
+     * Set sg_entry_cnt to be the number of SG elements that
+     * will fit in the allocated SG queues. It is minus 1, because
+     * the first SG element is handled above.
      */
-    if (sg_head->entry_cnt > ASC_MAX_SG_LIST) {
-        /*
-         * Set sg_entry_cnt to be the number of SG elements that
-         * will fit in the allocated SG queues. It is minus 1, because
-         * the first SG element is handled above. ASC_MAX_SG_LIST is
-         * already inflated by 1 to account for this. For example it
-         * may be 50 which is 1 + 7 queues * 7 SG elements.
-         */
-        sg_entry_cnt = ASC_MAX_SG_LIST - 1;
+    sg_entry_cnt = sg_head->entry_cnt - 1;
 
-        /*
-         * Keep track of remaining number of SG elements that will
-         * need to be handled from a_isr.c.
-         */
-        scsiq->remain_sg_entry_cnt =
-            sg_head->entry_cnt - ASC_MAX_SG_LIST;
-    } else {
-#endif /* CC_VERY_LONG_SG_LIST */
-        /*
-         * Set sg_entry_cnt to be the number of SG elements that
-         * will fit in the allocated SG queues. It is minus 1, because
-         * the first SG element is handled above.
-         */
-        sg_entry_cnt = sg_head->entry_cnt - 1;
-#if CC_VERY_LONG_SG_LIST
-    }
-#endif /* CC_VERY_LONG_SG_LIST */
     if (sg_entry_cnt != 0) {
         scsiq->q1.cntl |= QC_SG_HEAD;
         q_addr = ASC_QNO_TO_QADDR(q_no);
@@ -8431,21 +8056,7 @@ AscPutReadySgListQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no)
                     ASC_SG_LIST_PER_Q - 1;
             }
         } else {
-#if CC_VERY_LONG_SG_LIST
-            /*
-             * This is the last SG queue in the list of
-             * allocated SG queues. If there are more
-             * SG elements than will fit in the allocated
-             * queues, then set the QCSG_SG_XFER_MORE flag.
-             */
-            if (sg_head->entry_cnt > ASC_MAX_SG_LIST) {
-                scsi_sg_q.cntl |= QCSG_SG_XFER_MORE;
-            } else {
-#endif /* CC_VERY_LONG_SG_LIST */
-                scsi_sg_q.cntl |= QCSG_SG_XFER_END;
-#if CC_VERY_LONG_SG_LIST
-            }
-#endif /* CC_VERY_LONG_SG_LIST */
+            scsi_sg_q.cntl |= QCSG_SG_XFER_END;
             sg_list_dwords = sg_entry_cnt << 1;
             if (i == 0) {
                 scsi_sg_q.sg_list_cnt = sg_entry_cnt;
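[note] The hunk above can now pack the remaining SG elements into the allocated SG queues unconditionally, tagging only the final queue with QCSG_SG_XFER_END, since the CC_VERY_LONG_SG_LIST overflow path is gone. An illustrative chunking calculation; PER_Q is an invented constant, not the driver's actual per-queue capacity:

    /* build: cc -o sgchunk sgchunk.c */
    #include <stdio.h>

    #define PER_Q 7             /* hypothetical SG entries per queue block */

    int main(void)
    {
        int entry_cnt = 17;                     /* total SG elements */
        int rest = entry_cnt - 1;               /* first one handled inline */
        int blocks = (rest + PER_Q - 1) / PER_Q;
        int i;

        for (i = 0; i < blocks; i++) {
            int n = rest > PER_Q ? PER_Q : rest;
            printf("block %d: %d entries%s\n", i, n,
                   (i == blocks - 1) ? " [XFER_END]" : "");
            rest -= n;
        }
        return 0;
    }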
@@ -8550,9 +8161,9 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
     PortAddr iop_base;
     int sta;
     int n_q_required;
-    int disable_syn_offset_one_fix;
+    bool disable_syn_offset_one_fix;
     int i;
-    ASC_PADDR addr;
+    u32 addr;
     ushort sg_entry_cnt = 0;
     ushort sg_entry_cnt_minus_one = 0;
     uchar target_ix;
@@ -8562,12 +8173,12 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
     uchar scsi_cmd;
     uchar disable_cmd;
     ASC_SG_HEAD *sg_head;
-    ASC_DCNT data_cnt;
+    unsigned long data_cnt;
 
     iop_base = asc_dvc->iop_base;
     sg_head = scsiq->sg_head;
     if (asc_dvc->err_code != 0)
-        return (ERR);
+        return ASC_ERROR;
     scsiq->q1.q_no = 0;
     if ((scsiq->q2.tag_code & ASC_TAG_FLAG_EXTRA_BYTES) == 0) {
         scsiq->q1.extra_bytes = 0;
@@ -8593,46 +8204,41 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
     }
     if (asc_dvc->in_critical_cnt != 0) {
         AscSetLibErrorCode(asc_dvc, ASCQ_ERR_CRITICAL_RE_ENTRY);
-        return (ERR);
+        return ASC_ERROR;
     }
     asc_dvc->in_critical_cnt++;
     if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
         if ((sg_entry_cnt = sg_head->entry_cnt) == 0) {
             asc_dvc->in_critical_cnt--;
-            return (ERR);
+            return ASC_ERROR;
         }
-#if !CC_VERY_LONG_SG_LIST
         if (sg_entry_cnt > ASC_MAX_SG_LIST) {
             asc_dvc->in_critical_cnt--;
-            return (ERR);
+            return ASC_ERROR;
         }
-#endif /* !CC_VERY_LONG_SG_LIST */
         if (sg_entry_cnt == 1) {
-            scsiq->q1.data_addr =
-                (ADV_PADDR)sg_head->sg_list[0].addr;
-            scsiq->q1.data_cnt =
-                (ADV_DCNT)sg_head->sg_list[0].bytes;
+            scsiq->q1.data_addr = cpu_to_le32(sg_head->sg_list[0].addr);
+            scsiq->q1.data_cnt = cpu_to_le32(sg_head->sg_list[0].bytes);
             scsiq->q1.cntl &= ~(QC_SG_HEAD | QC_SG_SWAP_QUEUE);
         }
         sg_entry_cnt_minus_one = sg_entry_cnt - 1;
     }
     scsi_cmd = scsiq->cdbptr[0];
-    disable_syn_offset_one_fix = FALSE;
+    disable_syn_offset_one_fix = false;
     if ((asc_dvc->pci_fix_asyn_xfer & scsiq->q1.target_id) &&
         !(asc_dvc->pci_fix_asyn_xfer_always & scsiq->q1.target_id)) {
         if (scsiq->q1.cntl & QC_SG_HEAD) {
             data_cnt = 0;
             for (i = 0; i < sg_entry_cnt; i++) {
-                data_cnt +=
-                    (ADV_DCNT)le32_to_cpu(sg_head->sg_list[i].
-                                          bytes);
+                data_cnt += le32_to_cpu(sg_head->sg_list[i].
+                                        bytes);
             }
         } else {
             data_cnt = le32_to_cpu(scsiq->q1.data_cnt);
         }
         if (data_cnt != 0UL) {
             if (data_cnt < 512UL) {
-                disable_syn_offset_one_fix = TRUE;
+                disable_syn_offset_one_fix = true;
             } else {
                 for (i = 0; i < ASC_SYN_OFFSET_ONE_DISABLE_LIST;
                      i++) {
@@ -8643,7 +8249,7 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
                     }
                     if (scsi_cmd == disable_cmd) {
                         disable_syn_offset_one_fix =
-                            TRUE;
+                            true;
                         break;
                     }
                 }
@@ -8662,12 +8268,11 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
         if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_IF_NOT_DWB) {
             if ((scsi_cmd == READ_6) ||
                 (scsi_cmd == READ_10)) {
-                addr =
-                    (ADV_PADDR)le32_to_cpu(sg_head->
+                addr = le32_to_cpu(sg_head->
                                            sg_list
                                            [sg_entry_cnt_minus_one].
                                            addr) +
-                    (ADV_DCNT)le32_to_cpu(sg_head->
+                    le32_to_cpu(sg_head->
                                            sg_list
                                            [sg_entry_cnt_minus_one].
                                            bytes);
@@ -8688,8 +8293,7 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
                                            sg_list
                                            [sg_entry_cnt_minus_one].
                                            bytes);
-                        data_cnt -=
-                            (ASC_DCNT) extra_bytes;
+                        data_cnt -= extra_bytes;
                         sg_head->
                             sg_list
                             [sg_entry_cnt_minus_one].
@@ -8700,16 +8304,6 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
             }
         }
         sg_head->entry_to_copy = sg_head->entry_cnt;
-#if CC_VERY_LONG_SG_LIST
-        /*
-         * Set the sg_entry_cnt to the maximum possible. The rest of
-         * the SG elements will be copied when the RISC completes the
-         * SG elements that fit and halts.
-         */
-        if (sg_entry_cnt > ASC_MAX_SG_LIST) {
-            sg_entry_cnt = ASC_MAX_SG_LIST;
-        }
-#endif /* CC_VERY_LONG_SG_LIST */
         n_q_required = AscSgListToQueue(sg_entry_cnt);
         if ((AscGetNumOfFreeQueue(asc_dvc, target_ix, n_q_required) >=
              (uint) n_q_required)
@@ -8744,8 +8338,7 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
                     == 0) {
                     scsiq->q2.tag_code |=
                         ASC_TAG_FLAG_EXTRA_BYTES;
-                    data_cnt -= (ASC_DCNT)
-                        extra_bytes;
+                    data_cnt -= extra_bytes;
                     scsiq->q1.data_cnt =
                         cpu_to_le32
                         (data_cnt);
@@ -8780,7 +8373,7 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
  * If 'done_status' is not set to QD_DO_RETRY, then 'error_retry' will be
  * set to SCSI_MAX_RETRY.
  *
- * Multi-byte fields in the ASC_SCSI_REQ_Q that are used by the microcode
+ * Multi-byte fields in the ADV_SCSI_REQ_Q that are used by the microcode
  * for DMA addresses or math operations are byte swapped to little-endian
  * order.
  *
@@ -8791,11 +8384,11 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
  *      ADV_ERROR(-1) - Invalid ADV_SCSI_REQ_Q request structure
  *                      host IC error.
  */
-static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, ADV_SCSI_REQ_Q *scsiq)
+static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, adv_req_t *reqp)
 {
     AdvPortAddr iop_base;
-    ADV_PADDR req_paddr;
     ADV_CARR_T *new_carrp;
+    ADV_SCSI_REQ_Q *scsiq = &reqp->scsi_req_q;
 
     /*
      * The ADV_SCSI_REQ_Q 'target_id' field should never exceed ADV_MAX_TID.
@@ -8812,39 +8405,19 @@ static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, ADV_SCSI_REQ_Q *scsiq)
      * Allocate a carrier ensuring at least one carrier always
      * remains on the freelist and initialize fields.
      */
-    if ((new_carrp = asc_dvc->carr_freelist) == NULL) {
+    new_carrp = adv_get_next_carrier(asc_dvc);
+    if (!new_carrp) {
+        ASC_DBG(1, "No free carriers\n");
         return ADV_BUSY;
     }
-    asc_dvc->carr_freelist = (ADV_CARR_T *)
-        ADV_U32_TO_VADDR(le32_to_cpu(new_carrp->next_vpa));
-    asc_dvc->carr_pending_cnt++;
-
-    /*
-     * Set the carrier to be a stopper by setting 'next_vpa'
-     * to the stopper value. The current stopper will be changed
-     * below to point to the new stopper.
-     */
-    new_carrp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);
 
-    /*
-     * Clear the ADV_SCSI_REQ_Q done flag.
-     */
-    scsiq->a_flag &= ~ADV_SCSIQ_DONE;
-
-    req_paddr = virt_to_bus(scsiq);
-    BUG_ON(req_paddr & 31);
-    /* Wait for assertion before making little-endian */
-    req_paddr = cpu_to_le32(req_paddr);
+    asc_dvc->carr_pending_cnt++;
 
     /* Save virtual and physical address of ADV_SCSI_REQ_Q and carrier. */
-    scsiq->scsiq_ptr = cpu_to_le32(ADV_VADDR_TO_U32(scsiq));
-    scsiq->scsiq_rptr = req_paddr;
+    scsiq->scsiq_ptr = cpu_to_le32(scsiq->srb_tag);
+    scsiq->scsiq_rptr = cpu_to_le32(reqp->req_addr);
 
-    scsiq->carr_va = cpu_to_le32(ADV_VADDR_TO_U32(asc_dvc->icq_sp));
-    /*
-     * Every ADV_CARR_T.carr_pa is byte swapped to little-endian
-     * order during initialization.
-     */
+    scsiq->carr_va = asc_dvc->icq_sp->carr_va;
     scsiq->carr_pa = asc_dvc->icq_sp->carr_pa;
 
     /*
@@ -8852,7 +8425,7 @@ static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, ADV_SCSI_REQ_Q *scsiq)
      * the microcode. The newly allocated stopper will become the new
      * stopper.
      */
-    asc_dvc->icq_sp->areq_vpa = req_paddr;
+    asc_dvc->icq_sp->areq_vpa = scsiq->scsiq_rptr;
 
     /*
      * Set the 'next_vpa' pointer for the old stopper to be the
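[note] The two AdvExeScsiQueue() hunks above keep the carrier ring's append discipline: the current stopper carrier is rewritten in place to describe the new request, and a freshly allocated stopper is linked behind it, so the microcode never sees an empty queue. A toy userspace model of that append (names invented, error handling trimmed):

    /* build: cc -o stopper stopper.c */
    #include <stdio.h>
    #include <stdlib.h>

    struct carrier {
        int payload;            /* 0 marks a stopper */
        struct carrier *next;
    };

    static struct carrier *append(struct carrier *stopper, int payload)
    {
        struct carrier *new_stop = calloc(1, sizeof(*new_stop));

        if (!new_stop)
            exit(1);
        stopper->payload = payload;     /* old stopper becomes real work */
        stopper->next = new_stop;       /* new stopper terminates the queue */
        return new_stop;
    }

    int main(void)
    {
        struct carrier head = { 0, NULL }, *stop = &head, *c;

        stop = append(stop, 1);
        stop = append(stop, 2);
        for (c = &head; c->payload; c = c->next)
            printf("%d\n", c->payload);     /* prints 1 then 2 */
        return 0;
    }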
@@ -8907,11 +8480,10 @@ static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp)
     ASC_DVC_VAR *asc_dvc = &boardp->dvc_var.asc_dvc_var;
     struct asc_scsi_q asc_scsi_q;
 
-    /* asc_build_req() can not return ASC_BUSY. */
     ret = asc_build_req(boardp, scp, &asc_scsi_q);
-    if (ret == ASC_ERROR) {
+    if (ret != ASC_NOERROR) {
         ASC_STATS(scp->device->host, build_error);
-        return ASC_ERROR;
+        return ret;
     }
 
     ret = AscExeScsiQueue(asc_dvc, &asc_scsi_q);
@@ -8919,9 +8491,9 @@ static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp)
         err_code = asc_dvc->err_code;
     } else {
         ADV_DVC_VAR *adv_dvc = &boardp->dvc_var.adv_dvc_var;
-        ADV_SCSI_REQ_Q *adv_scsiqp;
+        adv_req_t *adv_reqp;
 
-        switch (adv_build_req(boardp, scp, &adv_scsiqp)) {
+        switch (adv_build_req(boardp, scp, &adv_reqp)) {
         case ASC_NOERROR:
             ASC_DBG(3, "adv_build_req ASC_NOERROR\n");
             break;
@@ -8941,7 +8513,7 @@ static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp)
             return ASC_ERROR;
         }
 
-        ret = AdvExeScsiQueue(adv_dvc, adv_scsiqp);
+        ret = AdvExeScsiQueue(adv_dvc, adv_reqp);
         err_code = adv_dvc->err_code;
     }
 
@@ -8956,6 +8528,7 @@ static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp)
         ASC_DBG(1, "ExeScsiQueue() ASC_NOERROR\n");
         break;
     case ASC_BUSY:
+        ASC_DBG(1, "ExeScsiQueue() ASC_BUSY\n");
         ASC_STATS(scp->device->host, exe_busy);
         break;
     case ASC_ERROR:
@@ -9122,7 +8695,7 @@ static int AscStopQueueExe(PortAddr iop_base)
     return (0);
 }
 
-static ASC_DCNT AscGetMaxDmaCount(ushort bus_type)
+static unsigned int AscGetMaxDmaCount(ushort bus_type)
 {
     if (bus_type & ASC_IS_ISA)
         return ASC_MAX_ISA_DMA_COUNT;
@@ -9183,15 +8756,13 @@ static uchar AscSetIsaDmaSpeed(PortAddr iop_base, uchar speed_value)
 }
 #endif /* CONFIG_ISA */
 
-static ushort AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
+static void AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
 {
     int i;
     PortAddr iop_base;
-    ushort warn_code;
     uchar chip_version;
 
     iop_base = asc_dvc->iop_base;
-    warn_code = 0;
     asc_dvc->err_code = 0;
     if ((asc_dvc->bus_type &
          (ASC_IS_ISA | ASC_IS_PCI | ASC_IS_EISA | ASC_IS_VL)) == 0) {
@@ -9205,7 +8776,7 @@ static ushort AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
     /* asc_dvc->init_state initialized in AscInitGetConfig(). */
     asc_dvc->sdtr_done = 0;
     asc_dvc->cur_total_qng = 0;
-    asc_dvc->is_in_int = 0;
+    asc_dvc->is_in_int = false;
     asc_dvc->in_critical_cnt = 0;
     asc_dvc->last_q_shortage = 0;
     asc_dvc->use_tagged_qng = 0;
@@ -9267,7 +8838,6 @@ static ushort AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
         asc_dvc->scsiq_busy_tail[i] = (ASC_SCSI_Q *)0L;
         asc_dvc->cfg->max_tag_qng[i] = ASC_MAX_INRAM_TAG_QNG;
     }
-    return warn_code;
 }
 
 static int AscWriteEEPCmdReg(PortAddr iop_base, uchar cmd_reg)
@@ -9385,7 +8955,7 @@ static int AscWriteEEPDataReg(PortAddr iop_base, ushort data_reg)
     int retry;
 
     retry = 0;
-    while (TRUE) {
+    while (true) {
         AscSetChipEEPData(iop_base, data_reg);
         mdelay(1);
         read_back = AscGetChipEEPData(iop_base);
@@ -9521,7 +9091,7 @@ static int AscSetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf,
     int n_error;
 
     retry = 0;
-    while (TRUE) {
+    while (true) {
         if ((n_error = AscSetEEPConfigOnce(iop_base, cfg_buf,
                                            bus_type)) == 0) {
             break;
@@ -9533,7 +9103,7 @@ static int AscSetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf,
     return n_error;
 }
 
-static ushort AscInitFromEEP(ASC_DVC_VAR *asc_dvc)
+static int AscInitFromEEP(ASC_DVC_VAR *asc_dvc)
 {
     ASCEEP_CONFIG eep_config_buf;
     ASCEEP_CONFIG *eep_config;
@@ -9548,13 +9118,13 @@ static ushort AscInitFromEEP(ASC_DVC_VAR *asc_dvc)
     warn_code = 0;
     AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0x00FE);
     AscStopQueueExe(iop_base);
-    if ((AscStopChip(iop_base) == FALSE) ||
+    if ((AscStopChip(iop_base)) ||
         (AscGetChipScsiCtrl(iop_base) != 0)) {
         asc_dvc->init_state |= ASC_INIT_RESET_SCSI_DONE;
         AscResetChipAndScsiBus(asc_dvc);
         mdelay(asc_dvc->scsi_reset_wait * 1000); /* XXX: msleep? */
     }
-    if (AscIsChipHalted(iop_base) == FALSE) {
+    if (!AscIsChipHalted(iop_base)) {
         asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP;
         return (warn_code);
     }
@@ -9709,8 +9279,8 @@ static int AscInitGetConfig(struct Scsi_Host *shost)
         return asc_dvc->err_code;
 
     if (AscFindSignature(asc_dvc->iop_base)) {
-        warn_code |= AscInitAscDvcVar(asc_dvc);
-        warn_code |= AscInitFromEEP(asc_dvc);
+        AscInitAscDvcVar(asc_dvc);
+        warn_code = AscInitFromEEP(asc_dvc);
         asc_dvc->init_state |= ASC_INIT_STATE_END_GET_CFG;
         if (asc_dvc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
             asc_dvc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;
@@ -9866,6 +9436,7 @@ static int AscInitSetConfig(struct pci_dev *pdev, struct Scsi_Host *shost)
  * on big-endian platforms so char fields read as words are actually being
  * unswapped on big-endian platforms.
  */
+#ifdef CONFIG_PCI
 static ADVEEP_3550_CONFIG Default_3550_EEPROM_Config = {
     ADV_EEPROM_BIOS_ENABLE,    /* cfg_lsw */
     0x0000,                    /* cfg_msw */
@@ -10202,7 +9773,6 @@ static ADVEEP_38C1600_CONFIG ADVEEP_38C1600_Config_Field_IsChar = {
     0                          /* 63 reserved */
 };
 
-#ifdef CONFIG_PCI
 /*
  * Wait for EEPROM command to complete
  */
@@ -11232,7 +10802,7 @@ static struct scsi_host_template advansys_template = {
     .name = DRV_NAME,
     .info = advansys_info,
     .queuecommand = advansys_queuecommand,
-    .eh_bus_reset_handler = advansys_reset,
+    .eh_host_reset_handler = advansys_reset,
     .bios_param = advansys_biosparam,
     .slave_configure = advansys_slave_configure,
     /*
@@ -11240,7 +10810,7 @@ static struct scsi_host_template advansys_template = {
      * must be set. The flag will be cleared in advansys_board_found
      * for non-ISA adapters.
      */
-    .unchecked_isa_dma = 1,
+    .unchecked_isa_dma = true,
     /*
      * All adapters controlled by this driver are capable of large
      * scatter-gather lists. According to the mid-level SCSI documentation
@@ -11249,26 +10819,25 @@ static struct scsi_host_template advansys_template = {
      * by enabling clustering, I/O throughput increases as well.
      */
     .use_clustering = ENABLE_CLUSTERING,
+    .use_blk_tags = 1,
 };
 
 static int advansys_wide_init_chip(struct Scsi_Host *shost)
 {
     struct asc_board *board = shost_priv(shost);
     struct adv_dvc_var *adv_dvc = &board->dvc_var.adv_dvc_var;
-    int req_cnt = 0;
-    adv_req_t *reqp = NULL;
-    int sg_cnt = 0;
-    adv_sgblk_t *sgp;
+    size_t sgblk_pool_size;
     int warn_code, err_code;
 
     /*
      * Allocate buffer carrier structures. The total size
-     * is about 4 KB, so allocate all at once.
+     * is about 8 KB, so allocate all at once.
      */
-    adv_dvc->carrier_buf = kmalloc(ADV_CARRIER_BUFSIZE, GFP_KERNEL);
-    ASC_DBG(1, "carrier_buf 0x%p\n", adv_dvc->carrier_buf);
+    adv_dvc->carrier = dma_alloc_coherent(board->dev,
+        ADV_CARRIER_BUFSIZE, &adv_dvc->carrier_addr, GFP_KERNEL);
+    ASC_DBG(1, "carrier 0x%p\n", adv_dvc->carrier);
 
-    if (!adv_dvc->carrier_buf)
+    if (!adv_dvc->carrier)
         goto kmalloc_failed;
 
     /*
@@ -11276,54 +10845,34 @@ static int advansys_wide_init_chip(struct Scsi_Host *shost)
      * board. The total size is about 16 KB, so allocate all at once.
      * If the allocation fails decrement and try again.
      */
-    for (req_cnt = adv_dvc->max_host_qng; req_cnt > 0; req_cnt--) {
-        reqp = kmalloc(sizeof(adv_req_t) * req_cnt, GFP_KERNEL);
-
-        ASC_DBG(1, "reqp 0x%p, req_cnt %d, bytes %lu\n", reqp, req_cnt,
-            (ulong)sizeof(adv_req_t) * req_cnt);
-
-        if (reqp)
-            break;
+    board->adv_reqp_size = adv_dvc->max_host_qng * sizeof(adv_req_t);
+    if (board->adv_reqp_size & 0x1f) {
+        ASC_DBG(1, "unaligned reqp %lu bytes\n", sizeof(adv_req_t));
+        board->adv_reqp_size = ADV_32BALIGN(board->adv_reqp_size);
     }
+    board->adv_reqp = dma_alloc_coherent(board->dev, board->adv_reqp_size,
+        &board->adv_reqp_addr, GFP_KERNEL);
 
-    if (!reqp)
+    if (!board->adv_reqp)
         goto kmalloc_failed;
 
-    adv_dvc->orig_reqp = reqp;
+    ASC_DBG(1, "reqp 0x%p, req_cnt %d, bytes %lu\n", board->adv_reqp,
+        adv_dvc->max_host_qng, board->adv_reqp_size);
 
     /*
      * Allocate up to ADV_TOT_SG_BLOCK request structures for
      * the Wide board. Each structure is about 136 bytes.
      */
-    board->adv_sgblkp = NULL;
-    for (sg_cnt = 0; sg_cnt < ADV_TOT_SG_BLOCK; sg_cnt++) {
-        sgp = kmalloc(sizeof(adv_sgblk_t), GFP_KERNEL);
-
-        if (!sgp)
-            break;
-
-        sgp->next_sgblkp = board->adv_sgblkp;
-        board->adv_sgblkp = sgp;
-
-    }
-
-    ASC_DBG(1, "sg_cnt %d * %lu = %lu bytes\n", sg_cnt, sizeof(adv_sgblk_t),
-        sizeof(adv_sgblk_t) * sg_cnt);
+    sgblk_pool_size = sizeof(adv_sgblk_t) * ADV_TOT_SG_BLOCK;
+    board->adv_sgblk_pool = dma_pool_create("adv_sgblk", board->dev,
+        sgblk_pool_size, 32, 0);
+
+    ASC_DBG(1, "sg_cnt %d * %lu = %lu bytes\n", ADV_TOT_SG_BLOCK,
+        sizeof(adv_sgblk_t), sgblk_pool_size);
 
-    if (!board->adv_sgblkp)
+    if (!board->adv_sgblk_pool)
         goto kmalloc_failed;
 
-    /*
-     * Point 'adv_reqp' to the request structures and
-     * link them together.
-     */
-    req_cnt--;
-    reqp[req_cnt].next_reqp = NULL;
-    for (; req_cnt > 0; req_cnt--) {
-        reqp[req_cnt - 1].next_reqp = &reqp[req_cnt];
-    }
-    board->adv_reqp = &reqp[0];
-
     if (adv_dvc->chip_type == ADV_CHIP_ASC3550) {
         ASC_DBG(2, "AdvInitAsc3550Driver()\n");
         warn_code = AdvInitAsc3550Driver(adv_dvc);
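[note] The allocation hunks above move the carrier buffer and the per-tag request array into coherent DMA memory and replace the hand-rolled sgblk free list with a dma_pool of 32-byte-aligned blocks. A minimal sketch of the alloc/free pairing, assuming a probe-time 'dev'; struct and function names are hypothetical:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/dmapool.h>
    #include <linux/errno.h>

    struct my_hw_ctx {
        void *ring;                 /* CPU view of the shared ring */
        dma_addr_t ring_dma;        /* device view of the same memory */
        struct dma_pool *sg_pool;   /* aligned fixed-size SG blocks */
    };

    static int my_alloc_hw_ctx(struct device *dev, struct my_hw_ctx *ctx,
                               size_t ring_bytes, size_t sg_bytes)
    {
        ctx->ring = dma_alloc_coherent(dev, ring_bytes, &ctx->ring_dma,
                                       GFP_KERNEL);
        if (!ctx->ring)
            return -ENOMEM;

        /* 32-byte alignment mirrors the hardware requirement above. */
        ctx->sg_pool = dma_pool_create("my_sg", dev, sg_bytes, 32, 0);
        if (!ctx->sg_pool) {
            dma_free_coherent(dev, ring_bytes, ctx->ring, ctx->ring_dma);
            return -ENOMEM;
        }
        return 0;
    }

    static void my_free_hw_ctx(struct device *dev, struct my_hw_ctx *ctx,
                               size_t ring_bytes)
    {
        dma_pool_destroy(ctx->sg_pool);
        dma_free_coherent(dev, ring_bytes, ctx->ring, ctx->ring_dma);
    }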
@@ -11353,14 +10902,20 @@ static int advansys_wide_init_chip(struct Scsi_Host *shost)
 static void advansys_wide_free_mem(struct asc_board *board)
 {
     struct adv_dvc_var *adv_dvc = &board->dvc_var.adv_dvc_var;
-    kfree(adv_dvc->carrier_buf);
-    adv_dvc->carrier_buf = NULL;
-    kfree(adv_dvc->orig_reqp);
-    adv_dvc->orig_reqp = board->adv_reqp = NULL;
-    while (board->adv_sgblkp) {
-        adv_sgblk_t *sgp = board->adv_sgblkp;
-        board->adv_sgblkp = sgp->next_sgblkp;
-        kfree(sgp);
+
+    if (adv_dvc->carrier) {
+        dma_free_coherent(board->dev, ADV_CARRIER_BUFSIZE,
+            adv_dvc->carrier, adv_dvc->carrier_addr);
+        adv_dvc->carrier = NULL;
+    }
+    if (board->adv_reqp) {
+        dma_free_coherent(board->dev, board->adv_reqp_size,
+            board->adv_reqp, board->adv_reqp_addr);
+        board->adv_reqp = NULL;
+    }
+    if (board->adv_sgblk_pool) {
+        dma_pool_destroy(board->adv_sgblk_pool);
+        board->adv_sgblk_pool = NULL;
     }
 }
 
@@ -11431,28 +10986,28 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
     switch (asc_dvc_varp->bus_type) {
 #ifdef CONFIG_ISA
     case ASC_IS_ISA:
-        shost->unchecked_isa_dma = TRUE;
+        shost->unchecked_isa_dma = true;
         share_irq = 0;
         break;
     case ASC_IS_VL:
-        shost->unchecked_isa_dma = FALSE;
+        shost->unchecked_isa_dma = false;
         share_irq = 0;
         break;
     case ASC_IS_EISA:
-        shost->unchecked_isa_dma = FALSE;
+        shost->unchecked_isa_dma = false;
         share_irq = IRQF_SHARED;
         break;
 #endif /* CONFIG_ISA */
 #ifdef CONFIG_PCI
     case ASC_IS_PCI:
-        shost->unchecked_isa_dma = FALSE;
+        shost->unchecked_isa_dma = false;
         share_irq = IRQF_SHARED;
         break;
 #endif /* CONFIG_PCI */
     default:
         shost_printk(KERN_ERR, shost, "unknown adapter type: "
                      "%d\n", asc_dvc_varp->bus_type);
-        shost->unchecked_isa_dma = TRUE;
+        shost->unchecked_isa_dma = false;
         share_irq = 0;
         break;
     }
@@ -11471,7 +11026,7 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
          * For Wide boards set PCI information before calling
          * AdvInitGetConfig().
          */
-        shost->unchecked_isa_dma = FALSE;
+        shost->unchecked_isa_dma = false;
         share_irq = IRQF_SHARED;
         ASC_DBG(2, "AdvInitGetConfig()\n");
 
@@ -11656,24 +11211,11 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
         /* Set maximum number of queues the adapter can handle. */
         shost->can_queue = adv_dvc_varp->max_host_qng;
     }
-
-    /*
-     * Following v1.3.89, 'cmd_per_lun' is no longer needed
-     * and should be set to zero.
-     *
-     * But because of a bug introduced in v1.3.89 if the driver is
-     * compiled as a module and 'cmd_per_lun' is zero, the Mid-Level
-     * SCSI function 'allocate_device' will panic. To allow the driver
-     * to work as a module in these kernels set 'cmd_per_lun' to 1.
-     *
-     * Note: This is wrong. cmd_per_lun should be set to the depth
-     * you want on untagged devices always.
-     #ifdef MODULE
-     */
-    shost->cmd_per_lun = 1;
-/* #else
-    shost->cmd_per_lun = 0;
-#endif */
+    ret = scsi_init_shared_tag_map(shost, shost->can_queue);
+    if (ret) {
+        shost_printk(KERN_ERR, shost, "init tag map failed\n");
+        goto err_free_dma;
+    }
 
     /*
      * Set the maximum number of scatter-gather elements the
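[note] The hunk above drops the historical cmd_per_lun commentary in favour of a shared block-layer tag map sized to can_queue; without it every scp->request->tag would be -1 and the tag-indexed request array could not work. A sketch of the setup call (API of this kernel generation; later kernels made block tags unconditional and removed scsi_init_shared_tag_map()):

    #include <scsi/scsi_host.h>
    #include <scsi/scsi_tcq.h>

    /* Hypothetical helper: size the tag space to the adapter's queue depth. */
    static int my_setup_tags(struct Scsi_Host *shost, int depth)
    {
        int ret;

        shost->can_queue = depth;
        ret = scsi_init_shared_tag_map(shost, shost->can_queue);
        if (ret)
            return ret;     /* no tag map: tag-indexed lookup is impossible */
        return 0;
    }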
@@ -11844,7 +11386,9 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
  err_unmap:
     if (boardp->ioremap_addr)
         iounmap(boardp->ioremap_addr);
+#ifdef CONFIG_PCI
  err_shost:
+#endif
     return ret;
 }
 
@@ -11927,6 +11471,7 @@ static int advansys_isa_probe(struct device *dev, unsigned int id)
     board = shost_priv(shost);
     board->irq = advansys_isa_irq_no(iop_base);
     board->dev = dev;
+    board->shost = shost;
 
     err = advansys_board_found(shost, iop_base, ASC_IS_ISA);
     if (err)
@@ -12009,6 +11554,7 @@ static int advansys_vlb_probe(struct device *dev, unsigned int id)
     board = shost_priv(shost);
     board->irq = advansys_vlb_irq_no(iop_base);
     board->dev = dev;
+    board->shost = shost;
 
     err = advansys_board_found(shost, iop_base, ASC_IS_VL);
     if (err)
@@ -12116,6 +11662,7 @@ static int advansys_eisa_probe(struct device *dev)
     board = shost_priv(shost);
     board->irq = irq;
     board->dev = dev;
+    board->shost = shost;
 
     err = advansys_board_found(shost, ioport, ASC_IS_EISA);
     if (!err) {
@@ -12232,6 +11779,7 @@ static int advansys_pci_probe(struct pci_dev *pdev,
     board = shost_priv(shost);
     board->irq = pdev->irq;
     board->dev = &pdev->dev;
+    board->shost = shost;
 
     if (pdev->device == PCI_DEVICE_ID_ASP_ABP940UW ||
         pdev->device == PCI_DEVICE_ID_38C0800_REV1 ||
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index e31c460a1335..f44d0487236e 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -2922,7 +2922,6 @@ static struct scsi_host_template aha152x_driver_template = {
     .can_queue = 1,
     .this_id = 7,
     .sg_tablesize = SG_ALL,
-    .cmd_per_lun = 1,
     .use_clustering = DISABLE_CLUSTERING,
     .slave_alloc = aha152x_adjust_queue,
 };
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index b95d2779f467..5b8b2937a3fe 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -950,7 +950,6 @@ static struct scsi_host_template driver_template = {
     .can_queue = AHA1542_MAILBOXES,
     .this_id = 7,
     .sg_tablesize = 16,
-    .cmd_per_lun = 1,
     .unchecked_isa_dma = 1,
     .use_clustering = ENABLE_CLUSTERING,
 };
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index 31ace4bef8fe..bad35ffc015d 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -544,7 +544,6 @@ static struct scsi_host_template aha1740_template = {
     .can_queue = AHA1740_ECBS,
     .this_id = 7,
     .sg_tablesize = AHA1740_SCATTER,
-    .cmd_per_lun = AHA1740_CMDLUN,
     .use_clustering = ENABLE_CLUSTERING,
     .eh_abort_handler = aha1740_eh_abort_handler,
 };
diff --git a/drivers/scsi/aha1740.h b/drivers/scsi/aha1740.h
index af23fd6bd795..b0c5603461ca 100644
--- a/drivers/scsi/aha1740.h
+++ b/drivers/scsi/aha1740.h
@@ -149,6 +149,5 @@ struct ecb { /* Enhanced Control Block 6.1 */
 
 #define AHA1740_ECBS 32
 #define AHA1740_SCATTER 16
-#define AHA1740_CMDLUN 1
 
 #endif
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 02a2512b76a8..4b135cca42a1 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -65,7 +65,6 @@ static struct scsi_host_template aic94xx_sht = {
     .change_queue_depth = sas_change_queue_depth,
     .bios_param = sas_bios_param,
     .can_queue = 1,
-    .cmd_per_lun = 1,
     .this_id = -1,
     .sg_tablesize = SG_ALL,
     .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
diff --git a/drivers/scsi/arm/arxescsi.c b/drivers/scsi/arm/arxescsi.c
index 32d23212de48..3110736fd337 100644
--- a/drivers/scsi/arm/arxescsi.c
+++ b/drivers/scsi/arm/arxescsi.c
@@ -245,7 +245,6 @@ static struct scsi_host_template arxescsi_template = {
     .can_queue = 0,
     .this_id = 7,
     .sg_tablesize = SG_ALL,
-    .cmd_per_lun = 1,
     .use_clustering = DISABLE_CLUSTERING,
     .proc_name = "arxescsi",
 };
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index abc66f5263ec..faa1bee07c8a 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -367,7 +367,6 @@ static struct scsi_host_template cumanascsi2_template = {
     .this_id = 7,
     .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
     .dma_boundary = IOMD_DMA_BOUNDARY,
-    .cmd_per_lun = 1,
     .use_clustering = DISABLE_CLUSTERING,
     .proc_name = "cumanascsi2",
 };
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
index 5bf3c0d134b4..a8ad6880dd91 100644
--- a/drivers/scsi/arm/eesox.c
+++ b/drivers/scsi/arm/eesox.c
@@ -486,7 +486,6 @@ static struct scsi_host_template eesox_template = {
     .this_id = 7,
     .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
     .dma_boundary = IOMD_DMA_BOUNDARY,
-    .cmd_per_lun = 1,
     .use_clustering = DISABLE_CLUSTERING,
     .proc_name = "eesox",
 };
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 0836433e3a2d..05301bc752ee 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -3158,7 +3158,6 @@ static struct scsi_host_template atp870u_template = {
     .can_queue = qcnt /* can_queue */,
     .this_id = 7 /* SCSI ID */,
     .sg_tablesize = ATP870U_SCATTER /*SG_ALL*/ /*SG_NONE*/,
-    .cmd_per_lun = ATP870U_CMDLUN /* commands per lun */,
     .use_clustering = ENABLE_CLUSTERING,
     .max_sectors = ATP870U_MAX_SECTORS,
 };
diff --git a/drivers/scsi/atp870u.h b/drivers/scsi/atp870u.h
index 62bae64a01c1..5cf62566ad42 100644
--- a/drivers/scsi/atp870u.h
+++ b/drivers/scsi/atp870u.h
@@ -10,7 +10,6 @@
 #define MAX_SENSE 14
 #define qcnt 32
 #define ATP870U_SCATTER 128
-#define ATP870U_CMDLUN 1
 
 #define MAX_ADAPTER 8
 #define MAX_SCSI_ID 16
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 447cf7ce606e..185391a64d4b 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -452,6 +452,7 @@ void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
         ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
          (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
         phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT;
+        phba->get_boot = BE_GET_BOOT_RETRIES;
 
         beiscsi_log(phba, KERN_ERR,
                     BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
@@ -480,6 +481,7 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
         case ASYNC_EVENT_NEW_ISCSI_CONN:
         case ASYNC_EVENT_NEW_TCP_CONN:
             phba->state |= BE_ADAPTER_CHECK_BOOT;
+            phba->get_boot = BE_GET_BOOT_RETRIES;
             beiscsi_log(phba, KERN_ERR,
                         BEISCSI_LOG_CONFIG |
                         BEISCSI_LOG_MBOX,
@@ -488,6 +490,8 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
                         compl->flags);
             break;
         default:
+            phba->state |= BE_ADAPTER_CHECK_BOOT;
+            phba->get_boot = BE_GET_BOOT_RETRIES;
             beiscsi_log(phba, KERN_ERR,
                         BEISCSI_LOG_CONFIG |
                         BEISCSI_LOG_MBOX,
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index f11d325fe696..cdfbc5c19cf4 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -304,6 +304,17 @@ struct mgmt_auth_method_format {
     struct mgmt_chap_format chap;
 } __packed;
 
+struct be_cmd_req_logout_fw_sess {
+    struct be_cmd_req_hdr hdr;    /* dw[4] */
+    uint32_t session_handle;
+} __packed;
+
+struct be_cmd_resp_logout_fw_sess {
+    struct be_cmd_resp_hdr hdr;    /* dw[4] */
+#define BEISCSI_MGMT_SESSION_CLOSE 0x20
+    uint32_t session_status;
+} __packed;
+
 struct mgmt_conn_login_options {
     u8 flags;
     u8 header_digest;
@@ -1136,6 +1147,7 @@ struct be_cmd_get_all_if_id_req {
 #define OPCODE_ISCSI_INI_CFG_GET_HBA_NAME 6
 #define OPCODE_ISCSI_INI_CFG_SET_HBA_NAME 7
 #define OPCODE_ISCSI_INI_SESSION_GET_A_SESSION 14
+#define OPCODE_ISCSI_INI_SESSION_LOGOUT_TARGET 24
 #define OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS 36
 #define OPCODE_ISCSI_INI_DRIVER_OFFLOAD_SESSION 41
 #define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 1f74760ce86c..7a6dbfbccec9 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -668,14 +668,20 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev)
         return ret;
     }
 
+    ret = pci_request_regions(pcidev, DRV_NAME);
+    if (ret) {
+        dev_err(&pcidev->dev,
+                "beiscsi_enable_pci - request region failed\n");
+        goto pci_dev_disable;
+    }
+
     pci_set_master(pcidev);
     ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
     if (ret) {
         ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
         if (ret) {
             dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
-            pci_disable_device(pcidev);
-            return ret;
+            goto pci_region_release;
         } else {
             ret = pci_set_consistent_dma_mask(pcidev,
                                               DMA_BIT_MASK(32));
@@ -684,11 +690,17 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev)
         ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
         if (ret) {
             dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
-            pci_disable_device(pcidev);
-            return ret;
+            goto pci_region_release;
         }
     }
     return 0;
+
+pci_region_release:
+    pci_release_regions(pcidev);
+pci_dev_disable:
+    pci_disable_device(pcidev);
+
+    return ret;
 }
 
 static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
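[note] beiscsi_enable_pci() above now owns the BAR regions and unwinds them on failure, jumping to the label that releases everything acquired so far, in reverse order. A toy userspace model of the same goto ladder (names invented):

    /* build: cc -o unwind unwind.c */
    #include <stdio.h>

    static int acquire(const char *what, int ok)
    {
        printf("acquire %s -> %s\n", what, ok ? "ok" : "fail");
        return ok ? 0 : -1;
    }

    static int enable(int fail_at)
    {
        if (acquire("device", fail_at != 1))
            return -1;
        if (acquire("regions", fail_at != 2))
            goto dev_disable;
        if (acquire("dma mask", fail_at != 3))
            goto region_release;
        return 0;

    region_release:
        printf("release regions\n");
    dev_disable:
        printf("disable device\n");
        return -1;
    }

    int main(void)
    {
        return enable(3) == -1 ? 0 : 1;     /* fails at step 3, unwinds 2, 1 */
    }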
@@ -1356,8 +1368,10 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
     if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ)
         conn->rxdata_octets += resid;
 unmap:
-    scsi_dma_unmap(io_task->scsi_cmnd);
-    io_task->scsi_cmnd = NULL;
+    if (io_task->scsi_cmnd) {
+        scsi_dma_unmap(io_task->scsi_cmnd);
+        io_task->scsi_cmnd = NULL;
+    }
     iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
 }
 
@@ -2037,11 +2051,16 @@ static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
             /* Interpret compl as a async link evt */
             beiscsi_async_link_state_process(phba,
                 (struct be_async_event_link_state *) mcc_compl);
-        else
+        else {
             beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX,
                         "BM_%d :  Unsupported Async Event, flags"
                         " = 0x%08x\n",
                         mcc_compl->flags);
+            if (phba->state & BE_ADAPTER_LINK_UP) {
+                phba->state |= BE_ADAPTER_CHECK_BOOT;
+                phba->get_boot = BE_GET_BOOT_RETRIES;
+            }
+        }
     } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
         be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
         atomic_dec(&phba->ctrl.mcc_obj.q.used);
@@ -3678,14 +3697,16 @@ static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
     struct be_ctrl_info *ctrl = &phba->ctrl;
 
     q = &phba->ctrl.mcc_obj.q;
-    if (q->created)
+    if (q->created) {
         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
-    be_queue_free(phba, q);
+        be_queue_free(phba, q);
+    }
 
     q = &phba->ctrl.mcc_obj.cq;
-    if (q->created)
+    if (q->created) {
         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
-    be_queue_free(phba, q);
+        be_queue_free(phba, q);
+    }
 }
 
 static void hwi_cleanup(struct beiscsi_hba *phba)
@@ -3729,8 +3750,10 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
 
     for (i = 0; i < (phba->num_cpus); i++) {
         q = &phwi_context->be_cq[i];
-        if (q->created)
+        if (q->created) {
+            be_queue_free(phba, q);
             beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
+        }
     }
 
     be_mcc_queues_destroy(phba);
@@ -3740,8 +3763,10 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
     eq_for_mcc = 0;
     for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
         q = &phwi_context->be_eq[i].q;
-        if (q->created)
+        if (q->created) {
+            be_queue_free(phba, q);
             beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
+        }
     }
     be_cmd_fw_uninit(ctrl);
 }
@@ -4328,8 +4353,14 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
         beiscsi_log(phba, KERN_ERR,
                     BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
                     "BM_%d : No boot session\n");
+
+        if (ret == -ENXIO)
+            phba->get_boot = 0;
+
+
         return ret;
     }
+    phba->get_boot = 0;
     nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev,
                                           sizeof(*session_resp),
                                           &nonemb_cmd.dma);
@@ -4369,6 +4400,9 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
 
     memcpy(&phba->boot_sess, &session_resp->session_info,
            sizeof(struct mgmt_session_info));
+
+    beiscsi_logout_fw_sess(phba,
+            phba->boot_sess.session_handle);
     ret = 0;
 
 boot_freemem:
@@ -4580,11 +4614,13 @@ beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
         spin_unlock_bh(&phba->mgmt_sgl_lock);
     }
 
-    if (io_task->mtask_addr)
+    if (io_task->mtask_addr) {
         pci_unmap_single(phba->pcidev,
                          io_task->mtask_addr,
                          io_task->mtask_data_count,
                          PCI_DMA_TODEVICE);
+        io_task->mtask_addr = 0;
+    }
 }
 
 /**
@@ -5264,6 +5300,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
     iscsi_host_free(phba->shost);
     pci_disable_pcie_error_reporting(pcidev);
     pci_set_drvdata(pcidev, NULL);
+    pci_release_regions(pcidev);
     pci_disable_device(pcidev);
 }
 
@@ -5374,8 +5411,14 @@ beiscsi_hw_health_check(struct work_struct *work)
     be_eqd_update(phba);
 
     if (phba->state & BE_ADAPTER_CHECK_BOOT) {
-        phba->state &= ~BE_ADAPTER_CHECK_BOOT;
-        be_check_boot_session(phba);
+        if ((phba->get_boot > 0) && (!phba->boot_kset)) {
+            phba->get_boot--;
+            if (!(phba->get_boot % BE_GET_BOOT_TO))
+                be_check_boot_session(phba);
+        } else {
+            phba->state &= ~BE_ADAPTER_CHECK_BOOT;
+            phba->get_boot = 0;
+        }
     }
 
     beiscsi_ue_detect(phba);
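[note] The health-check hunk above turns boot-session discovery into a bounded, throttled poll: get_boot counts down from BE_GET_BOOT_RETRIES on every pass, and be_check_boot_session() only fires when the counter crosses a BE_GET_BOOT_TO boundary. A toy model of that cadence:

    /* build: cc -o bootpoll bootpoll.c */
    #include <stdio.h>

    #define RETRIES 45      /* mirrors BE_GET_BOOT_RETRIES */
    #define TO      20      /* mirrors BE_GET_BOOT_TO */

    int main(void)
    {
        int get_boot = RETRIES;

        /* Polls fire when the counter hits 40, 20 and 0, then stop. */
        while (get_boot > 0) {
            get_boot--;
            if (!(get_boot % TO))
                printf("poll boot session at count %d\n", get_boot);
        }
        return 0;
    }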
@@ -5738,6 +5781,7 @@ hba_free:
     iscsi_host_free(phba->shost);
     pci_set_drvdata(pcidev, NULL);
 disable_pci:
+    pci_release_regions(pcidev);
     pci_disable_device(pcidev);
     return ret;
 }
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index e70ea26bbc2b..b8c0c7819cb1 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -36,7 +36,7 @@
 #include <scsi/scsi_transport_iscsi.h>
 
 #define DRV_NAME        "be2iscsi"
-#define BUILD_STR        "10.4.114.0"
+#define BUILD_STR        "10.6.0.0"
 #define BE_NAME            "Avago Technologies OneConnect" \
                 "Open-iSCSI Driver version" BUILD_STR
 #define DRV_DESC        BE_NAME " " "Driver"
@@ -109,6 +109,9 @@
 
 #define BEISCSI_CLEAN_UNLOAD	0x01
 #define BEISCSI_EEH_UNLOAD	0x02
+
+#define BE_GET_BOOT_RETRIES	45
+#define BE_GET_BOOT_TO		20
 /**
  * hardware needs the async PDU buffers to be posted in multiples of 8
  * So have atleast 8 of them by default
@@ -413,6 +416,7 @@ struct beiscsi_hba {
 	} fw_config;
 
 	unsigned int state;
+	int get_boot;
 	bool fw_timeout;
 	bool ue_detected;
 	struct delayed_work beiscsi_hw_check_task;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index c2c4d6975fb7..ca4016f20e76 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1707,3 +1707,72 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
 		(params->dw[offsetof(struct amap_beiscsi_offload_params,
 		 exp_statsn) / 32] + 1));
 }
+
+/**
+ * beiscsi_logout_fw_sess()- Firmware Session Logout
+ * @phba: Device priv structure instance
+ * @fw_sess_handle: FW session handle
+ *
+ * Logout from the FW established sessions.
+ * returns
+ *  Success: 0
+ *  Failure: Non-Zero Value
+ *
+ */
+int beiscsi_logout_fw_sess(struct beiscsi_hba *phba,
+		uint32_t fw_sess_handle)
+{
+	struct be_ctrl_info *ctrl = &phba->ctrl;
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_logout_fw_sess *req;
+	struct be_cmd_resp_logout_fw_sess *resp;
+	unsigned int tag;
+	int rc;
+
+	beiscsi_log(phba, KERN_INFO,
+		    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+		    "BG_%d : In bescsi_logout_fwboot_sess\n");
+
+	spin_lock(&ctrl->mbox_lock);
+	tag = alloc_mcc_tag(phba);
+	if (!tag) {
+		spin_unlock(&ctrl->mbox_lock);
+		beiscsi_log(phba, KERN_INFO,
+			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+			    "BG_%d : MBX Tag Failure\n");
+		return -EINVAL;
+	}
+
+	wrb = wrb_from_mccq(phba);
+	req = embedded_payload(wrb);
+	wrb->tag0 |= tag;
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+			   OPCODE_ISCSI_INI_SESSION_LOGOUT_TARGET,
+			   sizeof(struct be_cmd_req_logout_fw_sess));
+
+	/* Set the session handle */
+	req->session_handle = fw_sess_handle;
+	be_mcc_notify(phba);
+	spin_unlock(&ctrl->mbox_lock);
+
+	rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+	if (rc) {
+		beiscsi_log(phba, KERN_ERR,
+			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+			    "BG_%d : MBX CMD FW_SESSION_LOGOUT_TARGET Failed\n");
+		return -EBUSY;
+	}
+
+	resp = embedded_payload(wrb);
+	if (resp->session_status !=
+	    BEISCSI_MGMT_SESSION_CLOSE) {
+		beiscsi_log(phba, KERN_ERR,
+			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+			    "BG_%d : FW_SESSION_LOGOUT_TARGET resp : 0x%x\n",
+			    resp->session_status);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
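
beiscsi_logout_fw_sess() above follows the driver's usual MCC mailbox pattern: reserve a completion tag under the mailbox lock, build the WRB, notify the hardware, drop the lock, then block on the tagged completion and check the response status. A compile-and-run sketch of that request/response shape; every type and helper here is an illustrative stand-in, not the driver's real API:

#include <stdint.h>
#include <stdio.h>

struct mbox_req  { uint32_t session_handle; };
struct mbox_resp { uint32_t session_status; };

#define SESSION_CLOSED 0x20	/* stand-in for BEISCSI_MGMT_SESSION_CLOSE */

/* stand-in for post + beiscsi_mccq_compl(): pretend a clean close */
static int post_and_wait(const struct mbox_req *req, struct mbox_resp *resp)
{
	(void)req;
	resp->session_status = SESSION_CLOSED;
	return 0;
}

static int logout_fw_sess(uint32_t fw_sess_handle)
{
	struct mbox_req req = { .session_handle = fw_sess_handle };
	struct mbox_resp resp;

	if (post_and_wait(&req, &resp))
		return -1;	/* completion failed or timed out */
	if (resp.session_status != SESSION_CLOSED)
		return -1;	/* firmware refused the logout */
	return 0;
}

int main(void)
{
	printf("logout rc=%d\n", logout_fw_sess(0x42));
	return 0;
}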
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 9356b9a86b66..b58a7decbd94 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -338,4 +338,7 @@ void beiscsi_ue_detect(struct beiscsi_hba *phba);
 int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
 			   struct be_set_eqd *, int num);
 
+int beiscsi_logout_fw_sess(struct beiscsi_hba *phba,
+			   uint32_t fw_sess_handle);
+
 #endif
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index e53078d03309..72894378ffcf 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1173,8 +1173,10 @@ static void bnx2i_cleanup_task(struct iscsi_task *task)
 		bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
 
 		spin_unlock_bh(&conn->session->back_lock);
+		spin_unlock_bh(&conn->session->frwd_lock);
 		wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
 				msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
+		spin_lock_bh(&conn->session->frwd_lock);
 		spin_lock_bh(&conn->session->back_lock);
 	}
 	bnx2i_iscsi_unmap_sg_list(task->dd_data);
@@ -2093,7 +2095,8 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
 	else
 		/* wait for option-2 conn teardown */
 		wait_event_interruptible(bnx2i_ep->ofld_wait,
-			bnx2i_ep->state != EP_STATE_DISCONN_START);
+			((bnx2i_ep->state != EP_STATE_DISCONN_START)
+			 && (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD)));
 
 	if (signal_pending(current))
 		flush_signals(current);
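
Both bnx2i changes above stop the driver from stalling while holding state the waker needs: frwd_lock is dropped around wait_for_completion_timeout(), and the disconnect wait also wakes on EP_STATE_TCP_FIN_RCVD. A rough pthread analogue of the first pattern, compile-only and with all names invented; it illustrates the unlock/sleep/relock shape, not the kernel's locking model:

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t frwd_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done_cv   = PTHREAD_COND_INITIALIZER;
static bool cleanup_done;		/* set by the completing thread */

/* caller holds frwd_lock on entry and on return */
void wait_for_cleanup(void)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;			/* ~cleanup timeout */

	pthread_mutex_unlock(&frwd_lock);	/* never sleep holding it */
	pthread_mutex_lock(&done_lock);
	while (!cleanup_done &&
	       pthread_cond_timedwait(&done_cv, &done_lock, &deadline) == 0)
		;
	pthread_mutex_unlock(&done_lock);
	pthread_mutex_lock(&frwd_lock);		/* retake before returning */
}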
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 2e66f34ebb79..622bdabc8894 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -3928,6 +3928,7 @@ csio_hw_init(struct csio_hw *hw)
 
 		evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL);
 		if (!evt_entry) {
+			rv = -ENOMEM;
 			csio_err(hw, "Failed to initialize eventq");
 			goto err_evtq_cleanup;
 		}
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 3db4c63978c5..0e2bee937fe8 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -1,7 +1,7 @@
 /*
  * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management
  *
- * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
+ * Copyright (C) 2003-2015 Chelsio Communications. All rights reserved.
  *
  * This program is distributed in the hope that it will be useful, but WITHOUT
  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
@@ -32,8 +32,8 @@ static unsigned int dbg_level;
 
 #define DRV_MODULE_NAME		"cxgb3i"
 #define DRV_MODULE_DESC		"Chelsio T3 iSCSI Driver"
-#define DRV_MODULE_VERSION	"2.0.0"
-#define DRV_MODULE_RELDATE	"Jun. 2010"
+#define DRV_MODULE_VERSION	"2.0.1-ko"
+#define DRV_MODULE_RELDATE	"Apr. 2015"
 
 static char version[] =
 	DRV_MODULE_DESC " " DRV_MODULE_NAME
@@ -156,7 +156,7 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion);
 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 			      const struct l2t_entry *e)
 {
-	unsigned int wscale = cxgbi_sock_compute_wscale(cxgb3i_rcv_win);
+	unsigned int wscale = cxgbi_sock_compute_wscale(csk->rcv_win);
 	struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head;
 
 	skb->priority = CPL_PRIORITY_SETUP;
@@ -172,7 +172,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 		V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) |
 		V_L2T_IDX(e->idx) | V_TX_CHANNEL(e->smt_idx));
 	req->opt0l = htonl(V_ULP_MODE(ULP2_MODE_ISCSI) |
-			V_RCV_BUFSIZ(cxgb3i_rcv_win>>10));
+			V_RCV_BUFSIZ(csk->rcv_win >> 10));
 
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
 		"csk 0x%p,%u,0x%lx,%u, %pI4:%u-%pI4:%u, %u,%u,%u.\n",
@@ -369,7 +369,7 @@ static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
 		req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
 				    V_TX_CPU_IDX(csk->rss_qid));
 		/* sendbuffer is in units of 32KB. */
-		req->param |= htonl(V_TX_SNDBUF(cxgb3i_snd_win >> 15));
+		req->param |= htonl(V_TX_SNDBUF(csk->snd_win >> 15));
 		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
 	}
 }
@@ -503,8 +503,8 @@ static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 		csk, csk->state, csk->flags, csk->tid);
 
 	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
-	if (cxgb3i_rcv_win > (M_RCV_BUFSIZ << 10))
-		csk->rcv_wup -= cxgb3i_rcv_win - (M_RCV_BUFSIZ << 10);
+	if (csk->rcv_win > (M_RCV_BUFSIZ << 10))
+		csk->rcv_wup -= csk->rcv_win - (M_RCV_BUFSIZ << 10);
 
 	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
 
@@ -988,6 +988,8 @@ static int init_act_open(struct cxgbi_sock *csk)
 		goto rel_resource;
 	skb->sk = (struct sock *)csk;
 	set_arp_failure_handler(skb, act_open_arp_failure);
+	csk->snd_win = cxgb3i_snd_win;
+	csk->rcv_win = cxgb3i_rcv_win;
 
 	csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1;
 	csk->wr_una_cred = 0;
@@ -1320,8 +1322,6 @@ static void cxgb3i_dev_open(struct t3cdev *t3dev)
 	cdev->nports = adapter->params.nports;
 	cdev->mtus = adapter->params.mtus;
 	cdev->nmtus = NMTUS;
-	cdev->snd_win = cxgb3i_snd_win;
-	cdev->rcv_win = cxgb3i_rcv_win;
 	cdev->rx_credit_thres = cxgb3i_rx_credit_thres;
 	cdev->skb_tx_rsvd = CXGB3I_TX_HEADER_LEN;
 	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr_norss);
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
index 20593fd69d8f..b0430c9359e7 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
@@ -1,7 +1,7 @@
 /*
  * cxgb3i.h: Chelsio S3xx iSCSI driver.
  *
- * Copyright (c) 2008 Chelsio Communications, Inc.
+ * Copyright (c) 2008-2015 Chelsio Communications, Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index dd00e5fe4a5e..de6feb8964c9 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1,7 +1,7 @@
 /*
  * cxgb4i.c: Chelsio T4 iSCSI driver.
  *
- * Copyright (c) 2010 Chelsio Communications, Inc.
+ * Copyright (c) 2010-2015 Chelsio Communications, Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -36,11 +36,12 @@ static unsigned int dbg_level;
 
 #define DRV_MODULE_NAME		"cxgb4i"
 #define DRV_MODULE_DESC		"Chelsio T4/T5 iSCSI Driver"
-#define DRV_MODULE_VERSION	"0.9.4"
+#define DRV_MODULE_VERSION	"0.9.5-ko"
+#define DRV_MODULE_RELDATE	"Apr. 2015"
 
 static char version[] =
 	DRV_MODULE_DESC " " DRV_MODULE_NAME
-	" v" DRV_MODULE_VERSION "\n";
+	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
 MODULE_AUTHOR("Chelsio Communications, Inc.");
 MODULE_DESCRIPTION(DRV_MODULE_DESC);
@@ -50,11 +51,13 @@ MODULE_LICENSE("GPL");
 module_param(dbg_level, uint, 0644);
 MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");
 
-static int cxgb4i_rcv_win = 256 * 1024;
+#define CXGB4I_DEFAULT_10G_RCV_WIN (256 * 1024)
+static int cxgb4i_rcv_win = -1;
 module_param(cxgb4i_rcv_win, int, 0644);
 MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP reveive window in bytes");
 
-static int cxgb4i_snd_win = 128 * 1024;
+#define CXGB4I_DEFAULT_10G_SND_WIN (128 * 1024)
+static int cxgb4i_snd_win = -1;
 module_param(cxgb4i_snd_win, int, 0644);
 MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");
 
@@ -196,10 +199,10 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 		TX_CHAN_V(csk->tx_chan) |
 		SMAC_SEL_V(csk->smac_idx) |
 		ULP_MODE_V(ULP_MODE_ISCSI) |
-		RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);
+		RCV_BUFSIZ_V(csk->rcv_win >> 10);
+
 	opt2 = RX_CHANNEL_V(0) |
 		RSS_QUEUE_VALID_F |
-		(RX_FC_DISABLE_F) |
 		RSS_QUEUE_V(csk->rss_qid);
 
 	if (is_t4(lldi->adapter_type)) {
@@ -228,6 +231,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 	} else {
 		struct cpl_t5_act_open_req *req =
 			(struct cpl_t5_act_open_req *)skb->head;
+		u32 isn = (prandom_u32() & ~7UL) - 1;
 
 		INIT_TP_WR(req, 0);
 		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
@@ -241,7 +245,10 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 			cxgb4_select_ntuple(
 				csk->cdev->ports[csk->port_id],
 				csk->l2t)));
-		opt2 |= 1 << 31;
+		req->rsvd = cpu_to_be32(isn);
+		opt2 |= T5_ISS_VALID;
+		opt2 |= T5_OPT_2_VALID_F;
+
 		req->opt2 = cpu_to_be32(opt2);
 
 		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
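
The T5 branch above now seeds its own initial send sequence number: (prandom_u32() & ~7UL) - 1 rounds a random value down to a multiple of 8 and subtracts one, so the ISS always ends in binary 111. The patch does not say why the hardware wants that alignment; the arithmetic itself can be checked in userspace:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int r = (unsigned int)rand();
	unsigned int isn = (r & ~7u) - 1;

	/* low three bits are always set, even when r & ~7u wraps to 0 */
	printf("r=0x%08x isn=0x%08x low3=%u\n", r, isn, isn & 7);
	return 0;
}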
@@ -279,7 +286,7 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
 		TX_CHAN_V(csk->tx_chan) |
 		SMAC_SEL_V(csk->smac_idx) |
 		ULP_MODE_V(ULP_MODE_ISCSI) |
-		RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);
+		RCV_BUFSIZ_V(csk->rcv_win >> 10);
 
 	opt2 = RX_CHANNEL_V(0) |
 		RSS_QUEUE_VALID_F |
@@ -544,7 +551,7 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
 	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
 	flowc->mnemval[5].val = htonl(csk->rcv_nxt);
 	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
-	flowc->mnemval[6].val = htonl(cxgb4i_snd_win);
+	flowc->mnemval[6].val = htonl(csk->snd_win);
 	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
 	flowc->mnemval[7].val = htonl(csk->advmss);
 	flowc->mnemval[8].mnemonic = 0;
@@ -557,7 +564,7 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
 		"csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
 		csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
-		csk->snd_nxt, csk->rcv_nxt, cxgb4i_snd_win,
+		csk->snd_nxt, csk->rcv_nxt, csk->snd_win,
 		csk->advmss);
 
 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
@@ -750,8 +757,8 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
 	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
 	 * pass through opt0.
 	 */
-	if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
-		csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);
+	if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10))
+		csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10);
 
 	csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
 	if (TCPOPT_TSTAMP_G(tcp_opt))
@@ -1367,6 +1374,8 @@ static int init_act_open(struct cxgbi_sock *csk)
 	unsigned int step;
 	unsigned int size, size6;
 	int t4 = is_t4(lldi->adapter_type);
+	unsigned int linkspeed;
+	unsigned int rcv_winf, snd_winf;
 
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
 		"csk 0x%p,%u,0x%lx,%u.\n",
@@ -1440,6 +1449,21 @@ static int init_act_open(struct cxgbi_sock *csk)
 	csk->txq_idx = cxgb4_port_idx(ndev) * step;
 	step = lldi->nrxq / lldi->nchan;
 	csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
+	linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed;
+	csk->snd_win = cxgb4i_snd_win;
+	csk->rcv_win = cxgb4i_rcv_win;
+	if (cxgb4i_rcv_win <= 0) {
+		csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN;
+		rcv_winf = linkspeed / SPEED_10000;
+		if (rcv_winf)
+			csk->rcv_win *= rcv_winf;
+	}
+	if (cxgb4i_snd_win <= 0) {
+		csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN;
+		snd_winf = linkspeed / SPEED_10000;
+		if (snd_winf)
+			csk->snd_win *= snd_winf;
+	}
 	csk->wr_cred = lldi->wr_cred -
 		       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
 	csk->wr_max_cred = csk->wr_cred;
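
The block above is the heart of the cxgb4i window rework: when the module parameter is left at its default (<= 0), each connection starts from the 10Gbps window and scales it linearly with link speed, so a 40G port gets 4x the buffer. A standalone sketch of the receive-window rule (main() and the sample speeds are illustrative):

#include <stdio.h>

#define SPEED_10000			10000
#define CXGB4I_DEFAULT_10G_RCV_WIN	(256 * 1024)

static unsigned int scaled_rcv_win(int param, unsigned int linkspeed)
{
	unsigned int win, winf;

	if (param > 0)
		return param;		/* administrator forced a size */
	win = CXGB4I_DEFAULT_10G_RCV_WIN;
	winf = linkspeed / SPEED_10000;	/* 0 below 10G keeps the default */
	if (winf)
		win *= winf;
	return win;
}

int main(void)
{
	printf("10G auto: %u bytes\n", scaled_rcv_win(-1, 10000));
	printf("40G auto: %u bytes\n", scaled_rcv_win(-1, 40000));
	printf("forced:   %u bytes\n", scaled_rcv_win(65536, 40000));
	return 0;
}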
@@ -1758,8 +1782,6 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
 	cdev->nports = lldi->nports;
 	cdev->mtus = lldi->mtus;
 	cdev->nmtus = NMTUS;
-	cdev->snd_win = cxgb4i_snd_win;
-	cdev->rcv_win = cxgb4i_rcv_win;
 	cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
 	cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
 	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
index 1096026ba241..22dd8d670e4a 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
@@ -1,7 +1,7 @@
 /*
  * cxgb4i.h: Chelsio T4 iSCSI driver.
  *
- * Copyright (c) 2010 Chelsio Communications, Inc.
+ * Copyright (c) 2010-2015 Chelsio Communications, Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -23,6 +23,8 @@
 #define CXGB4I_TX_HEADER_LEN \
 	(sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
 
+#define T5_ISS_VALID (1 << 18)
+
 struct ulptx_idata {
 	__be32 cmd_more;
 	__be32 len;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index eb58afcfb73b..1d42e4f88b96 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1,7 +1,7 @@
 /*
  * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver.
  *
- * Copyright (c) 2010 Chelsio Communications, Inc.
+ * Copyright (c) 2010-2015 Chelsio Communications, Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -38,8 +38,12 @@ static unsigned int dbg_level;
 
 #define DRV_MODULE_NAME		"libcxgbi"
 #define DRV_MODULE_DESC		"Chelsio iSCSI driver library"
-#define DRV_MODULE_VERSION	"0.9.0"
-#define DRV_MODULE_RELDATE	"Jun. 2010"
+#define DRV_MODULE_VERSION	"0.9.1-ko"
+#define DRV_MODULE_RELDATE	"Apr. 2015"
+
+static char version[] =
+	DRV_MODULE_DESC " " DRV_MODULE_NAME
+	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
 MODULE_AUTHOR("Chelsio Communications, Inc.");
 MODULE_DESCRIPTION(DRV_MODULE_DESC);
@@ -1126,11 +1130,11 @@ static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
 		goto out_err;
 	}
 
-	if (csk->write_seq - csk->snd_una >= cdev->snd_win) {
+	if (csk->write_seq - csk->snd_una >= csk->snd_win) {
 		log_debug(1 << CXGBI_DBG_PDU_TX,
 			"csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
 			csk, csk->state, csk->flags, csk->tid, csk->write_seq,
-			csk->snd_una, cdev->snd_win);
+			csk->snd_una, csk->snd_win);
 		err = -ENOBUFS;
 		goto out_err;
 	}
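
The send-window test above depends on unsigned 32-bit wraparound: write_seq - snd_una yields the number of unacknowledged bytes even after the sequence counters wrap past zero, which is why the fields are u32 rather than anything wider. A small demonstration with made-up sequence numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t snd_una   = 0xfffffff0u;	/* oldest unacked byte */
	uint32_t write_seq = 0x00000010u;	/* next byte to queue (wrapped) */
	uint32_t snd_win   = 128 * 1024;

	uint32_t in_flight = write_seq - snd_una;	/* 0x20, despite the wrap */

	printf("in flight: %u bytes, window full: %s\n", in_flight,
	       in_flight >= snd_win ? "yes" : "no");
	return 0;
}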
@@ -1885,7 +1889,7 @@ static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
1885 "csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n", 1889 "csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n",
1886 csk, csk->state, csk->flags, csk->tid, csk->copied_seq, 1890 csk, csk->state, csk->flags, csk->tid, csk->copied_seq,
1887 csk->rcv_wup, cdev->rx_credit_thres, 1891 csk->rcv_wup, cdev->rx_credit_thres,
1888 cdev->rcv_win); 1892 csk->rcv_win);
1889 1893
1890 if (csk->state != CTP_ESTABLISHED) 1894 if (csk->state != CTP_ESTABLISHED)
1891 return; 1895 return;
@@ -1896,7 +1900,7 @@ static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
 	if (unlikely(cdev->rx_credit_thres == 0))
 		return;
 
-	must_send = credits + 16384 >= cdev->rcv_win;
+	must_send = credits + 16384 >= csk->rcv_win;
 	if (must_send || credits >= cdev->rx_credit_thres)
 		csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
 }
@@ -2913,6 +2917,8 @@ static int __init libcxgbi_init_module(void)
 	sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
 	sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1;
 
+	pr_info("%s", version);
+
 	pr_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n",
 		ISCSI_ITT_MASK, sw_tag_idx_bits,
 		ISCSI_AGE_MASK, sw_tag_age_bits);
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index aba1af720df6..b3e5bd1d5d9c 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -1,7 +1,7 @@
 /*
  * libcxgbi.h: Chelsio common library for T3/T4 iSCSI driver.
  *
- * Copyright (c) 2010 Chelsio Communications, Inc.
+ * Copyright (c) 2010-2015 Chelsio Communications, Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -234,6 +234,8 @@ struct cxgbi_sock {
 	u32 snd_nxt;
 	u32 snd_una;
 	u32 write_seq;
+	u32 snd_win;
+	u32 rcv_win;
 };
 
 /*
@@ -540,8 +542,6 @@ struct cxgbi_device {
 	struct iscsi_transport *itp;
 
 	unsigned int pfvf;
-	unsigned int snd_win;
-	unsigned int rcv_win;
 	unsigned int rx_credit_thres;
 	unsigned int skb_tx_rsvd;
 	unsigned int skb_rx_extra;	/* for msg coalesced mode */
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 2806cfbec2b9..f35ed53adaac 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -3562,7 +3562,6 @@ static struct scsi_host_template driver_template = {
 	.slave_configure	= adpt_slave_configure,
 	.can_queue		= MAX_TO_IOP_MESSAGES,
 	.this_id		= 7,
-	.cmd_per_lun		= 1,
 	.use_clustering		= ENABLE_CLUSTERING,
 };
 
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index fff682976c56..eefe14d453db 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -1764,7 +1764,6 @@ struct scsi_host_template fdomain_driver_template = {
 	.can_queue		= 1,
 	.this_id		= 6,
 	.sg_tablesize		= 64,
-	.cmd_per_lun		= 1,
 	.use_clustering		= DISABLE_CLUSTERING,
 };
 
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 8eab107b53fb..1dafeb43333b 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -43,6 +43,8 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tcq.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dbg.h>
 #include <linux/cciss_ioctl.h>
 #include <linux/string.h>
 #include <linux/bitmap.h>
@@ -56,7 +58,7 @@
 #include "hpsa.h"
 
 /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
-#define HPSA_DRIVER_VERSION "3.4.4-1"
+#define HPSA_DRIVER_VERSION "3.4.10-0"
 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
 #define HPSA "hpsa"
 
@@ -129,6 +131,7 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
+	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
 	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
 	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
 	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
@@ -186,6 +189,7 @@ static struct board_type products[] = {
 	{0x21CC103C, "Smart Array", &SA5_access},
 	{0x21CD103C, "Smart Array", &SA5_access},
 	{0x21CE103C, "Smart HBA", &SA5_access},
+	{0x05809005, "SmartHBA-SA", &SA5_access},
 	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
 	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
 	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
@@ -194,6 +198,10 @@ static struct board_type products[] = {
 	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
 };
 
+#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
+static const struct scsi_cmnd hpsa_cmd_busy;
+#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
+static const struct scsi_cmnd hpsa_cmd_idle;
 static int number_of_controllers;
 
 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
@@ -207,6 +215,9 @@ static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
 
 static void cmd_free(struct ctlr_info *h, struct CommandList *c);
 static struct CommandList *cmd_alloc(struct ctlr_info *h);
+static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
+static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
+					    struct scsi_cmnd *scmd);
 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
 	int cmd_type);
@@ -222,6 +233,7 @@ static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
 static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
 static int hpsa_slave_alloc(struct scsi_device *sdev);
+static int hpsa_slave_configure(struct scsi_device *sdev);
 static void hpsa_slave_destroy(struct scsi_device *sdev);
 
 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
@@ -232,7 +244,8 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
 /* performant mode helper functions */
 static void calc_bucket_map(int *bucket, int num_buckets,
 	int nsgs, int min_blocks, u32 *bucket_map);
-static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
+static void hpsa_free_performant_mode(struct ctlr_info *h);
+static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
 static inline u32 next_command(struct ctlr_info *h, u8 q);
 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
 	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
@@ -252,6 +265,8 @@ static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
 	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
 	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
 static void hpsa_command_resubmit_worker(struct work_struct *work);
+static u32 lockup_detected(struct ctlr_info *h);
+static int detect_controller_lockup(struct ctlr_info *h);
 
 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
 {
@@ -265,40 +280,86 @@ static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
 	return (struct ctlr_info *) *priv;
 }
 
+static inline bool hpsa_is_cmd_idle(struct CommandList *c)
+{
+	return c->scsi_cmd == SCSI_CMD_IDLE;
+}
+
+static inline bool hpsa_is_pending_event(struct CommandList *c)
+{
+	return c->abort_pending || c->reset_pending;
+}
+
+/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
+static void decode_sense_data(const u8 *sense_data, int sense_data_len,
+			u8 *sense_key, u8 *asc, u8 *ascq)
+{
+	struct scsi_sense_hdr sshdr;
+	bool rc;
+
+	*sense_key = -1;
+	*asc = -1;
+	*ascq = -1;
+
+	if (sense_data_len < 1)
+		return;
+
+	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
+	if (rc) {
+		*sense_key = sshdr.sense_key;
+		*asc = sshdr.asc;
+		*ascq = sshdr.ascq;
+	}
+}
+
 static int check_for_unit_attention(struct ctlr_info *h,
 	struct CommandList *c)
 {
-	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
+	u8 sense_key, asc, ascq;
+	int sense_len;
+
+	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
+		sense_len = sizeof(c->err_info->SenseInfo);
+	else
+		sense_len = c->err_info->SenseLen;
+
+	decode_sense_data(c->err_info->SenseInfo, sense_len,
+				&sense_key, &asc, &ascq);
+	if (sense_key != UNIT_ATTENTION || asc == -1)
 		return 0;
 
-	switch (c->err_info->SenseInfo[12]) {
+	switch (asc) {
 	case STATE_CHANGED:
-		dev_warn(&h->pdev->dev, HPSA "%d: a state change "
-			"detected, command retried\n", h->ctlr);
+		dev_warn(&h->pdev->dev,
+			"%s: a state change detected, command retried\n",
+			h->devname);
 		break;
 	case LUN_FAILED:
 		dev_warn(&h->pdev->dev,
-			HPSA "%d: LUN failure detected\n", h->ctlr);
+			"%s: LUN failure detected\n", h->devname);
 		break;
 	case REPORT_LUNS_CHANGED:
 		dev_warn(&h->pdev->dev,
-			HPSA "%d: report LUN data changed\n", h->ctlr);
+			"%s: report LUN data changed\n", h->devname);
 	/*
 	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
 	 * target (array) devices.
 	 */
 		break;
 	case POWER_OR_RESET:
-		dev_warn(&h->pdev->dev, HPSA "%d: a power on "
-			"or device reset detected\n", h->ctlr);
+		dev_warn(&h->pdev->dev,
+			"%s: a power on or device reset detected\n",
+			h->devname);
 		break;
 	case UNIT_ATTENTION_CLEARED:
-		dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
-			"cleared by another initiator\n", h->ctlr);
+		dev_warn(&h->pdev->dev,
+			"%s: unit attention cleared by another initiator\n",
+			h->devname);
 		break;
 	default:
-		dev_warn(&h->pdev->dev, HPSA "%d: unknown "
-			"unit attention detected\n", h->ctlr);
+		dev_warn(&h->pdev->dev,
+			"%s: unknown unit attention detected\n",
+			h->devname);
 		break;
 	}
 	return 1;
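
decode_sense_data() above leans on scsi_normalize_sense(), which copes with both fixed- and descriptor-format sense buffers. For orientation, a simplified userspace decoder for fixed-format sense only (response codes 0x70/0x71), enough to show where key/ASC/ASCQ live; it is a sketch, not a replacement for the library routine:

#include <stdint.h>
#include <stdio.h>

static int decode_fixed_sense(const uint8_t *s, int len,
			      uint8_t *key, uint8_t *asc, uint8_t *ascq)
{
	*key = *asc = *ascq = 0xff;	/* mark invalid */
	if (len < 14 || (s[0] & 0x7f) < 0x70 || (s[0] & 0x7f) > 0x71)
		return -1;
	*key = s[2] & 0x0f;
	*asc = s[12];
	*ascq = s[13];
	return 0;
}

int main(void)
{
	/* UNIT ATTENTION with ASC 0x29: power on or reset occurred */
	uint8_t sense[18] = { 0x70, 0, 0x06, [12] = 0x29 };
	uint8_t key, asc, ascq;

	if (!decode_fixed_sense(sense, sizeof(sense), &key, &asc, &ascq))
		printf("key 0x%x asc 0x%x ascq 0x%x\n", key, asc, ascq);
	return 0;
}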
@@ -314,6 +375,20 @@ static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
 	return 1;
 }
 
+static u32 lockup_detected(struct ctlr_info *h);
+static ssize_t host_show_lockup_detected(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int ld;
+	struct ctlr_info *h;
+	struct Scsi_Host *shost = class_to_shost(dev);
+
+	h = shost_to_hba(shost);
+	ld = lockup_detected(h);
+
+	return sprintf(buf, "ld=%d\n", ld);
+}
+
 static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
 					 struct device_attribute *attr,
 					 const char *buf, size_t count)
@@ -425,7 +500,7 @@ static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
 /* List of controllers which cannot be hard reset on kexec with reset_devices */
 static u32 unresettable_controller[] = {
 	0x324a103C, /* Smart Array P712m */
-	0x324b103C, /* SmartArray P711m */
+	0x324b103C, /* Smart Array P711m */
 	0x3223103C, /* Smart Array P800 */
 	0x3234103C, /* Smart Array P400 */
 	0x3235103C, /* Smart Array P400i */
@@ -467,24 +542,32 @@ static u32 soft_unresettable_controller[] = {
 	0x409D0E11, /* Smart Array 6400 EM */
 };
 
-static int ctlr_is_hard_resettable(u32 board_id)
+static u32 needs_abort_tags_swizzled[] = {
+	0x323D103C, /* Smart Array P700m */
+	0x324a103C, /* Smart Array P712m */
+	0x324b103C, /* SmartArray P711m */
+};
+
+static int board_id_in_array(u32 a[], int nelems, u32 board_id)
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
-		if (unresettable_controller[i] == board_id)
-			return 0;
-	return 1;
+	for (i = 0; i < nelems; i++)
+		if (a[i] == board_id)
+			return 1;
+	return 0;
 }
 
-static int ctlr_is_soft_resettable(u32 board_id)
+static int ctlr_is_hard_resettable(u32 board_id)
 {
-	int i;
+	return !board_id_in_array(unresettable_controller,
+			ARRAY_SIZE(unresettable_controller), board_id);
+}
 
-	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
-		if (soft_unresettable_controller[i] == board_id)
-			return 0;
-	return 1;
+static int ctlr_is_soft_resettable(u32 board_id)
+{
+	return !board_id_in_array(soft_unresettable_controller,
+			ARRAY_SIZE(soft_unresettable_controller), board_id);
 }
 
 static int ctlr_is_resettable(u32 board_id)
@@ -493,6 +576,12 @@ static int ctlr_is_resettable(u32 board_id)
 		ctlr_is_soft_resettable(board_id);
 }
 
+static int ctlr_needs_abort_tags_swizzled(u32 board_id)
+{
+	return board_id_in_array(needs_abort_tags_swizzled,
+			ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
+}
+
 static ssize_t host_show_resettable(struct device *dev,
 	struct device_attribute *attr, char *buf)
 {
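
The refactor above folds three near-identical lookup loops into board_id_in_array(), so each controller quirk becomes a table plus a one-line predicate. The same shape in standalone C (board IDs copied from the hunk, main() illustrative):

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const uint32_t needs_abort_tags_swizzled[] = {
	0x323D103C, 0x324a103C, 0x324b103C,
};

static int board_id_in_array(const uint32_t a[], int nelems, uint32_t id)
{
	int i;

	for (i = 0; i < nelems; i++)
		if (a[i] == id)
			return 1;
	return 0;
}

int main(void)
{
	printf("swizzled: %d\n",
	       board_id_in_array(needs_abort_tags_swizzled,
				 ARRAY_SIZE(needs_abort_tags_swizzled),
				 0x324a103C));
	return 0;
}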
@@ -647,12 +736,15 @@ static DEVICE_ATTR(transport_mode, S_IRUGO,
 	host_show_transport_mode, NULL);
 static DEVICE_ATTR(resettable, S_IRUGO,
 	host_show_resettable, NULL);
+static DEVICE_ATTR(lockup_detected, S_IRUGO,
+	host_show_lockup_detected, NULL);
 
 static struct device_attribute *hpsa_sdev_attrs[] = {
 	&dev_attr_raid_level,
 	&dev_attr_lunid,
 	&dev_attr_unique_id,
 	&dev_attr_hp_ssd_smart_path_enabled,
+	&dev_attr_lockup_detected,
 	NULL,
 };
 
@@ -667,6 +759,9 @@ static struct device_attribute *hpsa_shost_attrs[] = {
 	NULL,
 };
 
+#define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_ABORTS + \
+		HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)
+
 static struct scsi_host_template hpsa_driver_template = {
 	.module			= THIS_MODULE,
 	.name			= HPSA,
@@ -681,6 +776,7 @@ static struct scsi_host_template hpsa_driver_template = {
 	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
 	.ioctl			= hpsa_ioctl,
 	.slave_alloc		= hpsa_slave_alloc,
+	.slave_configure	= hpsa_slave_configure,
 	.slave_destroy		= hpsa_slave_destroy,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl		= hpsa_compat_ioctl,
@@ -743,30 +839,43 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
  * a separate special register for submitting commands.
  */
 
-/* set_performant_mode: Modify the tag for cciss performant
+/*
+ * set_performant_mode: Modify the tag for cciss performant
  * set bit 0 for pull model, bits 3-1 for block fetch
  * register number
  */
-static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
+#define DEFAULT_REPLY_QUEUE (-1)
+static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
+					int reply_queue)
 {
 	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
 		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
-		if (likely(h->msix_vector > 0))
+		if (unlikely(!h->msix_vector))
+			return;
+		if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
 			c->Header.ReplyQueue =
 				raw_smp_processor_id() % h->nreply_queues;
+		else
+			c->Header.ReplyQueue = reply_queue % h->nreply_queues;
 	}
 }
 
 static void set_ioaccel1_performant_mode(struct ctlr_info *h,
-	struct CommandList *c)
+	struct CommandList *c,
+	int reply_queue)
 {
 	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
 
-	/* Tell the controller to post the reply to the queue for this
+	/*
+	 * Tell the controller to post the reply to the queue for this
 	 * processor. This seems to give the best I/O throughput.
 	 */
-	cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
-	/* Set the bits in the address sent down to include:
+	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
+		cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
+	else
+		cp->ReplyQueue = reply_queue % h->nreply_queues;
+	/*
+	 * Set the bits in the address sent down to include:
 	 * - performant mode bit (bit 0)
 	 * - pull count (bits 1-3)
 	 * - command type (bits 4-6)
@@ -775,20 +884,48 @@ static void set_ioaccel1_performant_mode(struct ctlr_info *h,
 		IOACCEL1_BUSADDR_CMDTYPE;
 }
 
-static void set_ioaccel2_performant_mode(struct ctlr_info *h,
-	struct CommandList *c)
+static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
+	struct CommandList *c,
+	int reply_queue)
 {
-	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
+	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
+		&h->ioaccel2_cmd_pool[c->cmdindex];
 
 	/* Tell the controller to post the reply to the queue for this
 	 * processor. This seems to give the best I/O throughput.
 	 */
-	cp->reply_queue = smp_processor_id() % h->nreply_queues;
+	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
+		cp->reply_queue = smp_processor_id() % h->nreply_queues;
+	else
+		cp->reply_queue = reply_queue % h->nreply_queues;
 	/* Set the bits in the address sent down to include:
 	 * - performant mode bit not used in ioaccel mode 2
 	 * - pull count (bits 0-3)
 	 * - command type isn't needed for ioaccel2
 	 */
+	c->busaddr |= h->ioaccel2_blockFetchTable[0];
+}
+
+static void set_ioaccel2_performant_mode(struct ctlr_info *h,
+	struct CommandList *c,
+	int reply_queue)
+{
+	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
+
+	/*
+	 * Tell the controller to post the reply to the queue for this
+	 * processor. This seems to give the best I/O throughput.
+	 */
+	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
+		cp->reply_queue = smp_processor_id() % h->nreply_queues;
+	else
+		cp->reply_queue = reply_queue % h->nreply_queues;
+	/*
+	 * Set the bits in the address sent down to include:
+	 * - performant mode bit not used in ioaccel mode 2
+	 * - pull count (bits 0-3)
+	 * - command type isn't needed for ioaccel2
+	 */
 	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
 }
 
@@ -821,26 +958,38 @@ static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
 	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
 }
 
-static void enqueue_cmd_and_start_io(struct ctlr_info *h,
-	struct CommandList *c)
+static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
+	struct CommandList *c, int reply_queue)
 {
 	dial_down_lockup_detection_during_fw_flash(h, c);
 	atomic_inc(&h->commands_outstanding);
 	switch (c->cmd_type) {
 	case CMD_IOACCEL1:
-		set_ioaccel1_performant_mode(h, c);
+		set_ioaccel1_performant_mode(h, c, reply_queue);
 		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
 		break;
 	case CMD_IOACCEL2:
-		set_ioaccel2_performant_mode(h, c);
+		set_ioaccel2_performant_mode(h, c, reply_queue);
+		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
+		break;
+	case IOACCEL2_TMF:
+		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
 		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
 		break;
 	default:
-		set_performant_mode(h, c);
+		set_performant_mode(h, c, reply_queue);
 		h->access.submit_command(h, c);
 	}
 }
 
+static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
+{
+	if (unlikely(hpsa_is_pending_event(c)))
+		return finish_cmd(c);
+
+	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
+}
+
 static inline int is_hba_lunid(unsigned char scsi3addr[])
 {
 	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
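
The reply_queue plumbing above lets a caller pin a command's completion to a specific queue, while DEFAULT_REPLY_QUEUE keeps the old spread-by-CPU behaviour; later hunks in this series appear to use the pinned form for task-management commands. The selection rule in isolation (the CPU id is faked for userspace):

#include <stdio.h>

#define DEFAULT_REPLY_QUEUE (-1)

static int pick_reply_queue(int reply_queue, int cpu, int nreply_queues)
{
	if (reply_queue == DEFAULT_REPLY_QUEUE)
		return cpu % nreply_queues;	/* spread by submitting CPU */
	return reply_queue % nreply_queues;	/* caller-pinned queue */
}

int main(void)
{
	printf("default: q%d\n", pick_reply_queue(DEFAULT_REPLY_QUEUE, 7, 4));
	printf("pinned:  q%d\n", pick_reply_queue(2, 7, 4));
	return 0;
}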
@@ -881,6 +1030,23 @@ static int hpsa_find_target_lun(struct ctlr_info *h,
 	return !found;
 }
 
+static inline void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
+	struct hpsa_scsi_dev_t *dev, char *description)
+{
+	dev_printk(level, &h->pdev->dev,
+			"scsi %d:%d:%d:%d: %s %s %.8s %.16s RAID-%s SSDSmartPathCap%c En%c Exp=%d\n",
+			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
+			description,
+			scsi_device_type(dev->devtype),
+			dev->vendor,
+			dev->model,
+			dev->raid_level > RAID_UNKNOWN ?
+				"RAID-?" : raid_label[dev->raid_level],
+			dev->offload_config ? '+' : '-',
+			dev->offload_enabled ? '+' : '-',
+			dev->expose_state);
+}
+
 /* Add an entry into h->dev[] array. */
 static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
 	struct hpsa_scsi_dev_t *device,
@@ -948,15 +1114,10 @@ lun_assigned:
 	h->ndevices++;
 	added[*nadded] = device;
 	(*nadded)++;
-
-	/* initially, (before registering with scsi layer) we don't
-	 * know our hostno and we don't want to print anything first
-	 * time anyway (the scsi layer's inquiries will show that info)
-	 */
-	/* if (hostno != -1) */
-		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
-			scsi_device_type(device->devtype), hostno,
-			device->bus, device->target, device->lun);
+	hpsa_show_dev_msg(KERN_INFO, h, device,
+		device->expose_state & HPSA_SCSI_ADD ? "added" : "masked");
+	device->offload_to_be_enabled = device->offload_enabled;
+	device->offload_enabled = 0;
 	return 0;
 }
 
@@ -964,6 +1125,7 @@ lun_assigned:
 static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
 	int entry, struct hpsa_scsi_dev_t *new_entry)
 {
+	int offload_enabled;
 	/* assumes h->devlock is held */
 	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
 
@@ -982,16 +1144,29 @@ static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
 	 */
 		h->dev[entry]->raid_map = new_entry->raid_map;
 		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
-		wmb(); /* ensure raid map updated prior to ->offload_enabled */
 	}
+	if (new_entry->hba_ioaccel_enabled) {
+		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
+		wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
+	}
+	h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
 	h->dev[entry]->offload_config = new_entry->offload_config;
 	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
-	h->dev[entry]->offload_enabled = new_entry->offload_enabled;
 	h->dev[entry]->queue_depth = new_entry->queue_depth;
 
-	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
-		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
-		new_entry->target, new_entry->lun);
+	/*
+	 * We can turn off ioaccel offload now, but need to delay turning
+	 * it on until we can update h->dev[entry]->phys_disk[], but we
+	 * can't do that until all the devices are updated.
+	 */
+	h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
+	if (!new_entry->offload_enabled)
+		h->dev[entry]->offload_enabled = 0;
+
+	offload_enabled = h->dev[entry]->offload_enabled;
+	h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
+	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
+	h->dev[entry]->offload_enabled = offload_enabled;
 }
 
 /* Replace an entry from h->dev[] array. */
@@ -1017,9 +1192,9 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
 	h->dev[entry] = new_entry;
 	added[*nadded] = new_entry;
 	(*nadded)++;
-	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
-		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
-		new_entry->target, new_entry->lun);
+	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
+	new_entry->offload_to_be_enabled = new_entry->offload_enabled;
+	new_entry->offload_enabled = 0;
 }
 
 /* Remove an entry from h->dev[] array. */
@@ -1039,9 +1214,7 @@ static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
 	for (i = entry; i < h->ndevices-1; i++)
 		h->dev[i] = h->dev[i+1];
 	h->ndevices--;
-	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
-		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
-		sd->lun);
+	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
 }
 
 #define SCSI3ADDR_EQ(a, b) ( \
@@ -1283,6 +1456,8 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1283 if (nraid_map_entries > RAID_MAP_MAX_ENTRIES) 1456 if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1284 nraid_map_entries = RAID_MAP_MAX_ENTRIES; 1457 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1285 1458
1459 logical_drive->nphysical_disks = nraid_map_entries;
1460
1286 qdepth = 0; 1461 qdepth = 0;
1287 for (i = 0; i < nraid_map_entries; i++) { 1462 for (i = 0; i < nraid_map_entries; i++) {
1288 logical_drive->phys_disk[i] = NULL; 1463 logical_drive->phys_disk[i] = NULL;
@@ -1312,7 +1487,8 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1312 */ 1487 */
1313 if (!logical_drive->phys_disk[i]) { 1488 if (!logical_drive->phys_disk[i]) {
1314 logical_drive->offload_enabled = 0; 1489 logical_drive->offload_enabled = 0;
1315 logical_drive->queue_depth = h->nr_cmds; 1490 logical_drive->offload_to_be_enabled = 0;
1491 logical_drive->queue_depth = 8;
1316 } 1492 }
1317 } 1493 }
1318 if (nraid_map_entries) 1494 if (nraid_map_entries)
@@ -1335,6 +1511,16 @@ static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1335 continue; 1511 continue;
1336 if (!is_logical_dev_addr_mode(dev[i]->scsi3addr)) 1512 if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
1337 continue; 1513 continue;
1514
1515 /*
1516 * If offload is currently enabled, the RAID map and
1517 * phys_disk[] assignment *better* not be changing
1518 * and since it isn't changing, we do not need to
1519 * update it.
1520 */
1521 if (dev[i]->offload_enabled)
1522 continue;
1523
1338 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]); 1524 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1339 } 1525 }
1340} 1526}
@@ -1411,9 +1597,7 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
1411 */ 1597 */
1412 if (sd[i]->volume_offline) { 1598 if (sd[i]->volume_offline) {
1413 hpsa_show_volume_status(h, sd[i]); 1599 hpsa_show_volume_status(h, sd[i]);
1414 dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n", 1600 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
1415 h->scsi_host->host_no,
1416 sd[i]->bus, sd[i]->target, sd[i]->lun);
1417 continue; 1601 continue;
1418 } 1602 }
1419 1603
@@ -1433,6 +1617,14 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
1433 /* but if it does happen, we just ignore that device */ 1617 /* but if it does happen, we just ignore that device */
1434 } 1618 }
1435 } 1619 }
1620 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
1621
1622 /* Now that h->dev[]->phys_disk[] is coherent, we can enable
1623 * any logical drives that need it enabled.
1624 */
1625 for (i = 0; i < h->ndevices; i++)
1626 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
1627
1436 spin_unlock_irqrestore(&h->devlock, flags); 1628 spin_unlock_irqrestore(&h->devlock, flags);
1437 1629
1438 /* Monitor devices which are in one of several NOT READY states to be 1630 /* Monitor devices which are in one of several NOT READY states to be
@@ -1456,20 +1648,22 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
1456 sh = h->scsi_host; 1648 sh = h->scsi_host;
1457 /* Notify scsi mid layer of any removed devices */ 1649 /* Notify scsi mid layer of any removed devices */
1458 for (i = 0; i < nremoved; i++) { 1650 for (i = 0; i < nremoved; i++) {
1459 struct scsi_device *sdev = 1651 if (removed[i]->expose_state & HPSA_SCSI_ADD) {
1460 scsi_device_lookup(sh, removed[i]->bus, 1652 struct scsi_device *sdev =
1461 removed[i]->target, removed[i]->lun); 1653 scsi_device_lookup(sh, removed[i]->bus,
1462 if (sdev != NULL) { 1654 removed[i]->target, removed[i]->lun);
1463 scsi_remove_device(sdev); 1655 if (sdev != NULL) {
1464 scsi_device_put(sdev); 1656 scsi_remove_device(sdev);
1465 } else { 1657 scsi_device_put(sdev);
1466 /* We don't expect to get here. 1658 } else {
1467 * future cmds to this device will get selection 1659 /*
1468 * timeout as if the device was gone. 1660 * We don't expect to get here.
1469 */ 1661 * future cmds to this device will get selection
1470 dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d " 1662 * timeout as if the device was gone.
1471 " for removal.", hostno, removed[i]->bus, 1663 */
1472 removed[i]->target, removed[i]->lun); 1664 hpsa_show_dev_msg(KERN_WARNING, h, removed[i],
1665 "didn't find device for removal.");
1666 }
1473 } 1667 }
1474 kfree(removed[i]); 1668 kfree(removed[i]);
1475 removed[i] = NULL; 1669 removed[i] = NULL;
@@ -1477,16 +1671,18 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
1477 1671
1478 /* Notify scsi mid layer of any added devices */ 1672 /* Notify scsi mid layer of any added devices */
1479 for (i = 0; i < nadded; i++) { 1673 for (i = 0; i < nadded; i++) {
1674 if (!(added[i]->expose_state & HPSA_SCSI_ADD))
1675 continue;
1480 if (scsi_add_device(sh, added[i]->bus, 1676 if (scsi_add_device(sh, added[i]->bus,
1481 added[i]->target, added[i]->lun) == 0) 1677 added[i]->target, added[i]->lun) == 0)
1482 continue; 1678 continue;
1483 dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, " 1679 hpsa_show_dev_msg(KERN_WARNING, h, added[i],
1484 "device not added.\n", hostno, added[i]->bus, 1680 "addition failed, device not added.");
1485 added[i]->target, added[i]->lun);
1486 /* now we have to remove it from h->dev, 1681 /* now we have to remove it from h->dev,
1487 * since it didn't get added to scsi mid layer 1682 * since it didn't get added to scsi mid layer
1488 */ 1683 */
1489 fixup_botched_add(h, added[i]); 1684 fixup_botched_add(h, added[i]);
1685 added[i] = NULL;
1490 } 1686 }
1491 1687
1492free_and_out: 1688free_and_out:
@@ -1512,7 +1708,6 @@ static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1512 return NULL; 1708 return NULL;
1513} 1709}
1514 1710
1515/* link sdev->hostdata to our per-device structure. */
1516static int hpsa_slave_alloc(struct scsi_device *sdev) 1711static int hpsa_slave_alloc(struct scsi_device *sdev)
1517{ 1712{
1518 struct hpsa_scsi_dev_t *sd; 1713 struct hpsa_scsi_dev_t *sd;
@@ -1523,21 +1718,80 @@ static int hpsa_slave_alloc(struct scsi_device *sdev)
1523 spin_lock_irqsave(&h->devlock, flags); 1718 spin_lock_irqsave(&h->devlock, flags);
1524 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), 1719 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
1525 sdev_id(sdev), sdev->lun); 1720 sdev_id(sdev), sdev->lun);
1526 if (sd != NULL) { 1721 if (likely(sd)) {
1527 sdev->hostdata = sd;
1528 if (sd->queue_depth)
1529 scsi_change_queue_depth(sdev, sd->queue_depth);
1530 atomic_set(&sd->ioaccel_cmds_out, 0); 1722 atomic_set(&sd->ioaccel_cmds_out, 0);
1531 } 1723 sdev->hostdata = (sd->expose_state & HPSA_SCSI_ADD) ? sd : NULL;
1724 } else
1725 sdev->hostdata = NULL;
1532 spin_unlock_irqrestore(&h->devlock, flags); 1726 spin_unlock_irqrestore(&h->devlock, flags);
1533 return 0; 1727 return 0;
1534} 1728}
1535 1729
1730/* configure scsi device based on internal per-device structure */
1731static int hpsa_slave_configure(struct scsi_device *sdev)
1732{
1733 struct hpsa_scsi_dev_t *sd;
1734 int queue_depth;
1735
1736 sd = sdev->hostdata;
1737 sdev->no_uld_attach = !sd || !(sd->expose_state & HPSA_ULD_ATTACH);
1738
1739 if (sd)
1740 queue_depth = sd->queue_depth != 0 ?
1741 sd->queue_depth : sdev->host->can_queue;
1742 else
1743 queue_depth = sdev->host->can_queue;
1744
1745 scsi_change_queue_depth(sdev, queue_depth);
1746
1747 return 0;
1748}
1749
1536static void hpsa_slave_destroy(struct scsi_device *sdev) 1750static void hpsa_slave_destroy(struct scsi_device *sdev)
1537{ 1751{
1538 /* nothing to do. */ 1752 /* nothing to do. */
1539} 1753}
1540 1754
1755static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
1756{
1757 int i;
1758
1759 if (!h->ioaccel2_cmd_sg_list)
1760 return;
1761 for (i = 0; i < h->nr_cmds; i++) {
1762 kfree(h->ioaccel2_cmd_sg_list[i]);
1763 h->ioaccel2_cmd_sg_list[i] = NULL;
1764 }
1765 kfree(h->ioaccel2_cmd_sg_list);
1766 h->ioaccel2_cmd_sg_list = NULL;
1767}
1768
1769static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
1770{
1771 int i;
1772
1773 if (h->chainsize <= 0)
1774 return 0;
1775
1776 h->ioaccel2_cmd_sg_list =
1777 kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
1778 GFP_KERNEL);
1779 if (!h->ioaccel2_cmd_sg_list)
1780 return -ENOMEM;
1781 for (i = 0; i < h->nr_cmds; i++) {
1782 h->ioaccel2_cmd_sg_list[i] =
1783 kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
1784 h->maxsgentries, GFP_KERNEL);
1785 if (!h->ioaccel2_cmd_sg_list[i])
1786 goto clean;
1787 }
1788 return 0;
1789
1790clean:
1791 hpsa_free_ioaccel2_sg_chain_blocks(h);
1792 return -ENOMEM;
1793}
1794
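hpsa_allocate_ioaccel2_sg_chain_blocks() above allocates one scatter-gather chain buffer per command slot and unwinds completely on failure. A self-contained sketch of the same pattern, with hypothetical names; kcalloc()/kmalloc_array() additionally guard the size multiplications against overflow.

static int alloc_chain_table(struct ioaccel2_sg_element ***table,
			     int nr_cmds, int maxsg)
{
	int i;

	*table = kcalloc(nr_cmds, sizeof(**table), GFP_KERNEL);
	if (!*table)
		return -ENOMEM;
	for (i = 0; i < nr_cmds; i++) {
		(*table)[i] = kmalloc_array(maxsg, sizeof(***table),
					    GFP_KERNEL);
		if (!(*table)[i])
			goto unwind;
	}
	return 0;
unwind:
	while (--i >= 0)		/* free only what was allocated */
		kfree((*table)[i]);
	kfree(*table);
	*table = NULL;
	return -ENOMEM;
}
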
1541static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) 1795static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
1542{ 1796{
1543 int i; 1797 int i;
@@ -1552,7 +1806,7 @@ static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
1552 h->cmd_sg_list = NULL; 1806 h->cmd_sg_list = NULL;
1553} 1807}
1554 1808
1555static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h) 1809static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
1556{ 1810{
1557 int i; 1811 int i;
1558 1812
@@ -1580,6 +1834,39 @@ clean:
1580 return -ENOMEM; 1834 return -ENOMEM;
1581} 1835}
1582 1836
1837static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
1838 struct io_accel2_cmd *cp, struct CommandList *c)
1839{
1840 struct ioaccel2_sg_element *chain_block;
1841 u64 temp64;
1842 u32 chain_size;
1843
1844 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
1845 chain_size = le32_to_cpu(cp->data_len);
1846 temp64 = pci_map_single(h->pdev, chain_block, chain_size,
1847 PCI_DMA_TODEVICE);
1848 if (dma_mapping_error(&h->pdev->dev, temp64)) {
1849 /* prevent subsequent unmapping */
1850 cp->sg->address = 0;
1851 return -1;
1852 }
1853 cp->sg->address = cpu_to_le64(temp64);
1854 return 0;
1855}
1856
1857static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
1858 struct io_accel2_cmd *cp)
1859{
1860 struct ioaccel2_sg_element *chain_sg;
1861 u64 temp64;
1862 u32 chain_size;
1863
1864 chain_sg = cp->sg;
1865 temp64 = le64_to_cpu(chain_sg->address);
1866 chain_size = le32_to_cpu(cp->data_len);
1867 pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
1868}
1869
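The map/unmap pair above follows the usual streaming-DMA discipline: the handle returned by pci_map_single() must be validated with dma_mapping_error() before it is handed to the controller, and the stored address is cleared on failure so the unmap path can tell that nothing was mapped. A hedged sketch (map_chain_block is a hypothetical helper):

static int map_chain_block(struct pci_dev *pdev, void *chain,
			   u32 len, __le64 *slot)
{
	dma_addr_t handle = pci_map_single(pdev, chain, len,
					   PCI_DMA_TODEVICE);

	if (dma_mapping_error(&pdev->dev, handle)) {
		*slot = 0;	/* prevent a stray unmap later */
		return -1;
	}
	*slot = cpu_to_le64(handle);
	return 0;
}
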
1583static int hpsa_map_sg_chain_block(struct ctlr_info *h, 1870static int hpsa_map_sg_chain_block(struct ctlr_info *h,
1584 struct CommandList *c) 1871 struct CommandList *c)
1585{ 1872{
@@ -1629,6 +1916,7 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
1629{ 1916{
1630 int data_len; 1917 int data_len;
1631 int retry = 0; 1918 int retry = 0;
1919 u32 ioaccel2_resid = 0;
1632 1920
1633 switch (c2->error_data.serv_response) { 1921 switch (c2->error_data.serv_response) {
1634 case IOACCEL2_SERV_RESPONSE_COMPLETE: 1922 case IOACCEL2_SERV_RESPONSE_COMPLETE:
@@ -1636,9 +1924,6 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
1636 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: 1924 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
1637 break; 1925 break;
1638 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: 1926 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
1639 dev_warn(&h->pdev->dev,
1640 "%s: task complete with check condition.\n",
1641 "HP SSD Smart Path");
1642 cmd->result |= SAM_STAT_CHECK_CONDITION; 1927 cmd->result |= SAM_STAT_CHECK_CONDITION;
1643 if (c2->error_data.data_present != 1928 if (c2->error_data.data_present !=
1644 IOACCEL2_SENSE_DATA_PRESENT) { 1929 IOACCEL2_SENSE_DATA_PRESENT) {
@@ -1658,58 +1943,56 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
1658 retry = 1; 1943 retry = 1;
1659 break; 1944 break;
1660 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY: 1945 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
1661 dev_warn(&h->pdev->dev,
1662 "%s: task complete with BUSY status.\n",
1663 "HP SSD Smart Path");
1664 retry = 1; 1946 retry = 1;
1665 break; 1947 break;
1666 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON: 1948 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
1667 dev_warn(&h->pdev->dev,
1668 "%s: task complete with reservation conflict.\n",
1669 "HP SSD Smart Path");
1670 retry = 1; 1949 retry = 1;
1671 break; 1950 break;
1672 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL: 1951 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
1673 /* Make scsi midlayer do unlimited retries */ 1952 retry = 1;
1674 cmd->result = DID_IMM_RETRY << 16;
1675 break; 1953 break;
1676 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED: 1954 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
1677 dev_warn(&h->pdev->dev,
1678 "%s: task complete with aborted status.\n",
1679 "HP SSD Smart Path");
1680 retry = 1; 1955 retry = 1;
1681 break; 1956 break;
1682 default: 1957 default:
1683 dev_warn(&h->pdev->dev,
1684 "%s: task complete with unrecognized status: 0x%02x\n",
1685 "HP SSD Smart Path", c2->error_data.status);
1686 retry = 1; 1958 retry = 1;
1687 break; 1959 break;
1688 } 1960 }
1689 break; 1961 break;
1690 case IOACCEL2_SERV_RESPONSE_FAILURE: 1962 case IOACCEL2_SERV_RESPONSE_FAILURE:
1691 /* don't expect to get here. */ 1963 switch (c2->error_data.status) {
1692 dev_warn(&h->pdev->dev, 1964 case IOACCEL2_STATUS_SR_IO_ERROR:
1693 "unexpected delivery or target failure, status = 0x%02x\n", 1965 case IOACCEL2_STATUS_SR_IO_ABORTED:
1694 c2->error_data.status); 1966 case IOACCEL2_STATUS_SR_OVERRUN:
1695 retry = 1; 1967 retry = 1;
1968 break;
1969 case IOACCEL2_STATUS_SR_UNDERRUN:
1970 cmd->result = (DID_OK << 16); /* host byte */
1971 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
1972 ioaccel2_resid = get_unaligned_le32(
1973 &c2->error_data.resid_cnt[0]);
1974 scsi_set_resid(cmd, ioaccel2_resid);
1975 break;
1976 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
1977 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
1978 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
1979 /* We will get an event from ctlr to trigger rescan */
1980 retry = 1;
1981 break;
1982 default:
1983 retry = 1;
1984 }
1696 break; 1985 break;
1697 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: 1986 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
1698 break; 1987 break;
1699 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: 1988 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
1700 break; 1989 break;
1701 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: 1990 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
1702 dev_warn(&h->pdev->dev, "task management function rejected.\n");
1703 retry = 1; 1991 retry = 1;
1704 break; 1992 break;
1705 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: 1993 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
1706 dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
1707 break; 1994 break;
1708 default: 1995 default:
1709 dev_warn(&h->pdev->dev,
1710 "%s: Unrecognized server response: 0x%02x\n",
1711 "HP SSD Smart Path",
1712 c2->error_data.serv_response);
1713 retry = 1; 1996 retry = 1;
1714 break; 1997 break;
1715 } 1998 }
@@ -1717,6 +2000,87 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
1717 return retry; /* retry on raid path? */ 2000 return retry; /* retry on raid path? */
1718} 2001}
1719 2002
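Of the IOACCEL2_SERV_RESPONSE_FAILURE sub-cases added above, only the underrun completes the command directly: the firmware's residual byte count arrives as an unaligned little-endian field, hence get_unaligned_le32(), and the midlayer learns about the short transfer via scsi_set_resid() while the command still completes with DID_OK. A sketch of that bookkeeping (report_underrun is a hypothetical name):

static void report_underrun(struct scsi_cmnd *cmd, const u8 *resid_field)
{
	cmd->result = (DID_OK << 16) | (COMMAND_COMPLETE << 8);
	scsi_set_resid(cmd, get_unaligned_le32(resid_field));
}
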
2003static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2004 struct CommandList *c)
2005{
2006 bool do_wake = false;
2007
2008 /*
2009 * Prevent the following race in the abort handler:
2010 *
2011 * 1. LLD is requested to abort a SCSI command
2012 * 2. The SCSI command completes
2013 * 3. The struct CommandList associated with step 2 is made available
2014 * 4. New I/O request to LLD to another LUN re-uses struct CommandList
2015 * 5. Abort handler follows scsi_cmnd->host_scribble and
2016 * finds struct CommandList and tries to aborts it
2017 * Now we have aborted the wrong command.
2018 *
2019 * Reset c->scsi_cmd here so that the abort or reset handler will know
2020 * this command has completed. Then, check to see if the handler is
2021 * waiting for this command, and, if so, wake it.
2022 */
2023 c->scsi_cmd = SCSI_CMD_IDLE;
2024 mb(); /* Declare command idle before checking for pending events. */
2025 if (c->abort_pending) {
2026 do_wake = true;
2027 c->abort_pending = false;
2028 }
2029 if (c->reset_pending) {
2030 unsigned long flags;
2031 struct hpsa_scsi_dev_t *dev;
2032
2033 /*
2034 * There appears to be a reset pending; lock the lock and
2035 * reconfirm. If so, then decrement the count of outstanding
2036 * commands and wake the reset command if this is the last one.
2037 */
2038 spin_lock_irqsave(&h->lock, flags);
2039 dev = c->reset_pending; /* Re-fetch under the lock. */
2040 if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
2041 do_wake = true;
2042 c->reset_pending = NULL;
2043 spin_unlock_irqrestore(&h->lock, flags);
2044 }
2045
2046 if (do_wake)
2047 wake_up_all(&h->event_sync_wait_queue);
2048}
2049
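A sketch of the idle/pending handshake implemented above, using hypothetical fields. The store to scsi_cmd plus the mb() on the completing side pair with the abort/reset side, which sets its pending flag before re-reading the command state, so one of the two sides is always guaranteed to observe the other.

struct cmd_sketch {
	void *scsi_cmd;			/* SCSI_CMD_IDLE when unused */
	bool abort_pending;
	wait_queue_head_t *waitq;
};

static void complete_side(struct cmd_sketch *c, void *idle_marker)
{
	c->scsi_cmd = idle_marker;	/* declare the command idle */
	mb();				/* idle visible before flag check */
	if (c->abort_pending) {
		c->abort_pending = false;
		wake_up_all(c->waitq);	/* release the waiting handler */
	}
}
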
2050static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2051 struct CommandList *c)
2052{
2053 hpsa_cmd_resolve_events(h, c);
2054 cmd_tagged_free(h, c);
2055}
2056
2057static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2058 struct CommandList *c, struct scsi_cmnd *cmd)
2059{
2060 hpsa_cmd_resolve_and_free(h, c);
2061 cmd->scsi_done(cmd);
2062}
2063
2064static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2065{
2066 INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2067 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2068}
2069
2070static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
2071{
2072 cmd->result = DID_ABORT << 16;
2073}
2074
2075static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
2076 struct scsi_cmnd *cmd)
2077{
2078 hpsa_set_scsi_cmd_aborted(cmd);
2079 dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
2080 c->Request.CDB, c->err_info->ScsiStatus);
2081 hpsa_cmd_resolve_and_free(h, c);
2082}
2083
1720static void process_ioaccel2_completion(struct ctlr_info *h, 2084static void process_ioaccel2_completion(struct ctlr_info *h,
1721 struct CommandList *c, struct scsi_cmnd *cmd, 2085 struct CommandList *c, struct scsi_cmnd *cmd,
1722 struct hpsa_scsi_dev_t *dev) 2086 struct hpsa_scsi_dev_t *dev)
@@ -1725,13 +2089,11 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
1725 2089
1726 /* check for good status */ 2090 /* check for good status */
1727 if (likely(c2->error_data.serv_response == 0 && 2091 if (likely(c2->error_data.serv_response == 0 &&
1728 c2->error_data.status == 0)) { 2092 c2->error_data.status == 0))
1729 cmd_free(h, c); 2093 return hpsa_cmd_free_and_done(h, c, cmd);
1730 cmd->scsi_done(cmd);
1731 return;
1732 }
1733 2094
1734 /* Any RAID offload error results in retry which will use 2095 /*
2096 * Any RAID offload error results in retry which will use
1735 * the normal I/O path so the controller can handle whatever's 2097 * the normal I/O path so the controller can handle whatever's
1736 * wrong. 2098 * wrong.
1737 */ 2099 */
@@ -1741,19 +2103,42 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
1741 if (c2->error_data.status == 2103 if (c2->error_data.status ==
1742 IOACCEL2_STATUS_SR_IOACCEL_DISABLED) 2104 IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
1743 dev->offload_enabled = 0; 2105 dev->offload_enabled = 0;
1744 goto retry_cmd; 2106
2107 return hpsa_retry_cmd(h, c);
1745 } 2108 }
1746 2109
1747 if (handle_ioaccel_mode2_error(h, c, cmd, c2)) 2110 if (handle_ioaccel_mode2_error(h, c, cmd, c2))
1748 goto retry_cmd; 2111 return hpsa_retry_cmd(h, c);
1749 2112
1750 cmd_free(h, c); 2113 return hpsa_cmd_free_and_done(h, c, cmd);
1751 cmd->scsi_done(cmd); 2114}
1752 return;
1753 2115
1754retry_cmd: 2116/* Returns 0 on success, < 0 otherwise. */
1755 INIT_WORK(&c->work, hpsa_command_resubmit_worker); 2117static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
1756 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work); 2118 struct CommandList *cp)
2119{
2120 u8 tmf_status = cp->err_info->ScsiStatus;
2121
2122 switch (tmf_status) {
2123 case CISS_TMF_COMPLETE:
2124 /*
2125 * CISS_TMF_COMPLETE never happens, instead,
2126 * ei->CommandStatus == 0 for this case.
2127 */
2128 case CISS_TMF_SUCCESS:
2129 return 0;
2130 case CISS_TMF_INVALID_FRAME:
2131 case CISS_TMF_NOT_SUPPORTED:
2132 case CISS_TMF_FAILED:
2133 case CISS_TMF_WRONG_LUN:
2134 case CISS_TMF_OVERLAPPED_TAG:
2135 break;
2136 default:
2137 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2138 tmf_status);
2139 break;
2140 }
2141 return -tmf_status;
1757} 2142}
1758 2143
1759static void complete_scsi_command(struct CommandList *cp) 2144static void complete_scsi_command(struct CommandList *cp)
@@ -1762,51 +2147,58 @@ static void complete_scsi_command(struct CommandList *cp)
1762 struct ctlr_info *h; 2147 struct ctlr_info *h;
1763 struct ErrorInfo *ei; 2148 struct ErrorInfo *ei;
1764 struct hpsa_scsi_dev_t *dev; 2149 struct hpsa_scsi_dev_t *dev;
2150 struct io_accel2_cmd *c2;
1765 2151
1766 unsigned char sense_key; 2152 u8 sense_key;
1767 unsigned char asc; /* additional sense code */ 2153 u8 asc; /* additional sense code */
1768 unsigned char ascq; /* additional sense code qualifier */ 2154 u8 ascq; /* additional sense code qualifier */
1769 unsigned long sense_data_size; 2155 unsigned long sense_data_size;
1770 2156
1771 ei = cp->err_info; 2157 ei = cp->err_info;
1772 cmd = cp->scsi_cmd; 2158 cmd = cp->scsi_cmd;
1773 h = cp->h; 2159 h = cp->h;
1774 dev = cmd->device->hostdata; 2160 dev = cmd->device->hostdata;
2161 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
1775 2162
1776 scsi_dma_unmap(cmd); /* undo the DMA mappings */ 2163 scsi_dma_unmap(cmd); /* undo the DMA mappings */
1777 if ((cp->cmd_type == CMD_SCSI) && 2164 if ((cp->cmd_type == CMD_SCSI) &&
1778 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries)) 2165 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
1779 hpsa_unmap_sg_chain_block(h, cp); 2166 hpsa_unmap_sg_chain_block(h, cp);
1780 2167
2168 if ((cp->cmd_type == CMD_IOACCEL2) &&
2169 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2170 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2171
1781 cmd->result = (DID_OK << 16); /* host byte */ 2172 cmd->result = (DID_OK << 16); /* host byte */
1782 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ 2173 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
1783 2174
1784 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) 2175 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
1785 atomic_dec(&cp->phys_disk->ioaccel_cmds_out); 2176 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
1786 2177
1787 if (cp->cmd_type == CMD_IOACCEL2) 2178 /*
1788 return process_ioaccel2_completion(h, cp, cmd, dev); 2179 * We check for lockup status here as it may be set for
1789 2180 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
1790 cmd->result |= ei->ScsiStatus; 2181 * fail_all_outstanding_cmds()
2182 */
2183 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2184 /* DID_NO_CONNECT will prevent a retry */
2185 cmd->result = DID_NO_CONNECT << 16;
2186 return hpsa_cmd_free_and_done(h, cp, cmd);
2187 }
1791 2188
1792 scsi_set_resid(cmd, ei->ResidualCnt); 2189 if ((unlikely(hpsa_is_pending_event(cp)))) {
1793 if (ei->CommandStatus == 0) { 2190 if (cp->reset_pending)
1794 if (cp->cmd_type == CMD_IOACCEL1) 2191 return hpsa_cmd_resolve_and_free(h, cp);
1795 atomic_dec(&cp->phys_disk->ioaccel_cmds_out); 2192 if (cp->abort_pending)
1796 cmd_free(h, cp); 2193 return hpsa_cmd_abort_and_free(h, cp, cmd);
1797 cmd->scsi_done(cmd);
1798 return;
1799 } 2194 }
1800 2195
1801 /* copy the sense data */ 2196 if (cp->cmd_type == CMD_IOACCEL2)
1802 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo)) 2197 return process_ioaccel2_completion(h, cp, cmd, dev);
1803 sense_data_size = SCSI_SENSE_BUFFERSIZE;
1804 else
1805 sense_data_size = sizeof(ei->SenseInfo);
1806 if (ei->SenseLen < sense_data_size)
1807 sense_data_size = ei->SenseLen;
1808 2198
1809 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size); 2199 scsi_set_resid(cmd, ei->ResidualCnt);
2200 if (ei->CommandStatus == 0)
2201 return hpsa_cmd_free_and_done(h, cp, cmd);
1810 2202
1811 /* For I/O accelerator commands, copy over some fields to the normal 2203 /* For I/O accelerator commands, copy over some fields to the normal
1812 * CISS header used below for error handling. 2204 * CISS header used below for error handling.
@@ -1828,10 +2220,7 @@ static void complete_scsi_command(struct CommandList *cp)
1828 if (is_logical_dev_addr_mode(dev->scsi3addr)) { 2220 if (is_logical_dev_addr_mode(dev->scsi3addr)) {
1829 if (ei->CommandStatus == CMD_IOACCEL_DISABLED) 2221 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
1830 dev->offload_enabled = 0; 2222 dev->offload_enabled = 0;
1831 INIT_WORK(&cp->work, hpsa_command_resubmit_worker); 2223 return hpsa_retry_cmd(h, cp);
1832 queue_work_on(raw_smp_processor_id(),
1833 h->resubmit_wq, &cp->work);
1834 return;
1835 } 2224 }
1836 } 2225 }
1837 2226
@@ -1839,14 +2228,18 @@ static void complete_scsi_command(struct CommandList *cp)
1839 switch (ei->CommandStatus) { 2228 switch (ei->CommandStatus) {
1840 2229
1841 case CMD_TARGET_STATUS: 2230 case CMD_TARGET_STATUS:
1842 if (ei->ScsiStatus) { 2231 cmd->result |= ei->ScsiStatus;
1843 /* Get sense key */ 2232 /* copy the sense data */
1844 sense_key = 0xf & ei->SenseInfo[2]; 2233 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
1845 /* Get additional sense code */ 2234 sense_data_size = SCSI_SENSE_BUFFERSIZE;
1846 asc = ei->SenseInfo[12]; 2235 else
1847 /* Get addition sense code qualifier */ 2236 sense_data_size = sizeof(ei->SenseInfo);
1848 ascq = ei->SenseInfo[13]; 2237 if (ei->SenseLen < sense_data_size)
1849 } 2238 sense_data_size = ei->SenseLen;
2239 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2240 if (ei->ScsiStatus)
2241 decode_sense_data(ei->SenseInfo, sense_data_size,
2242 &sense_key, &asc, &ascq);
1850 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { 2243 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
1851 if (sense_key == ABORTED_COMMAND) { 2244 if (sense_key == ABORTED_COMMAND) {
1852 cmd->result |= DID_SOFT_ERROR << 16; 2245 cmd->result |= DID_SOFT_ERROR << 16;
@@ -1918,10 +2311,8 @@ static void complete_scsi_command(struct CommandList *cp)
1918 cp->Request.CDB); 2311 cp->Request.CDB);
1919 break; 2312 break;
1920 case CMD_ABORTED: 2313 case CMD_ABORTED:
1921 cmd->result = DID_ABORT << 16; 2314 /* Return now to avoid calling scsi_done(). */
1922 dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n", 2315 return hpsa_cmd_abort_and_free(h, cp, cmd);
1923 cp->Request.CDB, ei->ScsiStatus);
1924 break;
1925 case CMD_ABORT_FAILED: 2316 case CMD_ABORT_FAILED:
1926 cmd->result = DID_ERROR << 16; 2317 cmd->result = DID_ERROR << 16;
1927 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n", 2318 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
@@ -1941,6 +2332,10 @@ static void complete_scsi_command(struct CommandList *cp)
1941 cmd->result = DID_ERROR << 16; 2332 cmd->result = DID_ERROR << 16;
1942 dev_warn(&h->pdev->dev, "Command unabortable\n"); 2333 dev_warn(&h->pdev->dev, "Command unabortable\n");
1943 break; 2334 break;
2335 case CMD_TMF_STATUS:
2336 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2337 cmd->result = DID_ERROR << 16;
2338 break;
1944 case CMD_IOACCEL_DISABLED: 2339 case CMD_IOACCEL_DISABLED:
1945 /* This only handles the direct pass-through case since RAID 2340 /* This only handles the direct pass-through case since RAID
1946 * offload is handled above. Just attempt a retry. 2341 * offload is handled above. Just attempt a retry.
@@ -1954,8 +2349,8 @@ static void complete_scsi_command(struct CommandList *cp)
1954 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", 2349 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1955 cp, ei->CommandStatus); 2350 cp, ei->CommandStatus);
1956 } 2351 }
1957 cmd_free(h, cp); 2352
1958 cmd->scsi_done(cmd); 2353 return hpsa_cmd_free_and_done(h, cp, cmd);
1959} 2354}
1960 2355
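complete_scsi_command() now funnels sense parsing through decode_sense_data() instead of indexing the buffer directly. For fixed-format sense data the fields live at well-known offsets; a simplified sketch of what such a helper extracts (the real helper also copes with descriptor-format sense):

static void decode_fixed_sense(const u8 *sense, int len,
			       u8 *sense_key, u8 *asc, u8 *ascq)
{
	*sense_key = *asc = *ascq = 0;
	if (len > 2)
		*sense_key = sense[2] & 0x0f;	/* low nibble of byte 2 */
	if (len > 12)
		*asc = sense[12];		/* additional sense code */
	if (len > 13)
		*ascq = sense[13];		/* its qualifier */
}
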
1961static void hpsa_pci_unmap(struct pci_dev *pdev, 2356static void hpsa_pci_unmap(struct pci_dev *pdev,
@@ -1998,14 +2393,36 @@ static int hpsa_map_one(struct pci_dev *pdev,
1998 return 0; 2393 return 0;
1999} 2394}
2000 2395
2001static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, 2396#define NO_TIMEOUT ((unsigned long) -1)
2002 struct CommandList *c) 2397#define DEFAULT_TIMEOUT 30000 /* milliseconds */
2398static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2399 struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2003{ 2400{
2004 DECLARE_COMPLETION_ONSTACK(wait); 2401 DECLARE_COMPLETION_ONSTACK(wait);
2005 2402
2006 c->waiting = &wait; 2403 c->waiting = &wait;
2007 enqueue_cmd_and_start_io(h, c); 2404 __enqueue_cmd_and_start_io(h, c, reply_queue);
2008 wait_for_completion(&wait); 2405 if (timeout_msecs == NO_TIMEOUT) {
2406 /* TODO: get rid of this no-timeout thing */
2407 wait_for_completion_io(&wait);
2408 return IO_OK;
2409 }
2410 if (!wait_for_completion_io_timeout(&wait,
2411 msecs_to_jiffies(timeout_msecs))) {
2412 dev_warn(&h->pdev->dev, "Command timed out.\n");
2413 return -ETIMEDOUT;
2414 }
2415 return IO_OK;
2416}
2417
2418static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2419 int reply_queue, unsigned long timeout_msecs)
2420{
2421 if (unlikely(lockup_detected(h))) {
2422 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2423 return IO_OK;
2424 }
2425 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2009} 2426}
2010 2427
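A hedged usage sketch of the new timeout semantics (send_internal_cmd is a hypothetical caller, not part of the driver): internally generated commands can now return -ETIMEDOUT instead of blocking forever.

static int send_internal_cmd(struct ctlr_info *h, struct CommandList *c)
{
	int rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
					 DEFAULT_TIMEOUT);

	if (rc == -ETIMEDOUT)	/* the command may still complete later */
		dev_warn(&h->pdev->dev, "internal command timed out\n");
	return rc;
}
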
2011static u32 lockup_detected(struct ctlr_info *h) 2428static u32 lockup_detected(struct ctlr_info *h)
@@ -2020,25 +2437,19 @@ static u32 lockup_detected(struct ctlr_info *h)
2020 return rc; 2437 return rc;
2021} 2438}
2022 2439
2023static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
2024 struct CommandList *c)
2025{
2026 /* If controller lockup detected, fake a hardware error. */
2027 if (unlikely(lockup_detected(h)))
2028 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
2029 else
2030 hpsa_scsi_do_simple_cmd_core(h, c);
2031}
2032
2033#define MAX_DRIVER_CMD_RETRIES 25 2440#define MAX_DRIVER_CMD_RETRIES 25
2034static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, 2441static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2035 struct CommandList *c, int data_direction) 2442 struct CommandList *c, int data_direction, unsigned long timeout_msecs)
2036{ 2443{
2037 int backoff_time = 10, retry_count = 0; 2444 int backoff_time = 10, retry_count = 0;
2445 int rc;
2038 2446
2039 do { 2447 do {
2040 memset(c->err_info, 0, sizeof(*c->err_info)); 2448 memset(c->err_info, 0, sizeof(*c->err_info));
2041 hpsa_scsi_do_simple_cmd_core(h, c); 2449 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2450 timeout_msecs);
2451 if (rc)
2452 break;
2042 retry_count++; 2453 retry_count++;
2043 if (retry_count > 3) { 2454 if (retry_count > 3) {
2044 msleep(backoff_time); 2455 msleep(backoff_time);
@@ -2049,6 +2460,9 @@ static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2049 check_for_busy(h, c)) && 2460 check_for_busy(h, c)) &&
2050 retry_count <= MAX_DRIVER_CMD_RETRIES); 2461 retry_count <= MAX_DRIVER_CMD_RETRIES);
2051 hpsa_pci_unmap(h->pdev, c, 1, data_direction); 2462 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2463 if (retry_count > MAX_DRIVER_CMD_RETRIES)
2464 rc = -EIO;
2465 return rc;
2052} 2466}
2053 2467
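A sketch of the capped retry-with-backoff loop above (the busy predicate is hypothetical): the first few attempts retry immediately, later ones sleep progressively longer, and the loop gives up with -EIO after MAX_DRIVER_CMD_RETRIES attempts.

static int retry_with_backoff(bool (*still_busy)(void *arg), void *arg)
{
	int backoff_ms = 10, tries = 0;

	while (still_busy(arg) && tries <= MAX_DRIVER_CMD_RETRIES) {
		if (++tries > 3) {	/* early retries are immediate */
			msleep(backoff_ms);
			backoff_ms *= 2;
		}
	}
	return tries > MAX_DRIVER_CMD_RETRIES ? -EIO : 0;
}
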
2054static void hpsa_print_cmd(struct ctlr_info *h, char *txt, 2468static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
@@ -2072,16 +2486,23 @@ static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2072{ 2486{
2073 const struct ErrorInfo *ei = cp->err_info; 2487 const struct ErrorInfo *ei = cp->err_info;
2074 struct device *d = &cp->h->pdev->dev; 2488 struct device *d = &cp->h->pdev->dev;
2075 const u8 *sd = ei->SenseInfo; 2489 u8 sense_key, asc, ascq;
2490 int sense_len;
2076 2491
2077 switch (ei->CommandStatus) { 2492 switch (ei->CommandStatus) {
2078 case CMD_TARGET_STATUS: 2493 case CMD_TARGET_STATUS:
2494 if (ei->SenseLen > sizeof(ei->SenseInfo))
2495 sense_len = sizeof(ei->SenseInfo);
2496 else
2497 sense_len = ei->SenseLen;
2498 decode_sense_data(ei->SenseInfo, sense_len,
2499 &sense_key, &asc, &ascq);
2079 hpsa_print_cmd(h, "SCSI status", cp); 2500 hpsa_print_cmd(h, "SCSI status", cp);
2080 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) 2501 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2081 dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n", 2502 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2082 sd[2] & 0x0f, sd[12], sd[13]); 2503 sense_key, asc, ascq);
2083 else 2504 else
2084 dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus); 2505 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2085 if (ei->ScsiStatus == 0) 2506 if (ei->ScsiStatus == 0)
2086 dev_warn(d, "SCSI status is abnormally zero. " 2507 dev_warn(d, "SCSI status is abnormally zero. "
2087 "(probably indicates selection timeout " 2508 "(probably indicates selection timeout "
@@ -2125,6 +2546,9 @@ static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2125 case CMD_UNABORTABLE: 2546 case CMD_UNABORTABLE:
2126 hpsa_print_cmd(h, "unabortable", cp); 2547 hpsa_print_cmd(h, "unabortable", cp);
2127 break; 2548 break;
2549 case CMD_CTLR_LOCKUP:
2550 hpsa_print_cmd(h, "controller lockup detected", cp);
2551 break;
2128 default: 2552 default:
2129 hpsa_print_cmd(h, "unknown status", cp); 2553 hpsa_print_cmd(h, "unknown status", cp);
2130 dev_warn(d, "Unknown command status %x\n", 2554 dev_warn(d, "Unknown command status %x\n",
@@ -2142,17 +2566,15 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2142 2566
2143 c = cmd_alloc(h); 2567 c = cmd_alloc(h);
2144 2568
2145 if (c == NULL) {
2146 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2147 return -ENOMEM;
2148 }
2149
2150 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, 2569 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2151 page, scsi3addr, TYPE_CMD)) { 2570 page, scsi3addr, TYPE_CMD)) {
2152 rc = -1; 2571 rc = -1;
2153 goto out; 2572 goto out;
2154 } 2573 }
2155 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2574 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2575 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2576 if (rc)
2577 goto out;
2156 ei = c->err_info; 2578 ei = c->err_info;
2157 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 2579 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2158 hpsa_scsi_interpret_error(h, c); 2580 hpsa_scsi_interpret_error(h, c);
@@ -2172,17 +2594,15 @@ static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
2172 struct ErrorInfo *ei; 2594 struct ErrorInfo *ei;
2173 2595
2174 c = cmd_alloc(h); 2596 c = cmd_alloc(h);
2175 if (c == NULL) { /* trouble... */
2176 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2177 return -ENOMEM;
2178 }
2179
2180 if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize, 2597 if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
2181 page, scsi3addr, TYPE_CMD)) { 2598 page, scsi3addr, TYPE_CMD)) {
2182 rc = -1; 2599 rc = -1;
2183 goto out; 2600 goto out;
2184 } 2601 }
2185 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2602 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2603 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2604 if (rc)
2605 goto out;
2186 ei = c->err_info; 2606 ei = c->err_info;
2187 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 2607 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2188 hpsa_scsi_interpret_error(h, c); 2608 hpsa_scsi_interpret_error(h, c);
@@ -2191,10 +2611,10 @@ static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
2191out: 2611out:
2192 cmd_free(h, c); 2612 cmd_free(h, c);
2193 return rc; 2613 return rc;
2194 } 2614}
2195 2615
2196static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, 2616static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2197 u8 reset_type) 2617 u8 reset_type, int reply_queue)
2198{ 2618{
2199 int rc = IO_OK; 2619 int rc = IO_OK;
2200 struct CommandList *c; 2620 struct CommandList *c;
@@ -2202,16 +2622,16 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2202 2622
2203 c = cmd_alloc(h); 2623 c = cmd_alloc(h);
2204 2624
2205 if (c == NULL) { /* trouble... */
2206 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2207 return -ENOMEM;
2208 }
2209 2625
2210 /* fill_cmd can't fail here, no data buffer to map. */ 2626 /* fill_cmd can't fail here, no data buffer to map. */
2211 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, 2627 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2212 scsi3addr, TYPE_MSG); 2628 scsi3addr, TYPE_MSG);
2213 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */ 2629 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
2214 hpsa_scsi_do_simple_cmd_core(h, c); 2630 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
2631 if (rc) {
2632 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
2633 goto out;
2634 }
2215 /* no unmap needed here because no data xfer. */ 2635 /* no unmap needed here because no data xfer. */
2216 2636
2217 ei = c->err_info; 2637 ei = c->err_info;
@@ -2219,10 +2639,129 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2219 hpsa_scsi_interpret_error(h, c); 2639 hpsa_scsi_interpret_error(h, c);
2220 rc = -1; 2640 rc = -1;
2221 } 2641 }
2642out:
2222 cmd_free(h, c); 2643 cmd_free(h, c);
2223 return rc; 2644 return rc;
2224} 2645}
2225 2646
2647static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
2648 struct hpsa_scsi_dev_t *dev,
2649 unsigned char *scsi3addr)
2650{
2651 int i;
2652 bool match = false;
2653 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2654 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
2655
2656 if (hpsa_is_cmd_idle(c))
2657 return false;
2658
2659 switch (c->cmd_type) {
2660 case CMD_SCSI:
2661 case CMD_IOCTL_PEND:
2662 match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
2663 sizeof(c->Header.LUN.LunAddrBytes));
2664 break;
2665
2666 case CMD_IOACCEL1:
2667 case CMD_IOACCEL2:
2668 if (c->phys_disk == dev) {
2669 /* HBA mode match */
2670 match = true;
2671 } else {
2672 /* Possible RAID mode -- check each phys dev. */
2673 /* FIXME: Do we need to take out a lock here? If
2674 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
2675 * instead. */
2676 for (i = 0; i < dev->nphysical_disks && !match; i++) {
2677 /* FIXME: an alternate test might be
2678 *
2679 * match = dev->phys_disk[i]->ioaccel_handle
2680 * == c2->scsi_nexus; */
2681 match = dev->phys_disk[i] == c->phys_disk;
2682 }
2683 }
2684 break;
2685
2686 case IOACCEL2_TMF:
2687 for (i = 0; i < dev->nphysical_disks && !match; i++) {
2688 match = dev->phys_disk[i]->ioaccel_handle ==
2689 le32_to_cpu(ac->it_nexus);
2690 }
2691 break;
2692
2693 case 0: /* The command is in the middle of being initialized. */
2694 match = false;
2695 break;
2696
2697 default:
2698 dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
2699 c->cmd_type);
2700 BUG();
2701 }
2702
2703 return match;
2704}
2705
2706static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
2707 unsigned char *scsi3addr, u8 reset_type, int reply_queue)
2708{
2709 int i;
2710 int rc = 0;
2711
2712 /* We can really only handle one reset at a time */
2713 if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
2714 dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
2715 return -EINTR;
2716 }
2717
2718 BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);
2719
2720 for (i = 0; i < h->nr_cmds; i++) {
2721 struct CommandList *c = h->cmd_pool + i;
2722 int refcount = atomic_inc_return(&c->refcount);
2723
2724 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
2725 unsigned long flags;
2726
2727 /*
2728 * Mark the target command as having a reset pending,
2729 * then lock a lock so that the command cannot complete
2730 * while we're considering it. If the command is not
2731 * idle then count it; otherwise revoke the event.
2732 */
2733 c->reset_pending = dev;
2734 spin_lock_irqsave(&h->lock, flags); /* Implied MB */
2735 if (!hpsa_is_cmd_idle(c))
2736 atomic_inc(&dev->reset_cmds_out);
2737 else
2738 c->reset_pending = NULL;
2739 spin_unlock_irqrestore(&h->lock, flags);
2740 }
2741
2742 cmd_free(h, c);
2743 }
2744
2745 rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
2746 if (!rc)
2747 wait_event(h->event_sync_wait_queue,
2748 atomic_read(&dev->reset_cmds_out) == 0 ||
2749 lockup_detected(h));
2750
2751 if (unlikely(lockup_detected(h))) {
2752 dev_warn(&h->pdev->dev,
2753 "Controller lockup detected during reset wait\n");
2754 mutex_unlock(&h->reset_mutex);
2755 rc = -ENODEV;
2756 }
2757
2758 if (unlikely(rc))
2759 atomic_set(&dev->reset_cmds_out, 0);
2760
2761 mutex_unlock(&h->reset_mutex);
2762 return rc;
2763}
2764
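A sketch of the tag-and-count protocol used by hpsa_do_reset() above (tag_if_busy is a hypothetical name). The completion path, hpsa_cmd_resolve_events(), re-checks reset_pending under the same lock, decrements the counter and wakes the waiter when it reaches zero.

static void tag_if_busy(struct ctlr_info *h, struct CommandList *c,
			struct hpsa_scsi_dev_t *dev)
{
	unsigned long flags;

	c->reset_pending = dev;
	spin_lock_irqsave(&h->lock, flags);	/* implied memory barrier */
	if (!hpsa_is_cmd_idle(c))
		atomic_inc(&dev->reset_cmds_out);
	else
		c->reset_pending = NULL;	/* idle: nothing to wait for */
	spin_unlock_irqrestore(&h->lock, flags);
}
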
2226static void hpsa_get_raid_level(struct ctlr_info *h, 2765static void hpsa_get_raid_level(struct ctlr_info *h,
2227 unsigned char *scsi3addr, unsigned char *raid_level) 2766 unsigned char *scsi3addr, unsigned char *raid_level)
2228{ 2767{
@@ -2328,23 +2867,23 @@ static int hpsa_get_raid_map(struct ctlr_info *h,
2328 struct ErrorInfo *ei; 2867 struct ErrorInfo *ei;
2329 2868
2330 c = cmd_alloc(h); 2869 c = cmd_alloc(h);
2331 if (c == NULL) { 2870
2332 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2333 return -ENOMEM;
2334 }
2335 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, 2871 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
2336 sizeof(this_device->raid_map), 0, 2872 sizeof(this_device->raid_map), 0,
2337 scsi3addr, TYPE_CMD)) { 2873 scsi3addr, TYPE_CMD)) {
2338 dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n"); 2874 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
2339 cmd_free(h, c); 2875 cmd_free(h, c);
2340 return -ENOMEM; 2876 return -1;
2341 } 2877 }
2342 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2878 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2879 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2880 if (rc)
2881 goto out;
2343 ei = c->err_info; 2882 ei = c->err_info;
2344 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 2883 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2345 hpsa_scsi_interpret_error(h, c); 2884 hpsa_scsi_interpret_error(h, c);
2346 cmd_free(h, c); 2885 rc = -1;
2347 return -1; 2886 goto out;
2348 } 2887 }
2349 cmd_free(h, c); 2888 cmd_free(h, c);
2350 2889
@@ -2356,6 +2895,9 @@ static int hpsa_get_raid_map(struct ctlr_info *h,
2356 } 2895 }
2357 hpsa_debug_map_buff(h, rc, &this_device->raid_map); 2896 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
2358 return rc; 2897 return rc;
2898out:
2899 cmd_free(h, c);
2900 return rc;
2359} 2901}
2360 2902
2361static int hpsa_bmic_id_physical_device(struct ctlr_info *h, 2903static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
@@ -2375,7 +2917,8 @@ static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
2375 c->Request.CDB[2] = bmic_device_index & 0xff; 2917 c->Request.CDB[2] = bmic_device_index & 0xff;
2376 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; 2918 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
2377 2919
2378 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2920 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
2921 NO_TIMEOUT);
2379 ei = c->err_info; 2922 ei = c->err_info;
2380 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 2923 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2381 hpsa_scsi_interpret_error(h, c); 2924 hpsa_scsi_interpret_error(h, c);
@@ -2438,6 +2981,7 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h,
2438 2981
2439 this_device->offload_config = 0; 2982 this_device->offload_config = 0;
2440 this_device->offload_enabled = 0; 2983 this_device->offload_enabled = 0;
2984 this_device->offload_to_be_enabled = 0;
2441 2985
2442 buf = kzalloc(64, GFP_KERNEL); 2986 buf = kzalloc(64, GFP_KERNEL);
2443 if (!buf) 2987 if (!buf)
@@ -2461,6 +3005,7 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h,
2461 if (hpsa_get_raid_map(h, scsi3addr, this_device)) 3005 if (hpsa_get_raid_map(h, scsi3addr, this_device))
2462 this_device->offload_enabled = 0; 3006 this_device->offload_enabled = 0;
2463 } 3007 }
3008 this_device->offload_to_be_enabled = this_device->offload_enabled;
2464out: 3009out:
2465 kfree(buf); 3010 kfree(buf);
2466 return; 3011 return;
@@ -2495,10 +3040,7 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
2495 struct ErrorInfo *ei; 3040 struct ErrorInfo *ei;
2496 3041
2497 c = cmd_alloc(h); 3042 c = cmd_alloc(h);
2498 if (c == NULL) { /* trouble... */ 3043
2499 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2500 return -1;
2501 }
2502 /* address the controller */ 3044 /* address the controller */
2503 memset(scsi3addr, 0, sizeof(scsi3addr)); 3045 memset(scsi3addr, 0, sizeof(scsi3addr));
2504 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, 3046 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
@@ -2508,7 +3050,10 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
2508 } 3050 }
2509 if (extended_response) 3051 if (extended_response)
2510 c->Request.CDB[1] = extended_response; 3052 c->Request.CDB[1] = extended_response;
2511 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 3053 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3054 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3055 if (rc)
3056 goto out;
2512 ei = c->err_info; 3057 ei = c->err_info;
2513 if (ei->CommandStatus != 0 && 3058 if (ei->CommandStatus != 0 &&
2514 ei->CommandStatus != CMD_DATA_UNDERRUN) { 3059 ei->CommandStatus != CMD_DATA_UNDERRUN) {
@@ -2600,8 +3145,10 @@ static int hpsa_volume_offline(struct ctlr_info *h,
2600 unsigned char scsi3addr[]) 3145 unsigned char scsi3addr[])
2601{ 3146{
2602 struct CommandList *c; 3147 struct CommandList *c;
2603 unsigned char *sense, sense_key, asc, ascq; 3148 unsigned char *sense;
2604 int ldstat = 0; 3149 u8 sense_key, asc, ascq;
3150 int sense_len;
3151 int rc, ldstat = 0;
2605 u16 cmd_status; 3152 u16 cmd_status;
2606 u8 scsi_status; 3153 u8 scsi_status;
2607#define ASC_LUN_NOT_READY 0x04 3154#define ASC_LUN_NOT_READY 0x04
@@ -2609,14 +3156,19 @@ static int hpsa_volume_offline(struct ctlr_info *h,
2609#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02 3156#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
2610 3157
2611 c = cmd_alloc(h); 3158 c = cmd_alloc(h);
2612 if (!c) 3159
2613 return 0;
2614 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); 3160 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
2615 hpsa_scsi_do_simple_cmd_core(h, c); 3161 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
3162 if (rc) {
3163 cmd_free(h, c);
3164 return 0;
3165 }
2616 sense = c->err_info->SenseInfo; 3166 sense = c->err_info->SenseInfo;
2617 sense_key = sense[2]; 3167 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
2618 asc = sense[12]; 3168 sense_len = sizeof(c->err_info->SenseInfo);
2619 ascq = sense[13]; 3169 else
3170 sense_len = c->err_info->SenseLen;
3171 decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
2620 cmd_status = c->err_info->CommandStatus; 3172 cmd_status = c->err_info->CommandStatus;
2621 scsi_status = c->err_info->ScsiStatus; 3173 scsi_status = c->err_info->ScsiStatus;
2622 cmd_free(h, c); 3174 cmd_free(h, c);
@@ -2656,6 +3208,52 @@ static int hpsa_volume_offline(struct ctlr_info *h,
2656 return 0; 3208 return 0;
2657} 3209}
2658 3210
3211/*
3212 * Find out if a logical device supports aborts by simply trying one.
3213 * Smart Array may claim not to support aborts on logical drives, but
3214 * if an MSA2000 is connected, the drives on that will be presented
3215 * by the Smart Array as logical drives, and aborts may be sent to
3216 * those devices successfully. So the simplest way to find out is
3217 * to try an abort and see how the device responds.
3218 */
3219static int hpsa_device_supports_aborts(struct ctlr_info *h,
3220 unsigned char *scsi3addr)
3221{
3222 struct CommandList *c;
3223 struct ErrorInfo *ei;
3224 int rc = 0;
3225
3226 u64 tag = (u64) -1; /* bogus tag */
3227
3228 /* Assume that physical devices support aborts */
3229 if (!is_logical_dev_addr_mode(scsi3addr))
3230 return 1;
3231
3232 c = cmd_alloc(h);
3233
3234 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
3235 (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
3236 /* no unmap needed here because no data xfer. */
3237 ei = c->err_info;
3238 switch (ei->CommandStatus) {
3239 case CMD_INVALID:
3240 rc = 0;
3241 break;
3242 case CMD_UNABORTABLE:
3243 case CMD_ABORT_FAILED:
3244 rc = 1;
3245 break;
3246 case CMD_TMF_STATUS:
3247 rc = hpsa_evaluate_tmf_status(h, c);
3248 break;
3249 default:
3250 rc = 0;
3251 break;
3252 }
3253 cmd_free(h, c);
3254 return rc;
3255}
3256
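The deliberately bogus tag ((u64) -1) keeps this probe harmless: it can never match a live command, so the reply only reveals whether the firmware understands abort messages for that address. A CMD_INVALID reply means the message itself was refused, i.e. no abort support, while CMD_UNABORTABLE or CMD_ABORT_FAILED show the request was accepted and processed, so aborts are taken to be supported.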
2659static int hpsa_update_device_info(struct ctlr_info *h, 3257static int hpsa_update_device_info(struct ctlr_info *h,
2660 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, 3258 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
2661 unsigned char *is_OBDR_device) 3259 unsigned char *is_OBDR_device)
@@ -2708,6 +3306,8 @@ static int hpsa_update_device_info(struct ctlr_info *h,
2708 this_device->raid_level = RAID_UNKNOWN; 3306 this_device->raid_level = RAID_UNKNOWN;
2709 this_device->offload_config = 0; 3307 this_device->offload_config = 0;
2710 this_device->offload_enabled = 0; 3308 this_device->offload_enabled = 0;
3309 this_device->offload_to_be_enabled = 0;
3310 this_device->hba_ioaccel_enabled = 0;
2711 this_device->volume_offline = 0; 3311 this_device->volume_offline = 0;
2712 this_device->queue_depth = h->nr_cmds; 3312 this_device->queue_depth = h->nr_cmds;
2713 } 3313 }
@@ -2721,7 +3321,6 @@ static int hpsa_update_device_info(struct ctlr_info *h,
2721 strncmp(obdr_sig, OBDR_TAPE_SIG, 3321 strncmp(obdr_sig, OBDR_TAPE_SIG,
2722 OBDR_SIG_LEN) == 0); 3322 OBDR_SIG_LEN) == 0);
2723 } 3323 }
2724
2725 kfree(inq_buff); 3324 kfree(inq_buff);
2726 return 0; 3325 return 0;
2727 3326
@@ -2730,6 +3329,31 @@ bail_out:
2730 return 1; 3329 return 1;
2731} 3330}
2732 3331
3332static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
3333 struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
3334{
3335 unsigned long flags;
3336 int rc, entry;
3337 /*
3338 * See if this device supports aborts. If we already know
3339 * the device, we already know if it supports aborts, otherwise
3340 * we have to find out if it supports aborts by trying one.
3341 */
3342 spin_lock_irqsave(&h->devlock, flags);
3343 rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
3344 if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
3345 entry >= 0 && entry < h->ndevices) {
3346 dev->supports_aborts = h->dev[entry]->supports_aborts;
3347 spin_unlock_irqrestore(&h->devlock, flags);
3348 } else {
3349 spin_unlock_irqrestore(&h->devlock, flags);
3350 dev->supports_aborts =
3351 hpsa_device_supports_aborts(h, scsi3addr);
3352 if (dev->supports_aborts < 0)
3353 dev->supports_aborts = 0;
3354 }
3355}
3356
2733static unsigned char *ext_target_model[] = { 3357static unsigned char *ext_target_model[] = {
2734 "MSA2012", 3358 "MSA2012",
2735 "MSA2024", 3359 "MSA2024",
@@ -2835,6 +3459,7 @@ static int add_ext_target_dev(struct ctlr_info *h,
2835 (*n_ext_target_devs)++; 3459 (*n_ext_target_devs)++;
2836 hpsa_set_bus_target_lun(this_device, 3460 hpsa_set_bus_target_lun(this_device,
2837 tmpdevice->bus, tmpdevice->target, 0); 3461 tmpdevice->bus, tmpdevice->target, 0);
3462 hpsa_update_device_supports_aborts(h, this_device, scsi3addr);
2838 set_bit(tmpdevice->target, lunzerobits); 3463 set_bit(tmpdevice->target, lunzerobits);
2839 return 1; 3464 return 1;
2840} 3465}
@@ -2850,88 +3475,23 @@ static int add_ext_target_dev(struct ctlr_info *h,
2850static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, 3475static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
2851 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr) 3476 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
2852{ 3477{
2853 struct ReportExtendedLUNdata *physicals = NULL; 3478 struct io_accel2_cmd *c2 =
2854 int responsesize = 24; /* size of physical extended response */ 3479 &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
2855 int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize; 3480 unsigned long flags;
2856 u32 nphysicals = 0; /* number of reported physical devs */
2857 int found = 0; /* found match (1) or not (0) */
2858 u32 find; /* handle we need to match */
2859 int i; 3481 int i;
2860 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
2861 struct hpsa_scsi_dev_t *d; /* device of request being aborted */
2862 struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
2863 __le32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */
2864 __le32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */
2865
2866 if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
2867 return 0; /* no match */
2868
2869 /* point to the ioaccel2 device handle */
2870 c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
2871 if (c2a == NULL)
2872 return 0; /* no match */
2873
2874 scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
2875 if (scmd == NULL)
2876 return 0; /* no match */
2877
2878 d = scmd->device->hostdata;
2879 if (d == NULL)
2880 return 0; /* no match */
2881
2882 it_nexus = cpu_to_le32(d->ioaccel_handle);
2883 scsi_nexus = c2a->scsi_nexus;
2884 find = le32_to_cpu(c2a->scsi_nexus);
2885
2886 if (h->raid_offload_debug > 0)
2887 dev_info(&h->pdev->dev,
2888 "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
2889 __func__, scsi_nexus,
2890 d->device_id[0], d->device_id[1], d->device_id[2],
2891 d->device_id[3], d->device_id[4], d->device_id[5],
2892 d->device_id[6], d->device_id[7], d->device_id[8],
2893 d->device_id[9], d->device_id[10], d->device_id[11],
2894 d->device_id[12], d->device_id[13], d->device_id[14],
2895 d->device_id[15]);
2896
2897 /* Get the list of physical devices */
2898 physicals = kzalloc(reportsize, GFP_KERNEL);
2899 if (physicals == NULL)
2900 return 0;
2901 if (hpsa_scsi_do_report_phys_luns(h, physicals, reportsize)) {
2902 dev_err(&h->pdev->dev,
2903 "Can't lookup %s device handle: report physical LUNs failed.\n",
2904 "HP SSD Smart Path");
2905 kfree(physicals);
2906 return 0;
2907 }
2908 nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
2909 responsesize;
2910
2911 /* find ioaccel2 handle in list of physicals: */
2912 for (i = 0; i < nphysicals; i++) {
2913 struct ext_report_lun_entry *entry = &physicals->LUN[i];
2914
2915 /* handle is in bytes 28-31 of each lun */
2916 if (entry->ioaccel_handle != find)
2917 continue; /* didn't match */
2918 found = 1;
2919 memcpy(scsi3addr, entry->lunid, 8);
2920 if (h->raid_offload_debug > 0)
2921 dev_info(&h->pdev->dev,
2922 "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n",
2923 __func__, find,
2924 entry->ioaccel_handle, scsi3addr);
2925 break; /* found it */
2926 }
2927
2928 kfree(physicals);
2929 if (found)
2930 return 1;
2931 else
2932 return 0;
2933 3482
3483 spin_lock_irqsave(&h->devlock, flags);
3484 for (i = 0; i < h->ndevices; i++)
3485 if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
3486 memcpy(scsi3addr, h->dev[i]->scsi3addr,
3487 sizeof(h->dev[i]->scsi3addr));
3488 spin_unlock_irqrestore(&h->devlock, flags);
3489 return 1;
3490 }
3491 spin_unlock_irqrestore(&h->devlock, flags);
3492 return 0;
2934} 3493}
3494
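Note the simplification relative to the deleted code: instead of allocating a buffer and issuing a fresh extended CISS_REPORT_PHYS to the controller on every lookup, the rewritten function resolves the ioaccel handle against the driver's cached h->dev[] table under devlock, removing an allocation and a firmware round-trip from the abort path.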
2935/* 3495/*
2936 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev, 3496 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
2937 * logdev. The number of luns in physdev and logdev are returned in 3497 * logdev. The number of luns in physdev and logdev are returned in
@@ -3036,6 +3596,8 @@ static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
3036 (struct ext_report_lun_entry *) lunaddrbytes; 3596 (struct ext_report_lun_entry *) lunaddrbytes;
3037 3597
3038 dev->ioaccel_handle = rle->ioaccel_handle; 3598 dev->ioaccel_handle = rle->ioaccel_handle;
3599 if (PHYS_IOACCEL(lunaddrbytes) && dev->ioaccel_handle)
3600 dev->hba_ioaccel_enabled = 1;
3039 memset(id_phys, 0, sizeof(*id_phys)); 3601 memset(id_phys, 0, sizeof(*id_phys));
3040 rc = hpsa_bmic_id_physical_device(h, lunaddrbytes, 3602 rc = hpsa_bmic_id_physical_device(h, lunaddrbytes,
3041 GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys, 3603 GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys,
@@ -3050,6 +3612,7 @@ static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
3050 else 3612 else
3051 dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */ 3613 dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
3052 atomic_set(&dev->ioaccel_cmds_out, 0); 3614 atomic_set(&dev->ioaccel_cmds_out, 0);
3615 atomic_set(&dev->reset_cmds_out, 0);
3053} 3616}
3054 3617
3055static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) 3618static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
@@ -3142,16 +3705,19 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3142 /* Figure out where the LUN ID info is coming from */ 3705 /* Figure out where the LUN ID info is coming from */
3143 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, 3706 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
3144 i, nphysicals, nlogicals, physdev_list, logdev_list); 3707 i, nphysicals, nlogicals, physdev_list, logdev_list);
3145 /* skip masked physical devices. */ 3708
3146 if (lunaddrbytes[3] & 0xC0 && 3709 /* skip masked non-disk devices */
3147 i < nphysicals + (raid_ctlr_position == 0)) 3710 if (MASKED_DEVICE(lunaddrbytes))
3148 continue; 3711 if (i < nphysicals + (raid_ctlr_position == 0) &&
3712 NON_DISK_PHYS_DEV(lunaddrbytes))
3713 continue;
3149 3714
3150 /* Get device type, vendor, model, device id */ 3715 /* Get device type, vendor, model, device id */
3151 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice, 3716 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
3152 &is_OBDR)) 3717 &is_OBDR))
3153 continue; /* skip it if we can't talk to it. */ 3718 continue; /* skip it if we can't talk to it. */
3154 figure_bus_target_lun(h, lunaddrbytes, tmpdevice); 3719 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
3720 hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
3155 this_device = currentsd[ncurrent]; 3721 this_device = currentsd[ncurrent];
3156 3722
3157 /* 3723 /*
@@ -3170,6 +3736,18 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3170 3736
3171 *this_device = *tmpdevice; 3737 *this_device = *tmpdevice;
3172 3738
3739 /* do not expose masked devices */
3740 if (MASKED_DEVICE(lunaddrbytes) &&
3741 i < nphysicals + (raid_ctlr_position == 0)) {
3742 if (h->hba_mode_enabled)
3743 dev_warn(&h->pdev->dev,
3744 "Masked physical device detected\n");
3745 this_device->expose_state = HPSA_DO_NOT_EXPOSE;
3746 } else {
3747 this_device->expose_state =
3748 HPSA_SG_ATTACH | HPSA_ULD_ATTACH;
3749 }
3750
3173 switch (this_device->devtype) { 3751 switch (this_device->devtype) {
3174 case TYPE_ROM: 3752 case TYPE_ROM:
3175 /* We don't *really* support actual CD-ROM devices, 3753 /* We don't *really* support actual CD-ROM devices,
@@ -3183,34 +3761,31 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3183 ncurrent++; 3761 ncurrent++;
3184 break; 3762 break;
3185 case TYPE_DISK: 3763 case TYPE_DISK:
3186 if (h->hba_mode_enabled) { 3764 if (i >= nphysicals) {
3187 /* never use raid mapper in HBA mode */
3188 this_device->offload_enabled = 0;
3189 ncurrent++;
3190 break;
3191 } else if (h->acciopath_status) {
3192 if (i >= nphysicals) {
3193 ncurrent++;
3194 break;
3195 }
3196 } else {
3197 if (i < nphysicals)
3198 break;
3199 ncurrent++; 3765 ncurrent++;
3200 break; 3766 break;
3201 } 3767 }
3202 if (h->transMethod & CFGTBL_Trans_io_accel1 || 3768
3203 h->transMethod & CFGTBL_Trans_io_accel2) { 3769 if (h->hba_mode_enabled)
3204 hpsa_get_ioaccel_drive_info(h, this_device, 3770 /* never use raid mapper in HBA mode */
3205 lunaddrbytes, id_phys); 3771 this_device->offload_enabled = 0;
3206 atomic_set(&this_device->ioaccel_cmds_out, 0); 3772 else if (!(h->transMethod & CFGTBL_Trans_io_accel1 ||
3207 ncurrent++; 3773 h->transMethod & CFGTBL_Trans_io_accel2))
3208 } 3774 break;
3775
3776 hpsa_get_ioaccel_drive_info(h, this_device,
3777 lunaddrbytes, id_phys);
3778 atomic_set(&this_device->ioaccel_cmds_out, 0);
3779 ncurrent++;
3209 break; 3780 break;
3210 case TYPE_TAPE: 3781 case TYPE_TAPE:
3211 case TYPE_MEDIUM_CHANGER: 3782 case TYPE_MEDIUM_CHANGER:
3212 ncurrent++; 3783 ncurrent++;
3213 break; 3784 break;
3785 case TYPE_ENCLOSURE:
3786 if (h->hba_mode_enabled)
3787 ncurrent++;
3788 break;
3214 case TYPE_RAID: 3789 case TYPE_RAID:
3215 /* Only present the Smartarray HBA as a RAID controller. 3790 /* Only present the Smartarray HBA as a RAID controller.
3216 * If it's a RAID controller other than the HBA itself 3791 * If it's a RAID controller other than the HBA itself
@@ -3227,7 +3802,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3227 if (ncurrent >= HPSA_MAX_DEVICES) 3802 if (ncurrent >= HPSA_MAX_DEVICES)
3228 break; 3803 break;
3229 } 3804 }
3230 hpsa_update_log_drive_phys_drive_ptrs(h, currentsd, ncurrent);
3231 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); 3805 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
3232out: 3806out:
3233 kfree(tmpdevice); 3807 kfree(tmpdevice);
@@ -3260,7 +3834,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
3260 struct scsi_cmnd *cmd) 3834 struct scsi_cmnd *cmd)
3261{ 3835{
3262 struct scatterlist *sg; 3836 struct scatterlist *sg;
3263 int use_sg, i, sg_index, chained; 3837 int use_sg, i, sg_limit, chained, last_sg;
3264 struct SGDescriptor *curr_sg; 3838 struct SGDescriptor *curr_sg;
3265 3839
3266 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); 3840 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
@@ -3272,22 +3846,39 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
3272 if (!use_sg) 3846 if (!use_sg)
3273 goto sglist_finished; 3847 goto sglist_finished;
3274 3848
3849 /*
3850 * If the number of entries is greater than the max for a single list,
3851 * then we have a chained list; we will set up all but one entry in the
3852 * first list (the last entry is saved for link information);
3853 * otherwise, we don't have a chained list and we'll set up each of
3854 * the entries in the one list.
3855 */
3275 curr_sg = cp->SG; 3856 curr_sg = cp->SG;
3276 chained = 0; 3857 chained = use_sg > h->max_cmd_sg_entries;
3277 sg_index = 0; 3858 sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
3278 scsi_for_each_sg(cmd, sg, use_sg, i) { 3859 last_sg = scsi_sg_count(cmd) - 1;
3279 if (i == h->max_cmd_sg_entries - 1 && 3860 scsi_for_each_sg(cmd, sg, sg_limit, i) {
3280 use_sg > h->max_cmd_sg_entries) {
3281 chained = 1;
3282 curr_sg = h->cmd_sg_list[cp->cmdindex];
3283 sg_index = 0;
3284 }
3285 hpsa_set_sg_descriptor(curr_sg, sg); 3861 hpsa_set_sg_descriptor(curr_sg, sg);
3286 curr_sg++; 3862 curr_sg++;
3287 } 3863 }
3288 3864
3865 if (chained) {
3866 /*
3867 * Continue with the chained list. Set curr_sg to the chained
3868 * list. Modify the limit to the total count less the entries
3869 * we've already set up. Resume the scan at the list entry
3870 * where the previous loop left off.
3871 */
3872 curr_sg = h->cmd_sg_list[cp->cmdindex];
3873 sg_limit = use_sg - sg_limit;
3874 for_each_sg(sg, sg, sg_limit, i) {
3875 hpsa_set_sg_descriptor(curr_sg, sg);
3876 curr_sg++;
3877 }
3878 }
3879
3289 /* Back the pointer up to the last entry and mark it as "last". */ 3880 /* Back the pointer up to the last entry and mark it as "last". */
3290 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST); 3881 (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
3291 3882
3292 if (use_sg + chained > h->maxSG) 3883 if (use_sg + chained > h->maxSG)
3293 h->maxSG = use_sg + chained; 3884 h->maxSG = use_sg + chained;
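
The rewritten hpsa_scatter_gather() splits descriptors between the embedded list and the chain block with plain arithmetic: once use_sg exceeds the per-command limit, all but the last embedded slot carry data and the remainder spills into the chain. A compilable model of just that sizing; the names and limits are illustrative:

    #include <stdio.h>

    /* Given the total SG count and the per-command embedded limit, report
     * how many descriptors land in the embedded list vs. the chain block.
     * When chaining, the last embedded slot is reserved for the chain link. */
    static void split_sg(int use_sg, int max_embedded,
    		     int *embedded, int *chained_entries)
    {
    	int chained = use_sg > max_embedded;

    	*embedded = chained ? max_embedded - 1 : use_sg;
    	*chained_entries = use_sg - *embedded;
    }

    int main(void)
    {
    	int e, c;

    	split_sg(10, 32, &e, &c);	/* fits: 10 embedded, 0 chained */
    	printf("%d embedded, %d chained\n", e, c);
    	split_sg(40, 32, &e, &c);	/* chains: 31 embedded, 9 chained */
    	printf("%d embedded, %d chained\n", e, c);
    	return 0;
    }
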
@@ -3530,10 +4121,7 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
3530 u32 len; 4121 u32 len;
3531 u32 total_len = 0; 4122 u32 total_len = 0;
3532 4123
3533 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) { 4124 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
3534 atomic_dec(&phys_disk->ioaccel_cmds_out);
3535 return IO_ACCEL_INELIGIBLE;
3536 }
3537 4125
3538 if (fixup_ioaccel_cdb(cdb, &cdb_len)) { 4126 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
3539 atomic_dec(&phys_disk->ioaccel_cmds_out); 4127 atomic_dec(&phys_disk->ioaccel_cmds_out);
@@ -3556,8 +4144,19 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
3556 } 4144 }
3557 4145
3558 if (use_sg) { 4146 if (use_sg) {
3559 BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
3560 curr_sg = cp->sg; 4147 curr_sg = cp->sg;
4148 if (use_sg > h->ioaccel_maxsg) {
4149 addr64 = le64_to_cpu(
4150 h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4151 curr_sg->address = cpu_to_le64(addr64);
4152 curr_sg->length = 0;
4153 curr_sg->reserved[0] = 0;
4154 curr_sg->reserved[1] = 0;
4155 curr_sg->reserved[2] = 0;
4156 curr_sg->chain_indicator = 0x80;
4157
4158 curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4159 }
3561 scsi_for_each_sg(cmd, sg, use_sg, i) { 4160 scsi_for_each_sg(cmd, sg, use_sg, i) {
3562 addr64 = (u64) sg_dma_address(sg); 4161 addr64 = (u64) sg_dma_address(sg);
3563 len = sg_dma_len(sg); 4162 len = sg_dma_len(sg);
@@ -3602,14 +4201,22 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
3602 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT); 4201 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
3603 memcpy(cp->cdb, cdb, sizeof(cp->cdb)); 4202 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
3604 4203
3605 /* fill in sg elements */
3606 cp->sg_count = (u8) use_sg;
3607
3608 cp->data_len = cpu_to_le32(total_len); 4204 cp->data_len = cpu_to_le32(total_len);
3609 cp->err_ptr = cpu_to_le64(c->busaddr + 4205 cp->err_ptr = cpu_to_le64(c->busaddr +
3610 offsetof(struct io_accel2_cmd, error_data)); 4206 offsetof(struct io_accel2_cmd, error_data));
3611 cp->err_len = cpu_to_le32(sizeof(cp->error_data)); 4207 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
3612 4208
4209 /* fill in sg elements */
4210 if (use_sg > h->ioaccel_maxsg) {
4211 cp->sg_count = 1;
4212 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
4213 atomic_dec(&phys_disk->ioaccel_cmds_out);
4214 scsi_dma_unmap(cmd);
4215 return -1;
4216 }
4217 } else
4218 cp->sg_count = (u8) use_sg;
4219
3613 enqueue_cmd_and_start_io(h, c); 4220 enqueue_cmd_and_start_io(h, c);
3614 return 0; 4221 return 0;
3615} 4222}
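
On the ioaccel2 path, chaining instead converts the first embedded descriptor into a pointer: zero length, chain_indicator set to 0x80, and the bus address of the external SG block. A hedged sketch of that conversion; the descriptor layout is inferred from the hunk above rather than taken from hpsa_cmd.h:

    #include <stdint.h>
    #include <string.h>

    #define CHAIN_INDICATOR 0x80	/* marks a descriptor as a chain pointer */

    /* Illustrative descriptor layout inferred from the patch; the real
     * definition lives in hpsa_cmd.h and may differ. */
    struct ioaccel2_sg_desc {
    	uint64_t address;
    	uint32_t length;
    	uint8_t  reserved[3];
    	uint8_t  chain_indicator;
    };

    /* Repurpose the first embedded descriptor as a pointer to the external
     * chain block: no data, just the bus address and the chain flag. */
    static void set_chain_descriptor(struct ioaccel2_sg_desc *sg,
    				 uint64_t chain_block_dma)
    {
    	memset(sg, 0, sizeof(*sg));
    	sg->address = chain_block_dma;
    	sg->chain_indicator = CHAIN_INDICATOR;
    }

    int main(void)
    {
    	struct ioaccel2_sg_desc d;

    	set_chain_descriptor(&d, 0x1000ULL);
    	return d.chain_indicator == CHAIN_INDICATOR ? 0 : 1;
    }
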
@@ -3992,7 +4599,11 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
3992 dev->phys_disk[map_index]); 4599 dev->phys_disk[map_index]);
3993} 4600}
3994 4601
3995/* Submit commands down the "normal" RAID stack path */ 4602/*
4603 * Submit commands down the "normal" RAID stack path
4604 * All callers to hpsa_ciss_submit must check lockup_detected
4605 * beforehand: optionally before, and again after, calling cmd_alloc.
4606 */
3996static int hpsa_ciss_submit(struct ctlr_info *h, 4607static int hpsa_ciss_submit(struct ctlr_info *h,
3997 struct CommandList *c, struct scsi_cmnd *cmd, 4608 struct CommandList *c, struct scsi_cmnd *cmd,
3998 unsigned char scsi3addr[]) 4609 unsigned char scsi3addr[])
@@ -4007,7 +4618,6 @@ static int hpsa_ciss_submit(struct ctlr_info *h,
4007 /* Fill in the request block... */ 4618 /* Fill in the request block... */
4008 4619
4009 c->Request.Timeout = 0; 4620 c->Request.Timeout = 0;
4010 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
4011 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); 4621 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
4012 c->Request.CDBLen = cmd->cmd_len; 4622 c->Request.CDBLen = cmd->cmd_len;
4013 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); 4623 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
@@ -4050,7 +4660,7 @@ static int hpsa_ciss_submit(struct ctlr_info *h,
4050 } 4660 }
4051 4661
4052 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ 4662 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
4053 cmd_free(h, c); 4663 hpsa_cmd_resolve_and_free(h, c);
4054 return SCSI_MLQUEUE_HOST_BUSY; 4664 return SCSI_MLQUEUE_HOST_BUSY;
4055 } 4665 }
4056 enqueue_cmd_and_start_io(h, c); 4666 enqueue_cmd_and_start_io(h, c);
@@ -4058,25 +4668,125 @@ static int hpsa_ciss_submit(struct ctlr_info *h,
4058 return 0; 4668 return 0;
4059} 4669}
4060 4670
4671static void hpsa_cmd_init(struct ctlr_info *h, int index,
4672 struct CommandList *c)
4673{
4674 dma_addr_t cmd_dma_handle, err_dma_handle;
4675
4676 /* Zero out all of the CommandList except the last field, refcount */
4677 memset(c, 0, offsetof(struct CommandList, refcount));
4678 c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
4679 cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
4680 c->err_info = h->errinfo_pool + index;
4681 memset(c->err_info, 0, sizeof(*c->err_info));
4682 err_dma_handle = h->errinfo_pool_dhandle
4683 + index * sizeof(*c->err_info);
4684 c->cmdindex = index;
4685 c->busaddr = (u32) cmd_dma_handle;
4686 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
4687 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
4688 c->h = h;
4689 c->scsi_cmd = SCSI_CMD_IDLE;
4690}
4691
4692static void hpsa_preinitialize_commands(struct ctlr_info *h)
4693{
4694 int i;
4695
4696 for (i = 0; i < h->nr_cmds; i++) {
4697 struct CommandList *c = h->cmd_pool + i;
4698
4699 hpsa_cmd_init(h, i, c);
4700 atomic_set(&c->refcount, 0);
4701 }
4702}
4703
4704static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
4705 struct CommandList *c)
4706{
4707 dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
4708
4709 BUG_ON(c->cmdindex != index);
4710
4711 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
4712 memset(c->err_info, 0, sizeof(*c->err_info));
4713 c->busaddr = (u32) cmd_dma_handle;
4714}
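
Both init helpers recompute a command's bus address as the pool base plus an index-scaled offset, which is why a slot can be re-armed without any per-command DMA mapping. The arithmetic in isolation, with made-up values:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Each slot's DMA-visible address is base + index * element size, so
     * re-arming a command never needs a fresh mapping, only arithmetic. */
    static uint64_t slot_dma(uint64_t pool_dma, unsigned int index,
    			 size_t elem_size)
    {
    	return pool_dma + (uint64_t)index * elem_size;
    }

    int main(void)
    {
    	uint64_t base = 0x80000000ULL;	/* made-up pool bus address */

    	printf("slot 0 at 0x%llx, slot 3 at 0x%llx\n",
    	       (unsigned long long)slot_dma(base, 0, 64),
    	       (unsigned long long)slot_dma(base, 3, 64));
    	return 0;
    }
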
4715
4716static int hpsa_ioaccel_submit(struct ctlr_info *h,
4717 struct CommandList *c, struct scsi_cmnd *cmd,
4718 unsigned char *scsi3addr)
4719{
4720 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4721 int rc = IO_ACCEL_INELIGIBLE;
4722
4723 cmd->host_scribble = (unsigned char *) c;
4724
4725 if (dev->offload_enabled) {
4726 hpsa_cmd_init(h, c->cmdindex, c);
4727 c->cmd_type = CMD_SCSI;
4728 c->scsi_cmd = cmd;
4729 rc = hpsa_scsi_ioaccel_raid_map(h, c);
4730 if (rc < 0) /* scsi_dma_map failed. */
4731 rc = SCSI_MLQUEUE_HOST_BUSY;
4732 } else if (dev->hba_ioaccel_enabled) {
4733 hpsa_cmd_init(h, c->cmdindex, c);
4734 c->cmd_type = CMD_SCSI;
4735 c->scsi_cmd = cmd;
4736 rc = hpsa_scsi_ioaccel_direct_map(h, c);
4737 if (rc < 0) /* scsi_dma_map failed. */
4738 rc = SCSI_MLQUEUE_HOST_BUSY;
4739 }
4740 return rc;
4741}
4742
4061static void hpsa_command_resubmit_worker(struct work_struct *work) 4743static void hpsa_command_resubmit_worker(struct work_struct *work)
4062{ 4744{
4063 struct scsi_cmnd *cmd; 4745 struct scsi_cmnd *cmd;
4064 struct hpsa_scsi_dev_t *dev; 4746 struct hpsa_scsi_dev_t *dev;
4065 struct CommandList *c = 4747 struct CommandList *c = container_of(work, struct CommandList, work);
4066 container_of(work, struct CommandList, work);
4067 4748
4068 cmd = c->scsi_cmd; 4749 cmd = c->scsi_cmd;
4069 dev = cmd->device->hostdata; 4750 dev = cmd->device->hostdata;
4070 if (!dev) { 4751 if (!dev) {
4071 cmd->result = DID_NO_CONNECT << 16; 4752 cmd->result = DID_NO_CONNECT << 16;
4072 cmd->scsi_done(cmd); 4753 return hpsa_cmd_free_and_done(c->h, c, cmd);
4073 return; 4754 }
4755 if (c->reset_pending)
4756 return hpsa_cmd_resolve_and_free(c->h, c);
4757 if (c->abort_pending)
4758 return hpsa_cmd_abort_and_free(c->h, c, cmd);
4759 if (c->cmd_type == CMD_IOACCEL2) {
4760 struct ctlr_info *h = c->h;
4761 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
4762 int rc;
4763
4764 if (c2->error_data.serv_response ==
4765 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
4766 rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
4767 if (rc == 0)
4768 return;
4769 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
4770 /*
4771 * If we get here, it means dma mapping failed.
4772 * Try again via scsi mid layer, which will
4773 * then get SCSI_MLQUEUE_HOST_BUSY.
4774 */
4775 cmd->result = DID_IMM_RETRY << 16;
4776 return hpsa_cmd_free_and_done(h, c, cmd);
4777 }
4778 /* else, fall thru and resubmit down CISS path */
4779 }
4074 } 4780 }
4781 hpsa_cmd_partial_init(c->h, c->cmdindex, c);
4075 if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) { 4782 if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
4076 /* 4783 /*
4077 * If we get here, it means dma mapping failed. Try 4784 * If we get here, it means dma mapping failed. Try
4078 * again via scsi mid layer, which will then get 4785 * again via scsi mid layer, which will then get
4079 * SCSI_MLQUEUE_HOST_BUSY. 4786 * SCSI_MLQUEUE_HOST_BUSY.
4787 *
4788 * hpsa_ciss_submit will have already freed c
4789 * if it encountered a dma mapping failure.
4080 */ 4790 */
4081 cmd->result = DID_IMM_RETRY << 16; 4791 cmd->result = DID_IMM_RETRY << 16;
4082 cmd->scsi_done(cmd); 4792 cmd->scsi_done(cmd);
@@ -4094,30 +4804,24 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
4094 4804
4095 /* Get the ptr to our adapter structure out of cmd->host. */ 4805 /* Get the ptr to our adapter structure out of cmd->host. */
4096 h = sdev_to_hba(cmd->device); 4806 h = sdev_to_hba(cmd->device);
4807
4808 BUG_ON(cmd->request->tag < 0);
4809
4097 dev = cmd->device->hostdata; 4810 dev = cmd->device->hostdata;
4098 if (!dev) { 4811 if (!dev) {
4099 cmd->result = DID_NO_CONNECT << 16; 4812 cmd->result = DID_NO_CONNECT << 16;
4100 cmd->scsi_done(cmd); 4813 cmd->scsi_done(cmd);
4101 return 0; 4814 return 0;
4102 } 4815 }
4816
4103 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); 4817 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
4104 4818
4105 if (unlikely(lockup_detected(h))) { 4819 if (unlikely(lockup_detected(h))) {
4106 cmd->result = DID_ERROR << 16; 4820 cmd->result = DID_NO_CONNECT << 16;
4107 cmd->scsi_done(cmd);
4108 return 0;
4109 }
4110 c = cmd_alloc(h);
4111 if (c == NULL) { /* trouble... */
4112 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
4113 return SCSI_MLQUEUE_HOST_BUSY;
4114 }
4115 if (unlikely(lockup_detected(h))) {
4116 cmd->result = DID_ERROR << 16;
4117 cmd_free(h, c);
4118 cmd->scsi_done(cmd); 4821 cmd->scsi_done(cmd);
4119 return 0; 4822 return 0;
4120 } 4823 }
4824 c = cmd_tagged_alloc(h, cmd);
4121 4825
4122 /* 4826 /*
4123 * Call alternate submit routine for I/O accelerated commands. 4827 * Call alternate submit routine for I/O accelerated commands.
@@ -4126,27 +4830,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
4126 if (likely(cmd->retries == 0 && 4830 if (likely(cmd->retries == 0 &&
4127 cmd->request->cmd_type == REQ_TYPE_FS && 4831 cmd->request->cmd_type == REQ_TYPE_FS &&
4128 h->acciopath_status)) { 4832 h->acciopath_status)) {
4129 4833 rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
4130 cmd->host_scribble = (unsigned char *) c; 4834 if (rc == 0)
4131 c->cmd_type = CMD_SCSI; 4835 return 0;
4132 c->scsi_cmd = cmd; 4836 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
4133 4837 hpsa_cmd_resolve_and_free(h, c);
4134 if (dev->offload_enabled) { 4838 return SCSI_MLQUEUE_HOST_BUSY;
4135 rc = hpsa_scsi_ioaccel_raid_map(h, c);
4136 if (rc == 0)
4137 return 0; /* Sent on ioaccel path */
4138 if (rc < 0) { /* scsi_dma_map failed. */
4139 cmd_free(h, c);
4140 return SCSI_MLQUEUE_HOST_BUSY;
4141 }
4142 } else if (dev->ioaccel_handle) {
4143 rc = hpsa_scsi_ioaccel_direct_map(h, c);
4144 if (rc == 0)
4145 return 0; /* Sent on direct map path */
4146 if (rc < 0) { /* scsi_dma_map failed. */
4147 cmd_free(h, c);
4148 return SCSI_MLQUEUE_HOST_BUSY;
4149 }
4150 } 4839 }
4151 } 4840 }
4152 return hpsa_ciss_submit(h, c, cmd, scsi3addr); 4841 return hpsa_ciss_submit(h, c, cmd, scsi3addr);
@@ -4228,22 +4917,16 @@ static int hpsa_scan_finished(struct Scsi_Host *sh,
4228 return finished; 4917 return finished;
4229} 4918}
4230 4919
4231static void hpsa_unregister_scsi(struct ctlr_info *h) 4920static int hpsa_scsi_host_alloc(struct ctlr_info *h)
4232{
4233 /* we are being forcibly unloaded, and may not refuse. */
4234 scsi_remove_host(h->scsi_host);
4235 scsi_host_put(h->scsi_host);
4236 h->scsi_host = NULL;
4237}
4238
4239static int hpsa_register_scsi(struct ctlr_info *h)
4240{ 4921{
4241 struct Scsi_Host *sh; 4922 struct Scsi_Host *sh;
4242 int error; 4923 int error;
4243 4924
4244 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); 4925 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
4245 if (sh == NULL) 4926 if (sh == NULL) {
4246 goto fail; 4927 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
4928 return -ENOMEM;
4929 }
4247 4930
4248 sh->io_port = 0; 4931 sh->io_port = 0;
4249 sh->n_io_port = 0; 4932 sh->n_io_port = 0;
@@ -4252,80 +4935,156 @@ static int hpsa_register_scsi(struct ctlr_info *h)
4252 sh->max_cmd_len = MAX_COMMAND_SIZE; 4935 sh->max_cmd_len = MAX_COMMAND_SIZE;
4253 sh->max_lun = HPSA_MAX_LUN; 4936 sh->max_lun = HPSA_MAX_LUN;
4254 sh->max_id = HPSA_MAX_LUN; 4937 sh->max_id = HPSA_MAX_LUN;
4255 sh->can_queue = h->nr_cmds - 4938 sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
4256 HPSA_CMDS_RESERVED_FOR_ABORTS -
4257 HPSA_CMDS_RESERVED_FOR_DRIVER -
4258 HPSA_MAX_CONCURRENT_PASSTHRUS;
4259 sh->cmd_per_lun = sh->can_queue; 4939 sh->cmd_per_lun = sh->can_queue;
4260 sh->sg_tablesize = h->maxsgentries; 4940 sh->sg_tablesize = h->maxsgentries;
4261 h->scsi_host = sh;
4262 sh->hostdata[0] = (unsigned long) h; 4941 sh->hostdata[0] = (unsigned long) h;
4263 sh->irq = h->intr[h->intr_mode]; 4942 sh->irq = h->intr[h->intr_mode];
4264 sh->unique_id = sh->irq; 4943 sh->unique_id = sh->irq;
4265 error = scsi_add_host(sh, &h->pdev->dev); 4944 error = scsi_init_shared_tag_map(sh, sh->can_queue);
4266 if (error) 4945 if (error) {
4267 goto fail_host_put; 4946 dev_err(&h->pdev->dev,
4268 scsi_scan_host(sh); 4947 "%s: scsi_init_shared_tag_map failed for controller %d\n",
4948 __func__, h->ctlr);
4949 scsi_host_put(sh);
4950 return error;
4951 }
4952 h->scsi_host = sh;
4269 return 0; 4953 return 0;
4954}
4270 4955
4271 fail_host_put: 4956static int hpsa_scsi_add_host(struct ctlr_info *h)
4272 dev_err(&h->pdev->dev, "%s: scsi_add_host" 4957{
4273 " failed for controller %d\n", __func__, h->ctlr); 4958 int rv;
4274 scsi_host_put(sh); 4959
4275 return error; 4960 rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
4276 fail: 4961 if (rv) {
4277 dev_err(&h->pdev->dev, "%s: scsi_host_alloc" 4962 dev_err(&h->pdev->dev, "scsi_add_host failed\n");
4278 " failed for controller %d\n", __func__, h->ctlr); 4963 return rv;
4279 return -ENOMEM; 4964 }
4965 scsi_scan_host(h->scsi_host);
4966 return 0;
4280} 4967}
4281 4968
4282static int wait_for_device_to_become_ready(struct ctlr_info *h, 4969/*
4283 unsigned char lunaddr[]) 4970 * The block layer has already gone to the trouble of picking out a unique,
4971 * small-integer tag for this request. We use an offset from that value as
4972 * an index to select our command block. (The offset allows us to reserve the
4973 * low-numbered entries for our own uses.)
4974 */
4975static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
4976{
4977 int idx = scmd->request->tag;
4978
4979 if (idx < 0)
4980 return idx;
4981
4982 /* Offset to leave space for internal cmds. */
4983 return idx + HPSA_NRESERVED_CMDS;
4984}
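
That one-line offset is what partitions the pool: driver-internal commands own indexes 0..HPSA_NRESERVED_CMDS-1 while block-layer tags land above them. A minimal model of the mapping; the reserve count here is illustrative, not the driver's constant:

    #include <stdio.h>

    #define NRESERVED_CMDS 16	/* illustrative; not the driver's value */

    /* Block-layer tags start at 0; shift them past the reserved slots so
     * driver-internal commands keep exclusive use of the low indexes. */
    static int tag_to_index(int tag)
    {
    	if (tag < 0)
    		return tag;	/* propagate "no tag" unchanged */
    	return tag + NRESERVED_CMDS;
    }

    int main(void)
    {
    	printf("tag 0 -> slot %d\n", tag_to_index(0));
    	printf("tag 5 -> slot %d\n", tag_to_index(5));
    	return 0;
    }
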
4985
4986/*
4987 * Send a TEST_UNIT_READY command to the specified LUN using the specified
4988 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
4989 */
4990static int hpsa_send_test_unit_ready(struct ctlr_info *h,
4991 struct CommandList *c, unsigned char lunaddr[],
4992 int reply_queue)
4993{
4994 int rc;
4995
4996 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
4997 (void) fill_cmd(c, TEST_UNIT_READY, h,
4998 NULL, 0, 0, lunaddr, TYPE_CMD);
4999 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5000 if (rc)
5001 return rc;
5002 /* no unmap needed here because no data xfer. */
5003
5004 /* Check if the unit is already ready. */
5005 if (c->err_info->CommandStatus == CMD_SUCCESS)
5006 return 0;
5007
5008 /*
5009 * The first command sent after reset will receive "unit attention" to
5010 * indicate that the LUN has been reset...this is actually what we're
5011 * looking for (but, success is good too).
5012 */
5013 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5014 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
5015 (c->err_info->SenseInfo[2] == NO_SENSE ||
5016 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
5017 return 0;
5018
5019 return 1;
5020}
5021
5022/*
5023 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
5024 * returns zero when the unit is ready, and non-zero when giving up.
5025 */
5026static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
5027 struct CommandList *c,
5028 unsigned char lunaddr[], int reply_queue)
4284{ 5029{
4285 int rc; 5030 int rc;
4286 int count = 0; 5031 int count = 0;
4287 int waittime = 1; /* seconds */ 5032 int waittime = 1; /* seconds */
4288 struct CommandList *c;
4289
4290 c = cmd_alloc(h);
4291 if (!c) {
4292 dev_warn(&h->pdev->dev, "out of memory in "
4293 "wait_for_device_to_become_ready.\n");
4294 return IO_ERROR;
4295 }
4296 5033
4297 /* Send test unit ready until device ready, or give up. */ 5034 /* Send test unit ready until device ready, or give up. */
4298 while (count < HPSA_TUR_RETRY_LIMIT) { 5035 for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
4299 5036
4300 /* Wait for a bit. do this first, because if we send 5037 /*
5038 * Wait for a bit. Do this first, because if we send
4301 * the TUR right away, the reset will just abort it. 5039 * the TUR right away, the reset will just abort it.
4302 */ 5040 */
4303 msleep(1000 * waittime); 5041 msleep(1000 * waittime);
4304 count++; 5042
4305 rc = 0; /* Device ready. */ 5043 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
5044 if (!rc)
5045 break;
4306 5046
4307 /* Increase wait time with each try, up to a point. */ 5047 /* Increase wait time with each try, up to a point. */
4308 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) 5048 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
4309 waittime = waittime * 2; 5049 waittime *= 2;
4310 5050
4311 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ 5051 dev_warn(&h->pdev->dev,
4312 (void) fill_cmd(c, TEST_UNIT_READY, h, 5052 "waiting %d secs for device to become ready.\n",
4313 NULL, 0, 0, lunaddr, TYPE_CMD); 5053 waittime);
4314 hpsa_scsi_do_simple_cmd_core(h, c); 5054 }
4315 /* no unmap needed here because no data xfer. */
4316 5055
4317 if (c->err_info->CommandStatus == CMD_SUCCESS) 5056 return rc;
4318 break; 5057}
4319 5058
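
The retry loop above sleeps first and then doubles its interval while below a cap, so a device mid-reset is polled gently without stretching the total wait unreasonably. The schedule on its own, with placeholder constants standing in for the HPSA ones:

    #include <stdio.h>

    #define MAX_WAIT_SECS	30	/* stand-in for HPSA_MAX_WAIT_INTERVAL_SECS */
    #define RETRY_LIMIT	10	/* stand-in for HPSA_TUR_RETRY_LIMIT */

    int main(void)
    {
    	int waittime = 1, total = 0;

    	for (int count = 0; count < RETRY_LIMIT; count++) {
    		total += waittime;		/* the sleep happens first */
    		printf("try %d: slept %ds, %ds total\n",
    		       count, waittime, total);
    		if (waittime < MAX_WAIT_SECS)	/* double while below cap */
    			waittime *= 2;
    	}
    	return 0;
    }
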
4320 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 5059static int wait_for_device_to_become_ready(struct ctlr_info *h,
4321 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && 5060 unsigned char lunaddr[],
4322 (c->err_info->SenseInfo[2] == NO_SENSE || 5061 int reply_queue)
4323 c->err_info->SenseInfo[2] == UNIT_ATTENTION)) 5062{
4324 break; 5063 int first_queue;
5064 int last_queue;
5065 int rq;
5066 int rc = 0;
5067 struct CommandList *c;
4325 5068
4326 dev_warn(&h->pdev->dev, "waiting %d secs " 5069 c = cmd_alloc(h);
4327 "for device to become ready.\n", waittime); 5070
4328 rc = 1; /* device not ready. */ 5071 /*
5072 * If no specific reply queue was requested, then send the TUR
5073 * repeatedly, requesting a reply on each reply queue; otherwise execute
5074 * the loop exactly once using only the specified queue.
5075 */
5076 if (reply_queue == DEFAULT_REPLY_QUEUE) {
5077 first_queue = 0;
5078 last_queue = h->nreply_queues - 1;
5079 } else {
5080 first_queue = reply_queue;
5081 last_queue = reply_queue;
5082 }
5083
5084 for (rq = first_queue; rq <= last_queue; rq++) {
5085 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
5086 if (rc)
5087 break;
4329 } 5088 }
4330 5089
4331 if (rc) 5090 if (rc)
@@ -4345,6 +5104,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
4345 int rc; 5104 int rc;
4346 struct ctlr_info *h; 5105 struct ctlr_info *h;
4347 struct hpsa_scsi_dev_t *dev; 5106 struct hpsa_scsi_dev_t *dev;
5107 char msg[40];
4348 5108
4349 /* find the controller to which the command to be aborted was sent */ 5109 /* find the controller to which the command to be aborted was sent */
4350 h = sdev_to_hba(scsicmd->device); 5110 h = sdev_to_hba(scsicmd->device);
@@ -4356,19 +5116,38 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
4356 5116
4357 dev = scsicmd->device->hostdata; 5117 dev = scsicmd->device->hostdata;
4358 if (!dev) { 5118 if (!dev) {
4359 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " 5119 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
4360 "device lookup failed.\n");
4361 return FAILED; 5120 return FAILED;
4362 } 5121 }
4363 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", 5122
4364 h->scsi_host->host_no, dev->bus, dev->target, dev->lun); 5123 /* if controller locked up, we can guarantee command won't complete */
4365 /* send a reset to the SCSI LUN which the command was sent to */ 5124 if (lockup_detected(h)) {
4366 rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN); 5125 sprintf(msg, "cmd %d RESET FAILED, lockup detected",
4367 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) 5126 hpsa_get_cmd_index(scsicmd));
5127 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5128 return FAILED;
5129 }
5130
5131 /* this reset request might be the result of a lockup; check */
5132 if (detect_controller_lockup(h)) {
5133 sprintf(msg, "cmd %d RESET FAILED, new lockup detected",
5134 hpsa_get_cmd_index(scsicmd));
5135 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5136 return FAILED;
5137 }
5138
5139 /* Do not attempt on controller */
5140 if (is_hba_lunid(dev->scsi3addr))
4368 return SUCCESS; 5141 return SUCCESS;
4369 5142
4370 dev_warn(&h->pdev->dev, "resetting device failed.\n"); 5143 hpsa_show_dev_msg(KERN_WARNING, h, dev, "resetting");
4371 return FAILED; 5144
5145 /* send a reset to the SCSI LUN which the command was sent to */
5146 rc = hpsa_do_reset(h, dev, dev->scsi3addr, HPSA_RESET_TYPE_LUN,
5147 DEFAULT_REPLY_QUEUE);
5148 sprintf(msg, "reset %s", rc == 0 ? "completed successfully" : "failed");
5149 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5150 return rc == 0 ? SUCCESS : FAILED;
4372} 5151}
4373 5152
4374static void swizzle_abort_tag(u8 *tag) 5153static void swizzle_abort_tag(u8 *tag)
@@ -4412,7 +5191,7 @@ static void hpsa_get_tag(struct ctlr_info *h,
4412} 5191}
4413 5192
4414static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, 5193static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
4415 struct CommandList *abort, int swizzle) 5194 struct CommandList *abort, int reply_queue)
4416{ 5195{
4417 int rc = IO_OK; 5196 int rc = IO_OK;
4418 struct CommandList *c; 5197 struct CommandList *c;
@@ -4420,19 +5199,15 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
4420 __le32 tagupper, taglower; 5199 __le32 tagupper, taglower;
4421 5200
4422 c = cmd_alloc(h); 5201 c = cmd_alloc(h);
4423 if (c == NULL) { /* trouble... */
4424 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
4425 return -ENOMEM;
4426 }
4427 5202
4428 /* fill_cmd can't fail here, no buffer to map */ 5203 /* fill_cmd can't fail here, no buffer to map */
4429 (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort, 5204 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
4430 0, 0, scsi3addr, TYPE_MSG); 5205 0, 0, scsi3addr, TYPE_MSG);
4431 if (swizzle) 5206 if (h->needs_abort_tags_swizzled)
4432 swizzle_abort_tag(&c->Request.CDB[4]); 5207 swizzle_abort_tag(&c->Request.CDB[4]);
4433 hpsa_scsi_do_simple_cmd_core(h, c); 5208 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
4434 hpsa_get_tag(h, abort, &taglower, &tagupper); 5209 hpsa_get_tag(h, abort, &taglower, &tagupper);
4435 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n", 5210 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
4436 __func__, tagupper, taglower); 5211 __func__, tagupper, taglower);
4437 /* no unmap needed here because no data xfer. */ 5212 /* no unmap needed here because no data xfer. */
4438 5213
@@ -4440,6 +5215,9 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
4440 switch (ei->CommandStatus) { 5215 switch (ei->CommandStatus) {
4441 case CMD_SUCCESS: 5216 case CMD_SUCCESS:
4442 break; 5217 break;
5218 case CMD_TMF_STATUS:
5219 rc = hpsa_evaluate_tmf_status(h, c);
5220 break;
4443 case CMD_UNABORTABLE: /* Very common, don't make noise. */ 5221 case CMD_UNABORTABLE: /* Very common, don't make noise. */
4444 rc = -1; 5222 rc = -1;
4445 break; 5223 break;
@@ -4456,6 +5234,48 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
4456 return rc; 5234 return rc;
4457} 5235}
4458 5236
5237static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
5238 struct CommandList *command_to_abort, int reply_queue)
5239{
5240 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5241 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
5242 struct io_accel2_cmd *c2a =
5243 &h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
5244 struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
5245 struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;
5246
5247 /*
5248 * We're overlaying struct hpsa_tmf_struct on top of something which
5249 * was allocated as a struct io_accel2_cmd, so we had better be sure it
5250 * actually fits, and doesn't overrun the error info space.
5251 */
5252 BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
5253 sizeof(struct io_accel2_cmd));
5254 BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
5255 offsetof(struct hpsa_tmf_struct, error_len) +
5256 sizeof(ac->error_len));
5257
5258 c->cmd_type = IOACCEL2_TMF;
5259 c->scsi_cmd = SCSI_CMD_BUSY;
5260
5261 /* Adjust the DMA address to point to the accelerated command buffer */
5262 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
5263 (c->cmdindex * sizeof(struct io_accel2_cmd));
5264 BUG_ON(c->busaddr & 0x0000007F);
5265
5266 memset(ac, 0, sizeof(*c2)); /* yes this is correct */
5267 ac->iu_type = IOACCEL2_IU_TMF_TYPE;
5268 ac->reply_queue = reply_queue;
5269 ac->tmf = IOACCEL2_TMF_ABORT;
5270 ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
5271 memset(ac->lun_id, 0, sizeof(ac->lun_id));
5272 ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
5273 ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
5274 ac->error_ptr = cpu_to_le64(c->busaddr +
5275 offsetof(struct io_accel2_cmd, error_data));
5276 ac->error_len = cpu_to_le32(sizeof(c2->error_data));
5277}
5278
4459/* ioaccel2 path firmware cannot handle abort task requests. 5279/* ioaccel2 path firmware cannot handle abort task requests.
4460 * Change abort requests to physical target reset, and send to the 5280 * Change abort requests to physical target reset, and send to the
4461 * address of the physical disk used for the ioaccel 2 command. 5281 * address of the physical disk used for the ioaccel 2 command.
@@ -4464,7 +5284,7 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
4464 */ 5284 */
4465 5285
4466static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, 5286static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
4467 unsigned char *scsi3addr, struct CommandList *abort) 5287 unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
4468{ 5288{
4469 int rc = IO_OK; 5289 int rc = IO_OK;
4470 struct scsi_cmnd *scmd; /* scsi command within request being aborted */ 5290 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
@@ -4483,8 +5303,9 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
4483 5303
4484 if (h->raid_offload_debug > 0) 5304 if (h->raid_offload_debug > 0)
4485 dev_info(&h->pdev->dev, 5305 dev_info(&h->pdev->dev,
4486 "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 5306 "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4487 h->scsi_host->host_no, dev->bus, dev->target, dev->lun, 5307 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
5308 "Reset as abort",
4488 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3], 5309 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
4489 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); 5310 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
4490 5311
@@ -4506,7 +5327,7 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
4506 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 5327 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4507 psa[0], psa[1], psa[2], psa[3], 5328 psa[0], psa[1], psa[2], psa[3],
4508 psa[4], psa[5], psa[6], psa[7]); 5329 psa[4], psa[5], psa[6], psa[7]);
4509 rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET); 5330 rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
4510 if (rc != 0) { 5331 if (rc != 0) {
4511 dev_warn(&h->pdev->dev, 5332 dev_warn(&h->pdev->dev,
4512 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 5333 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
@@ -4516,7 +5337,7 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
4516 } 5337 }
4517 5338
4518 /* wait for device to recover */ 5339 /* wait for device to recover */
4519 if (wait_for_device_to_become_ready(h, psa) != 0) { 5340 if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
4520 dev_warn(&h->pdev->dev, 5341 dev_warn(&h->pdev->dev,
4521 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 5342 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4522 psa[0], psa[1], psa[2], psa[3], 5343 psa[0], psa[1], psa[2], psa[3],
@@ -4533,25 +5354,94 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
4533 return rc; /* success */ 5354 return rc; /* success */
4534} 5355}
4535 5356
4536/* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to 5357static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
4537 * tell which kind we're dealing with, so we send the abort both ways. There 5358 struct CommandList *abort, int reply_queue)
4538 * shouldn't be any collisions between swizzled and unswizzled tags due to the 5359{
4539 * way we construct our tags but we check anyway in case the assumptions which 5360 int rc = IO_OK;
4540 * make this true someday become false. 5361 struct CommandList *c;
4541 */ 5362 __le32 taglower, tagupper;
5363 struct hpsa_scsi_dev_t *dev;
5364 struct io_accel2_cmd *c2;
5365
5366 dev = abort->scsi_cmd->device->hostdata;
5367 if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
5368 return -1;
5369
5370 c = cmd_alloc(h);
5371 setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
5372 c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5373 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5374 hpsa_get_tag(h, abort, &taglower, &tagupper);
5375 dev_dbg(&h->pdev->dev,
5376 "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
5377 __func__, tagupper, taglower);
5378 /* no unmap needed here because no data xfer. */
5379
5380 dev_dbg(&h->pdev->dev,
5381 "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
5382 __func__, tagupper, taglower, c2->error_data.serv_response);
5383 switch (c2->error_data.serv_response) {
5384 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
5385 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
5386 rc = 0;
5387 break;
5388 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
5389 case IOACCEL2_SERV_RESPONSE_FAILURE:
5390 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
5391 rc = -1;
5392 break;
5393 default:
5394 dev_warn(&h->pdev->dev,
5395 "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
5396 __func__, tagupper, taglower,
5397 c2->error_data.serv_response);
5398 rc = -1;
5399 }
5400 cmd_free(h, c);
5401 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
5402 tagupper, taglower);
5403 return rc;
5404}
5405
4542static int hpsa_send_abort_both_ways(struct ctlr_info *h, 5406static int hpsa_send_abort_both_ways(struct ctlr_info *h,
4543 unsigned char *scsi3addr, struct CommandList *abort) 5407 unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
4544{ 5408{
4545 /* ioaccelerator mode 2 commands should be aborted via the 5409 /*
5410 * ioaccelerator mode 2 commands should be aborted via the
4546 * accelerated path, since RAID path is unaware of these commands, 5411 * accelerated path, since RAID path is unaware of these commands,
4547 * but underlying firmware can't handle abort TMF. 5412 * but not all underlying firmware can handle abort TMF.
4548 * Change abort to physical device reset. 5413 * Change abort to physical device reset when abort TMF is unsupported.
4549 */ 5414 */
4550 if (abort->cmd_type == CMD_IOACCEL2) 5415 if (abort->cmd_type == CMD_IOACCEL2) {
4551 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort); 5416 if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)
5417 return hpsa_send_abort_ioaccel2(h, abort,
5418 reply_queue);
5419 else
5420 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr,
5421 abort, reply_queue);
5422 }
5423 return hpsa_send_abort(h, scsi3addr, abort, reply_queue);
5424}
4552 5425
4553 return hpsa_send_abort(h, scsi3addr, abort, 0) && 5426/* Find out which reply queue a command was meant to return on */
4554 hpsa_send_abort(h, scsi3addr, abort, 1); 5427static int hpsa_extract_reply_queue(struct ctlr_info *h,
5428 struct CommandList *c)
5429{
5430 if (c->cmd_type == CMD_IOACCEL2)
5431 return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
5432 return c->Header.ReplyQueue;
5433}
5434
5435/*
5436 * Limit concurrency of abort commands to prevent
5437 * over-subscription of commands
5438 */
5439static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
5440{
5441#define ABORT_CMD_WAIT_MSECS 5000
5442 return !wait_event_timeout(h->abort_cmd_wait_queue,
5443 atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
5444 msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
4555} 5445}
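
wait_for_available_abort_cmd() is a counting semaphore assembled from atomic_dec_if_positive() and wait_event_timeout(). A userspace approximation of the take/put pair in C11 atomics; the pool size is invented and the kernel's sleep/wakeup plumbing is elided:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int abort_cmds_available = 3;	/* invented pool size */

    /* Userspace analogue of atomic_dec_if_positive(): claim a slot only
     * if one is free, never driving the counter negative. */
    static int try_take_abort_slot(void)
    {
    	int old = atomic_load(&abort_cmds_available);

    	while (old > 0) {
    		if (atomic_compare_exchange_weak(&abort_cmds_available,
    						 &old, old - 1))
    			return 1;	/* slot claimed */
    	}
    	return 0;	/* none free; the driver would sleep and retry */
    }

    static void put_abort_slot(void)
    {
    	atomic_fetch_add(&abort_cmds_available, 1);
    }

    int main(void)
    {
    	printf("claimed a slot? %d\n", try_take_abort_slot());
    	put_abort_slot();
    	return 0;
    }
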
4556 5446
4557/* Send an abort for the specified command. 5447/* Send an abort for the specified command.
@@ -4561,7 +5451,7 @@ static int hpsa_send_abort_both_ways(struct ctlr_info *h,
4561static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) 5451static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
4562{ 5452{
4563 5453
4564 int i, rc; 5454 int rc;
4565 struct ctlr_info *h; 5455 struct ctlr_info *h;
4566 struct hpsa_scsi_dev_t *dev; 5456 struct hpsa_scsi_dev_t *dev;
4567 struct CommandList *abort; /* pointer to command to be aborted */ 5457 struct CommandList *abort; /* pointer to command to be aborted */
@@ -4569,27 +5459,19 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
4569 char msg[256]; /* For debug messaging. */ 5459 char msg[256]; /* For debug messaging. */
4570 int ml = 0; 5460 int ml = 0;
4571 __le32 tagupper, taglower; 5461 __le32 tagupper, taglower;
4572 int refcount; 5462 int refcount, reply_queue;
4573 5463
4574 /* Find the controller of the command to be aborted */ 5464 if (sc == NULL)
4575 h = sdev_to_hba(sc->device);
4576 if (WARN(h == NULL,
4577 "ABORT REQUEST FAILED, Controller lookup failed.\n"))
4578 return FAILED; 5465 return FAILED;
4579 5466
4580 if (lockup_detected(h)) 5467 if (sc->device == NULL)
4581 return FAILED; 5468 return FAILED;
4582 5469
4583 /* Check that controller supports some kind of task abort */ 5470 /* Find the controller of the command to be aborted */
4584 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && 5471 h = sdev_to_hba(sc->device);
4585 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) 5472 if (h == NULL)
4586 return FAILED; 5473 return FAILED;
4587 5474
4588 memset(msg, 0, sizeof(msg));
4589 ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%llu ",
4590 h->scsi_host->host_no, sc->device->channel,
4591 sc->device->id, sc->device->lun);
4592
4593 /* Find the device of the command to be aborted */ 5475 /* Find the device of the command to be aborted */
4594 dev = sc->device->hostdata; 5476 dev = sc->device->hostdata;
4595 if (!dev) { 5477 if (!dev) {
@@ -4598,6 +5480,31 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
4598 return FAILED; 5480 return FAILED;
4599 } 5481 }
4600 5482
5483 /* If controller locked up, we can guarantee command won't complete */
5484 if (lockup_detected(h)) {
5485 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5486 "ABORT FAILED, lockup detected");
5487 return FAILED;
5488 }
5489
5490 /* This is a good time to check if controller lockup has occurred */
5491 if (detect_controller_lockup(h)) {
5492 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5493 "ABORT FAILED, new lockup detected");
5494 return FAILED;
5495 }
5496
5497 /* Check that controller supports some kind of task abort */
5498 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
5499 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
5500 return FAILED;
5501
5502 memset(msg, 0, sizeof(msg));
5503 ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
5504 h->scsi_host->host_no, sc->device->channel,
5505 sc->device->id, sc->device->lun,
5506 "Aborting command", sc);
5507
4601 /* Get SCSI command to be aborted */ 5508 /* Get SCSI command to be aborted */
4602 abort = (struct CommandList *) sc->host_scribble; 5509 abort = (struct CommandList *) sc->host_scribble;
4603 if (abort == NULL) { 5510 if (abort == NULL) {
@@ -4609,50 +5516,115 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
4609 cmd_free(h, abort); 5516 cmd_free(h, abort);
4610 return SUCCESS; 5517 return SUCCESS;
4611 } 5518 }
5519
5520 /* Don't bother trying the abort if we know it won't work. */
5521 if (abort->cmd_type != CMD_IOACCEL2 &&
5522 abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
5523 cmd_free(h, abort);
5524 return FAILED;
5525 }
5526
5527 /*
5528 * Check that we're aborting the right command.
5529 * It's possible the CommandList already completed and got re-used.
5530 */
5531 if (abort->scsi_cmd != sc) {
5532 cmd_free(h, abort);
5533 return SUCCESS;
5534 }
5535
5536 abort->abort_pending = true;
4612 hpsa_get_tag(h, abort, &taglower, &tagupper); 5537 hpsa_get_tag(h, abort, &taglower, &tagupper);
5538 reply_queue = hpsa_extract_reply_queue(h, abort);
4613 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower); 5539 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
4614 as = abort->scsi_cmd; 5540 as = abort->scsi_cmd;
4615 if (as != NULL) 5541 if (as != NULL)
4616 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ", 5542 ml += sprintf(msg+ml,
4617 as->cmnd[0], as->serial_number); 5543 "CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
4618 dev_dbg(&h->pdev->dev, "%s\n", msg); 5544 as->cmd_len, as->cmnd[0], as->cmnd[1],
4619 dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n", 5545 as->serial_number);
4620 h->scsi_host->host_no, dev->bus, dev->target, dev->lun); 5546 dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
5547 hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");
5548
4621 /* 5549 /*
4622 * Command is in flight, or possibly already completed 5550 * Command is in flight, or possibly already completed
4623 * by the firmware (but not to the scsi mid layer) but we can't 5551 * by the firmware (but not to the scsi mid layer) but we can't
4624 * distinguish which. Send the abort down. 5552 * distinguish which. Send the abort down.
4625 */ 5553 */
4626 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort); 5554 if (wait_for_available_abort_cmd(h)) {
5555 dev_warn(&h->pdev->dev,
5556 "%s FAILED, timeout waiting for an abort command to become available.\n",
5557 msg);
5558 cmd_free(h, abort);
5559 return FAILED;
5560 }
5561 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue);
5562 atomic_inc(&h->abort_cmds_available);
5563 wake_up_all(&h->abort_cmd_wait_queue);
4627 if (rc != 0) { 5564 if (rc != 0) {
4628 dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg); 5565 dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
4629 dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n", 5566 hpsa_show_dev_msg(KERN_WARNING, h, dev,
4630 h->scsi_host->host_no, 5567 "FAILED to abort command");
4631 dev->bus, dev->target, dev->lun);
4632 cmd_free(h, abort); 5568 cmd_free(h, abort);
4633 return FAILED; 5569 return FAILED;
4634 } 5570 }
4635 dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg); 5571 dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
5572 wait_event(h->event_sync_wait_queue,
5573 abort->scsi_cmd != sc || lockup_detected(h));
5574 cmd_free(h, abort);
5575 return !lockup_detected(h) ? SUCCESS : FAILED;
5576}
4636 5577
4637 /* If the abort(s) above completed and actually aborted the 5578/*
4638 * command, then the command to be aborted should already be 5579 * For operations with an associated SCSI command, a command block is allocated
4639 * completed. If not, wait around a bit more to see if they 5580 * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
4640 * manage to complete normally. 5581 * block request tag as an index into a table of entries. cmd_tagged_free() is
4641 */ 5582 * the complement, although cmd_free() may be called instead.
4642#define ABORT_COMPLETE_WAIT_SECS 30 5583 */
4643 for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) { 5584static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
4644 refcount = atomic_read(&abort->refcount); 5585 struct scsi_cmnd *scmd)
4645 if (refcount < 2) { 5586{
4646 cmd_free(h, abort); 5587 int idx = hpsa_get_cmd_index(scmd);
4647 return SUCCESS; 5588 struct CommandList *c = h->cmd_pool + idx;
4648 } else { 5589
4649 msleep(100); 5590 if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
4650 } 5591 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
5592 idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
5593 /* The index value comes from the block layer, so if it's out of
5594 * bounds, it's probably not our bug.
5595 */
5596 BUG();
4651 } 5597 }
4652 dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n", 5598
4653 msg, ABORT_COMPLETE_WAIT_SECS); 5599 atomic_inc(&c->refcount);
4654 cmd_free(h, abort); 5600 if (unlikely(!hpsa_is_cmd_idle(c))) {
4655 return FAILED; 5601 /*
5602 * We expect that the SCSI layer will hand us a unique tag
5603 * value. Thus, there should never be a collision here between
5604 * two requests...because if the selected command isn't idle
5605 * then someone is going to be very disappointed.
5606 */
5607 dev_err(&h->pdev->dev,
5608 "tag collision (tag=%d) in cmd_tagged_alloc().\n",
5609 idx);
5610 if (c->scsi_cmd != NULL)
5611 scsi_print_command(c->scsi_cmd);
5612 scsi_print_command(scmd);
5613 }
5614
5615 hpsa_cmd_partial_init(h, idx, c);
5616 return c;
5617}
5618
5619static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
5620{
5621 /*
5622 * Release our reference to the block. We don't need to do anything
5623 * else to free it, because it is accessed by index. (There's no point
5624 * in checking the result of the decrement, since we cannot guarantee
5625 * that there isn't a concurrent abort which is also accessing it.)
5626 */
5627 (void)atomic_dec(&c->refcount);
4656} 5628}
4657 5629
4658/* 5630/*
@@ -4660,16 +5632,15 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
4660 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track 5632 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
4661 * which ones are free or in use. Lock must be held when calling this. 5633 * which ones are free or in use. Lock must be held when calling this.
4662 * cmd_free() is the complement. 5634 * cmd_free() is the complement.
5635 * This function never gives up and returns NULL. If it hangs,
5636 * another thread must call cmd_free() to free some tags.
4663 */ 5637 */
4664 5638
4665static struct CommandList *cmd_alloc(struct ctlr_info *h) 5639static struct CommandList *cmd_alloc(struct ctlr_info *h)
4666{ 5640{
4667 struct CommandList *c; 5641 struct CommandList *c;
4668 int i; 5642 int refcount, i;
4669 union u64bit temp64; 5643 int offset = 0;
4670 dma_addr_t cmd_dma_handle, err_dma_handle;
4671 int refcount;
4672 unsigned long offset;
4673 5644
4674 /* 5645 /*
4675 * There is some *extremely* small but non-zero chance that 5646
@@ -4681,12 +5652,20 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
4681 * very unlucky thread might be starved anyway, never able to 5652 * very unlucky thread might be starved anyway, never able to
4682 * beat the other threads. In reality, this happens so 5653 * beat the other threads. In reality, this happens so
4683 * infrequently as to be indistinguishable from never. 5654 * infrequently as to be indistinguishable from never.
5655 *
5656 * Note that we start allocating commands before the SCSI host structure
5657 * is initialized. Since the search starts at bit zero, this
5658 * all works because we have at least one command structure available;
5659 * however, it means that the structures with the low indexes have to be
5660 * reserved for driver-initiated requests, while requests from the block
5661 * layer will use the higher indexes.
4684 */ 5662 */
4685 5663
4686 offset = h->last_allocation; /* benignly racy */
4687 for (;;) { 5664 for (;;) {
4688 i = find_next_zero_bit(h->cmd_pool_bits, h->nr_cmds, offset); 5665 i = find_next_zero_bit(h->cmd_pool_bits,
4689 if (unlikely(i == h->nr_cmds)) { 5666 HPSA_NRESERVED_CMDS,
5667 offset);
5668 if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
4690 offset = 0; 5669 offset = 0;
4691 continue; 5670 continue;
4692 } 5671 }
@@ -4694,35 +5673,23 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
4694 refcount = atomic_inc_return(&c->refcount); 5673 refcount = atomic_inc_return(&c->refcount);
4695 if (unlikely(refcount > 1)) { 5674 if (unlikely(refcount > 1)) {
4696 cmd_free(h, c); /* already in use */ 5675 cmd_free(h, c); /* already in use */
4697 offset = (i + 1) % h->nr_cmds; 5676 offset = (i + 1) % HPSA_NRESERVED_CMDS;
4698 continue; 5677 continue;
4699 } 5678 }
4700 set_bit(i & (BITS_PER_LONG - 1), 5679 set_bit(i & (BITS_PER_LONG - 1),
4701 h->cmd_pool_bits + (i / BITS_PER_LONG)); 5680 h->cmd_pool_bits + (i / BITS_PER_LONG));
4702 break; /* it's ours now. */ 5681 break; /* it's ours now. */
4703 } 5682 }
4704 h->last_allocation = i; /* benignly racy */ 5683 hpsa_cmd_partial_init(h, i, c);
4705
4706 /* Zero out all of commandlist except the last field, refcount */
4707 memset(c, 0, offsetof(struct CommandList, refcount));
4708 c->Header.tag = cpu_to_le64((u64) (i << DIRECT_LOOKUP_SHIFT));
4709 cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(*c);
4710 c->err_info = h->errinfo_pool + i;
4711 memset(c->err_info, 0, sizeof(*c->err_info));
4712 err_dma_handle = h->errinfo_pool_dhandle
4713 + i * sizeof(*c->err_info);
4714
4715 c->cmdindex = i;
4716
4717 c->busaddr = (u32) cmd_dma_handle;
4718 temp64.val = (u64) err_dma_handle;
4719 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
4720 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
4721
4722 c->h = h;
4723 return c; 5684 return c;
4724} 5685}
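
The reworked cmd_alloc() claims a reserved slot by pairing a bitmap probe with a refcount that must go from zero to one; losing that race just advances to the next slot. A simplified compilable model driven by the refcounts alone (the bitmap fast path is dropped for brevity):

    #include <stdatomic.h>
    #include <stdio.h>

    #define NRESERVED 8	/* invented reserved-slot count */

    static atomic_int refcount[NRESERVED];

    /* Claim the first slot whose refcount goes 0 -> 1, probing round-robin.
     * Like the driver's loop, this never gives up: if every slot is busy
     * it spins until a matching free_slot() elsewhere releases one. */
    static int alloc_slot(void)
    {
    	int i = 0;

    	for (;;) {
    		if (atomic_fetch_add(&refcount[i], 1) == 0)
    			return i;			/* it's ours now */
    		atomic_fetch_sub(&refcount[i], 1);	/* already in use */
    		i = (i + 1) % NRESERVED;
    	}
    }

    static void free_slot(int i)
    {
    	atomic_fetch_sub(&refcount[i], 1);
    }

    int main(void)
    {
    	int a = alloc_slot(), b = alloc_slot();

    	printf("allocated slots %d and %d\n", a, b);
    	free_slot(a);
    	free_slot(b);
    	return 0;
    }
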
4725 5686
5687/*
5688 * This is the complementary operation to cmd_alloc(). Note, however, in some
5689 * corner cases it may also be used to free blocks allocated by
5690 * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
5691 * the clear-bit is harmless.
5692 */
4726static void cmd_free(struct ctlr_info *h, struct CommandList *c) 5693static void cmd_free(struct ctlr_info *h, struct CommandList *c)
4727{ 5694{
4728 if (atomic_dec_and_test(&c->refcount)) { 5695 if (atomic_dec_and_test(&c->refcount)) {
@@ -4900,7 +5867,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4900 if (iocommand.buf_size > 0) { 5867 if (iocommand.buf_size > 0) {
4901 buff = kmalloc(iocommand.buf_size, GFP_KERNEL); 5868 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
4902 if (buff == NULL) 5869 if (buff == NULL)
4903 return -EFAULT; 5870 return -ENOMEM;
4904 if (iocommand.Request.Type.Direction & XFER_WRITE) { 5871 if (iocommand.Request.Type.Direction & XFER_WRITE) {
4905 /* Copy the data into the buffer we created */ 5872 /* Copy the data into the buffer we created */
4906 if (copy_from_user(buff, iocommand.buf, 5873 if (copy_from_user(buff, iocommand.buf,
@@ -4913,12 +5880,10 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4913 } 5880 }
4914 } 5881 }
4915 c = cmd_alloc(h); 5882 c = cmd_alloc(h);
4916 if (c == NULL) { 5883
4917 rc = -ENOMEM;
4918 goto out_kfree;
4919 }
4920 /* Fill in the command type */ 5884 /* Fill in the command type */
4921 c->cmd_type = CMD_IOCTL_PEND; 5885 c->cmd_type = CMD_IOCTL_PEND;
5886 c->scsi_cmd = SCSI_CMD_BUSY;
4922 /* Fill in Command Header */ 5887 /* Fill in Command Header */
4923 c->Header.ReplyQueue = 0; /* unused in simple mode */ 5888 c->Header.ReplyQueue = 0; /* unused in simple mode */
4924 if (iocommand.buf_size > 0) { /* buffer to fill */ 5889 if (iocommand.buf_size > 0) { /* buffer to fill */
@@ -4948,10 +5913,14 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4948 c->SG[0].Len = cpu_to_le32(iocommand.buf_size); 5913 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
4949 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ 5914 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
4950 } 5915 }
4951 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); 5916 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
4952 if (iocommand.buf_size > 0) 5917 if (iocommand.buf_size > 0)
4953 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); 5918 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
4954 check_ioctl_unit_attention(h, c); 5919 check_ioctl_unit_attention(h, c);
5920 if (rc) {
5921 rc = -EIO;
5922 goto out;
5923 }
4955 5924
4956 /* Copy the error information out */ 5925 /* Copy the error information out */
4957 memcpy(&iocommand.error_info, c->err_info, 5926 memcpy(&iocommand.error_info, c->err_info,
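
[Editor's note] Two fixes land in this hunk: a failed kmalloc now returns -ENOMEM rather than -EFAULT (which means a bad user pointer, not exhaustion), and a command the controller never accepted maps to -EIO instead of being silently treated as success. A hedged, self-contained sketch of that errno mapping; run_passthru() and submit() are hypothetical stand-ins, not driver functions.

#include <errno.h>
#include <stdlib.h>

/* Allocation failure -> -ENOMEM; transport failure -> -EIO. */
static int run_passthru(size_t buf_size, int (*submit)(void *buf))
{
	void *buff = NULL;
	int rc = 0;

	if (buf_size > 0) {
		buff = calloc(1, buf_size);
		if (!buff)
			return -ENOMEM; /* was wrongly -EFAULT */
	}
	if (submit(buff))
		rc = -EIO; /* the controller never took the command */
	free(buff);
	return rc;
}
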
@@ -5048,11 +6017,9 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5048 sg_used++; 6017 sg_used++;
5049 } 6018 }
5050 c = cmd_alloc(h); 6019 c = cmd_alloc(h);
5051 if (c == NULL) { 6020
5052 status = -ENOMEM;
5053 goto cleanup1;
5054 }
5055 c->cmd_type = CMD_IOCTL_PEND; 6021 c->cmd_type = CMD_IOCTL_PEND;
6022 c->scsi_cmd = SCSI_CMD_BUSY;
5056 c->Header.ReplyQueue = 0; 6023 c->Header.ReplyQueue = 0;
5057 c->Header.SGList = (u8) sg_used; 6024 c->Header.SGList = (u8) sg_used;
5058 c->Header.SGTotal = cpu_to_le16(sg_used); 6025 c->Header.SGTotal = cpu_to_le16(sg_used);
@@ -5078,10 +6045,15 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5078 } 6045 }
5079 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST); 6046 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
5080 } 6047 }
5081 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); 6048 status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
5082 if (sg_used) 6049 if (sg_used)
5083 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); 6050 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
5084 check_ioctl_unit_attention(h, c); 6051 check_ioctl_unit_attention(h, c);
6052 if (status) {
6053 status = -EIO;
6054 goto cleanup0;
6055 }
6056
5085 /* Copy the error information out */ 6057 /* Copy the error information out */
5086 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); 6058 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
5087 if (copy_to_user(argp, ioc, sizeof(*ioc))) { 6059 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
@@ -5163,14 +6135,13 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
5163 } 6135 }
5164} 6136}
5165 6137
5166static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, 6138static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
5167 u8 reset_type) 6139 u8 reset_type)
5168{ 6140{
5169 struct CommandList *c; 6141 struct CommandList *c;
5170 6142
5171 c = cmd_alloc(h); 6143 c = cmd_alloc(h);
5172 if (!c) 6144
5173 return -ENOMEM;
5174 /* fill_cmd can't fail here, no data buffer to map */ 6145 /* fill_cmd can't fail here, no data buffer to map */
5175 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, 6146 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
5176 RAID_CTLR_LUNID, TYPE_MSG); 6147 RAID_CTLR_LUNID, TYPE_MSG);
@@ -5181,7 +6152,7 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
5181 * the command either. This is the last command we will send before 6152 * the command either. This is the last command we will send before
5182 * re-initializing everything, so it doesn't matter and won't leak. 6153 * re-initializing everything, so it doesn't matter and won't leak.
5183 */ 6154 */
5184 return 0; 6155 return;
5185} 6156}
5186 6157
5187static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, 6158static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
@@ -5189,9 +6160,10 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
5189 int cmd_type) 6160 int cmd_type)
5190{ 6161{
5191 int pci_dir = XFER_NONE; 6162 int pci_dir = XFER_NONE;
5192 struct CommandList *a; /* for commands to be aborted */ 6163 u64 tag; /* for commands to be aborted */
5193 6164
5194 c->cmd_type = CMD_IOCTL_PEND; 6165 c->cmd_type = CMD_IOCTL_PEND;
6166 c->scsi_cmd = SCSI_CMD_BUSY;
5195 c->Header.ReplyQueue = 0; 6167 c->Header.ReplyQueue = 0;
5196 if (buff != NULL && size > 0) { 6168 if (buff != NULL && size > 0) {
5197 c->Header.SGList = 1; 6169 c->Header.SGList = 1;
@@ -5305,10 +6277,10 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
5305 c->Request.CDB[7] = 0x00; 6277 c->Request.CDB[7] = 0x00;
5306 break; 6278 break;
5307 case HPSA_ABORT_MSG: 6279 case HPSA_ABORT_MSG:
5308 a = buff; /* point to command to be aborted */ 6280 memcpy(&tag, buff, sizeof(tag));
5309 dev_dbg(&h->pdev->dev, 6281 dev_dbg(&h->pdev->dev,
5310 "Abort Tag:0x%016llx request Tag:0x%016llx", 6282 "Abort Tag:0x%016llx using rqst Tag:0x%016llx",
5311 a->Header.tag, c->Header.tag); 6283 tag, c->Header.tag);
5312 c->Request.CDBLen = 16; 6284 c->Request.CDBLen = 16;
5313 c->Request.type_attr_dir = 6285 c->Request.type_attr_dir =
5314 TYPE_ATTR_DIR(cmd_type, 6286 TYPE_ATTR_DIR(cmd_type,
@@ -5319,8 +6291,7 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
5319 c->Request.CDB[2] = 0x00; /* reserved */ 6291 c->Request.CDB[2] = 0x00; /* reserved */
5320 c->Request.CDB[3] = 0x00; /* reserved */ 6292 c->Request.CDB[3] = 0x00; /* reserved */
5321 /* Tag to abort goes in CDB[4]-CDB[11] */ 6293 /* Tag to abort goes in CDB[4]-CDB[11] */
5322 memcpy(&c->Request.CDB[4], &a->Header.tag, 6294 memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
5323 sizeof(a->Header.tag));
5324 c->Request.CDB[12] = 0x00; /* reserved */ 6295 c->Request.CDB[12] = 0x00; /* reserved */
5325 c->Request.CDB[13] = 0x00; /* reserved */ 6296 c->Request.CDB[13] = 0x00; /* reserved */
5326 c->Request.CDB[14] = 0x00; /* reserved */ 6297 c->Request.CDB[14] = 0x00; /* reserved */
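
[Editor's note] Copying the tag out of the opaque buffer with memcpy, instead of casting buff to a CommandList pointer as the old `a = buff` did, avoids dereferencing caller memory through the wrong type and tolerates unaligned buffers; the 64-bit tag is then copied verbatim (host byte order, as in the driver) into CDB bytes 4 through 11. A minimal standalone sketch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack a 64-bit abort tag into CDB[4..11], as HPSA_ABORT_MSG does. */
static void pack_abort_tag(uint8_t cdb[16], const void *buff)
{
	uint64_t tag;

	memcpy(&tag, buff, sizeof(tag)); /* safe for unaligned buff */
	memcpy(&cdb[4], &tag, sizeof(tag));
}

int main(void)
{
	uint8_t cdb[16] = {0};
	uint64_t tag = 0x0123456789abcdefULL;

	pack_abort_tag(cdb, &tag);
	printf("CDB[4]=0x%02x CDB[11]=0x%02x\n", cdb[4], cdb[11]);
	return 0;
}
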
@@ -5399,7 +6370,7 @@ static inline void finish_cmd(struct CommandList *c)
5399 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI 6370 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
5400 || c->cmd_type == CMD_IOACCEL2)) 6371 || c->cmd_type == CMD_IOACCEL2))
5401 complete_scsi_command(c); 6372 complete_scsi_command(c);
5402 else if (c->cmd_type == CMD_IOCTL_PEND) 6373 else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
5403 complete(c->waiting); 6374 complete(c->waiting);
5404} 6375}
5405 6376
@@ -5733,7 +6704,7 @@ static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
5733/* This does a hard reset of the controller using PCI power management 6704/* This does a hard reset of the controller using PCI power management
5734 * states or the using the doorbell register. 6705 * states or the using the doorbell register.
5735 */ 6706 */
5736static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) 6707static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
5737{ 6708{
5738 u64 cfg_offset; 6709 u64 cfg_offset;
5739 u32 cfg_base_addr; 6710 u32 cfg_base_addr;
@@ -5744,7 +6715,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
5744 int rc; 6715 int rc;
5745 struct CfgTable __iomem *cfgtable; 6716 struct CfgTable __iomem *cfgtable;
5746 u32 use_doorbell; 6717 u32 use_doorbell;
5747 u32 board_id;
5748 u16 command_register; 6718 u16 command_register;
5749 6719
5750 /* For controllers as old as the P600, this is very nearly 6720 /* For controllers as old as the P600, this is very nearly
@@ -5760,11 +6730,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
5760 * using the doorbell register. 6730 * using the doorbell register.
5761 */ 6731 */
5762 6732
5763 rc = hpsa_lookup_board_id(pdev, &board_id);
5764 if (rc < 0) {
5765 dev_warn(&pdev->dev, "Board ID not found\n");
5766 return rc;
5767 }
5768 if (!ctlr_is_resettable(board_id)) { 6733 if (!ctlr_is_resettable(board_id)) {
5769 dev_warn(&pdev->dev, "Controller not resettable\n"); 6734 dev_warn(&pdev->dev, "Controller not resettable\n");
5770 return -ENODEV; 6735 return -ENODEV;
@@ -5930,10 +6895,22 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
5930 return -1; 6895 return -1;
5931} 6896}
5932 6897
6898static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
6899{
6900 if (h->msix_vector) {
6901 if (h->pdev->msix_enabled)
6902 pci_disable_msix(h->pdev);
6903 h->msix_vector = 0;
6904 } else if (h->msi_vector) {
6905 if (h->pdev->msi_enabled)
6906 pci_disable_msi(h->pdev);
6907 h->msi_vector = 0;
6908 }
6909}
6910
5933/* If MSI/MSI-X is supported by the kernel we will try to enable it on 6911/* If MSI/MSI-X is supported by the kernel we will try to enable it on
5934 * controllers that are capable. If not, we use legacy INTx mode. 6912 * controllers that are capable. If not, we use legacy INTx mode.
5935 */ 6913 */
5936
5937static void hpsa_interrupt_mode(struct ctlr_info *h) 6914static void hpsa_interrupt_mode(struct ctlr_info *h)
5938{ 6915{
5939#ifdef CONFIG_PCI_MSI 6916#ifdef CONFIG_PCI_MSI
@@ -6064,6 +7041,21 @@ static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
6064 return 0; 7041 return 0;
6065} 7042}
6066 7043
7044static void hpsa_free_cfgtables(struct ctlr_info *h)
7045{
7046 if (h->transtable) {
7047 iounmap(h->transtable);
7048 h->transtable = NULL;
7049 }
7050 if (h->cfgtable) {
7051 iounmap(h->cfgtable);
7052 h->cfgtable = NULL;
7053 }
7054}
7055
7056/* Find and map CISS config table and transfer table
7057 * several items must be unmapped (freed) later
7058 */
6067static int hpsa_find_cfgtables(struct ctlr_info *h) 7059static int hpsa_find_cfgtables(struct ctlr_info *h)
6068{ 7060{
6069 u64 cfg_offset; 7061 u64 cfg_offset;
@@ -6090,25 +7082,31 @@ static int hpsa_find_cfgtables(struct ctlr_info *h)
6090 h->transtable = remap_pci_mem(pci_resource_start(h->pdev, 7082 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
6091 cfg_base_addr_index)+cfg_offset+trans_offset, 7083 cfg_base_addr_index)+cfg_offset+trans_offset,
6092 sizeof(*h->transtable)); 7084 sizeof(*h->transtable));
6093 if (!h->transtable) 7085 if (!h->transtable) {
7086 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7087 hpsa_free_cfgtables(h);
6094 return -ENOMEM; 7088 return -ENOMEM;
7089 }
6095 return 0; 7090 return 0;
6096} 7091}
6097 7092
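
[Editor's note] hpsa_free_cfgtables() sets each pointer to NULL after unmapping, so the helper can be called from any later error path, even twice, without a double iounmap. The same idiom in plain C, with malloc/free standing in for remap_pci_mem/iounmap:

#include <stdlib.h>

struct tables {
	void *cfgtable;
	void *transtable;
};

/* Free-and-NULL teardown: safe to call twice or after partial setup. */
static void free_tables(struct tables *t)
{
	free(t->transtable);
	t->transtable = NULL;
	free(t->cfgtable);
	t->cfgtable = NULL;
}
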
6098static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) 7093static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
6099{ 7094{
6100 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); 7095#define MIN_MAX_COMMANDS 16
7096 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7097
7098 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
6101 7099
6102 /* Limit commands in memory limited kdump scenario. */ 7100 /* Limit commands in memory limited kdump scenario. */
6103 if (reset_devices && h->max_commands > 32) 7101 if (reset_devices && h->max_commands > 32)
6104 h->max_commands = 32; 7102 h->max_commands = 32;
6105 7103
6106 if (h->max_commands < 16) { 7104 if (h->max_commands < MIN_MAX_COMMANDS) {
6107 dev_warn(&h->pdev->dev, "Controller reports " 7105 dev_warn(&h->pdev->dev,
6108 "max supported commands of %d, an obvious lie. " 7106 "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n",
6109 "Using 16. Ensure that firmware is up to date.\n", 7107 h->max_commands,
6110 h->max_commands); 7108 MIN_MAX_COMMANDS);
6111 h->max_commands = 16; 7109 h->max_commands = MIN_MAX_COMMANDS;
6112 } 7110 }
6113} 7111}
6114 7112
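
[Editor's note] The new BUILD_BUG_ON encodes an invariant: the minimum command floor must still leave room for the driver's internal reserved commands. Standard C11 expresses the same compile-time check with _Static_assert; the constants below are illustrative values only, the driver's real ones live in hpsa.h.

#define HPSA_NRESERVED_CMDS	10	/* hypothetical value */
#define MIN_MAX_COMMANDS	16

/* Fails the build, like BUILD_BUG_ON, if the command floor cannot
 * cover the reserved internal command slots. */
_Static_assert(MIN_MAX_COMMANDS > HPSA_NRESERVED_CMDS,
	       "command floor must exceed reserved commands");

int main(void)
{
	return 0;
}
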
@@ -6153,6 +7151,8 @@ static void hpsa_find_board_params(struct ctlr_info *h)
6153 dev_warn(&h->pdev->dev, "Physical aborts not supported\n"); 7151 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
6154 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) 7152 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
6155 dev_warn(&h->pdev->dev, "Logical aborts not supported\n"); 7153 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
7154 if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7155 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
6156} 7156}
6157 7157
6158static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) 7158static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
@@ -6222,6 +7222,8 @@ static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
6222 * as we enter this code.) 7222 * as we enter this code.)
6223 */ 7223 */
6224 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) { 7224 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
7225 if (h->remove_in_progress)
7226 goto done;
6225 spin_lock_irqsave(&h->lock, flags); 7227 spin_lock_irqsave(&h->lock, flags);
6226 doorbell_value = readl(h->vaddr + SA5_DOORBELL); 7228 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6227 spin_unlock_irqrestore(&h->lock, flags); 7229 spin_unlock_irqrestore(&h->lock, flags);
@@ -6262,6 +7264,22 @@ error:
6262 return -ENODEV; 7264 return -ENODEV;
6263} 7265}
6264 7266
7267/* free items allocated or mapped by hpsa_pci_init */
7268static void hpsa_free_pci_init(struct ctlr_info *h)
7269{
7270 hpsa_free_cfgtables(h); /* pci_init 4 */
7271 iounmap(h->vaddr); /* pci_init 3 */
7272 h->vaddr = NULL;
7273 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
7274 /*
7275 * call pci_disable_device before pci_release_regions per
7276 * Documentation/PCI/pci.txt
7277 */
7278 pci_disable_device(h->pdev); /* pci_init 1 */
7279 pci_release_regions(h->pdev); /* pci_init 2 */
7280}
7281
7282/* several items must be freed later */
6265static int hpsa_pci_init(struct ctlr_info *h) 7283static int hpsa_pci_init(struct ctlr_info *h)
6266{ 7284{
6267 int prod_index, err; 7285 int prod_index, err;
@@ -6272,19 +7290,24 @@ static int hpsa_pci_init(struct ctlr_info *h)
6272 h->product_name = products[prod_index].product_name; 7290 h->product_name = products[prod_index].product_name;
6273 h->access = *(products[prod_index].access); 7291 h->access = *(products[prod_index].access);
6274 7292
7293 h->needs_abort_tags_swizzled =
7294 ctlr_needs_abort_tags_swizzled(h->board_id);
7295
6275 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | 7296 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
6276 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); 7297 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
6277 7298
6278 err = pci_enable_device(h->pdev); 7299 err = pci_enable_device(h->pdev);
6279 if (err) { 7300 if (err) {
6280 dev_warn(&h->pdev->dev, "unable to enable PCI device\n"); 7301 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
7302 pci_disable_device(h->pdev);
6281 return err; 7303 return err;
6282 } 7304 }
6283 7305
6284 err = pci_request_regions(h->pdev, HPSA); 7306 err = pci_request_regions(h->pdev, HPSA);
6285 if (err) { 7307 if (err) {
6286 dev_err(&h->pdev->dev, 7308 dev_err(&h->pdev->dev,
6287 "cannot obtain PCI resources, aborting\n"); 7309 "failed to obtain PCI resources\n");
7310 pci_disable_device(h->pdev);
6288 return err; 7311 return err;
6289 } 7312 }
6290 7313
@@ -6293,38 +7316,43 @@ static int hpsa_pci_init(struct ctlr_info *h)
6293 hpsa_interrupt_mode(h); 7316 hpsa_interrupt_mode(h);
6294 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); 7317 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
6295 if (err) 7318 if (err)
6296 goto err_out_free_res; 7319 goto clean2; /* intmode+region, pci */
6297 h->vaddr = remap_pci_mem(h->paddr, 0x250); 7320 h->vaddr = remap_pci_mem(h->paddr, 0x250);
6298 if (!h->vaddr) { 7321 if (!h->vaddr) {
7322 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
6299 err = -ENOMEM; 7323 err = -ENOMEM;
6300 goto err_out_free_res; 7324 goto clean2; /* intmode+region, pci */
6301 } 7325 }
6302 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); 7326 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
6303 if (err) 7327 if (err)
6304 goto err_out_free_res; 7328 goto clean3; /* vaddr, intmode+region, pci */
6305 err = hpsa_find_cfgtables(h); 7329 err = hpsa_find_cfgtables(h);
6306 if (err) 7330 if (err)
6307 goto err_out_free_res; 7331 goto clean3; /* vaddr, intmode+region, pci */
6308 hpsa_find_board_params(h); 7332 hpsa_find_board_params(h);
6309 7333
6310 if (!hpsa_CISS_signature_present(h)) { 7334 if (!hpsa_CISS_signature_present(h)) {
6311 err = -ENODEV; 7335 err = -ENODEV;
6312 goto err_out_free_res; 7336 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
6313 } 7337 }
6314 hpsa_set_driver_support_bits(h); 7338 hpsa_set_driver_support_bits(h);
6315 hpsa_p600_dma_prefetch_quirk(h); 7339 hpsa_p600_dma_prefetch_quirk(h);
6316 err = hpsa_enter_simple_mode(h); 7340 err = hpsa_enter_simple_mode(h);
6317 if (err) 7341 if (err)
6318 goto err_out_free_res; 7342 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
6319 return 0; 7343 return 0;
6320 7344
6321err_out_free_res: 7345clean4: /* cfgtables, vaddr, intmode+region, pci */
6322 if (h->transtable) 7346 hpsa_free_cfgtables(h);
6323 iounmap(h->transtable); 7347clean3: /* vaddr, intmode+region, pci */
6324 if (h->cfgtable) 7348 iounmap(h->vaddr);
6325 iounmap(h->cfgtable); 7349 h->vaddr = NULL;
6326 if (h->vaddr) 7350clean2: /* intmode+region, pci */
6327 iounmap(h->vaddr); 7351 hpsa_disable_interrupt_mode(h);
7352 /*
7353 * call pci_disable_device before pci_release_regions per
7354 * Documentation/PCI/pci.txt
7355 */
6328 pci_disable_device(h->pdev); 7356 pci_disable_device(h->pdev);
6329 pci_release_regions(h->pdev); 7357 pci_release_regions(h->pdev);
6330 return err; 7358 return err;
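
[Editor's note] The relabeled error path (clean2, clean3, clean4, each annotated with what is still held) releases resources in exact reverse order of acquisition. Reduced to a hedged standalone sketch:

#include <stdlib.h>

/* Acquire three resources; on failure, unwind only what is held,
 * newest first, like hpsa_pci_init()'s clean4/clean3/clean2. */
static int init_three(void **a, void **b, void **c)
{
	*a = malloc(16);
	if (!*a)
		return -1;
	*b = malloc(16);
	if (!*b)
		goto clean1; /* a */
	*c = malloc(16);
	if (!*c)
		goto clean2; /* b, a */
	return 0;

clean2: /* b, a */
	free(*b);
	*b = NULL;
clean1: /* a */
	free(*a);
	*a = NULL;
	return -1;
}

Annotating each label with the live resources, as this patch does, is what keeps later additions from jumping to the wrong unwind point.
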
@@ -6346,7 +7374,7 @@ static void hpsa_hba_inquiry(struct ctlr_info *h)
6346 } 7374 }
6347} 7375}
6348 7376
6349static int hpsa_init_reset_devices(struct pci_dev *pdev) 7377static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
6350{ 7378{
6351 int rc, i; 7379 int rc, i;
6352 void __iomem *vaddr; 7380 void __iomem *vaddr;
@@ -6382,7 +7410,7 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
6382 iounmap(vaddr); 7410 iounmap(vaddr);
6383 7411
6384 /* Reset the controller with a PCI power-cycle or via doorbell */ 7412 /* Reset the controller with a PCI power-cycle or via doorbell */
6385 rc = hpsa_kdump_hard_reset_controller(pdev); 7413 rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
6386 7414
6387 /* -ENOTSUPP here means we cannot reset the controller 7415 /* -ENOTSUPP here means we cannot reset the controller
6388 * but it's already (and still) up and running in 7416 * but it's already (and still) up and running in
@@ -6408,7 +7436,29 @@ out_disable:
6408 return rc; 7436 return rc;
6409} 7437}
6410 7438
6411static int hpsa_allocate_cmd_pool(struct ctlr_info *h) 7439static void hpsa_free_cmd_pool(struct ctlr_info *h)
7440{
7441 kfree(h->cmd_pool_bits);
7442 h->cmd_pool_bits = NULL;
7443 if (h->cmd_pool) {
7444 pci_free_consistent(h->pdev,
7445 h->nr_cmds * sizeof(struct CommandList),
7446 h->cmd_pool,
7447 h->cmd_pool_dhandle);
7448 h->cmd_pool = NULL;
7449 h->cmd_pool_dhandle = 0;
7450 }
7451 if (h->errinfo_pool) {
7452 pci_free_consistent(h->pdev,
7453 h->nr_cmds * sizeof(struct ErrorInfo),
7454 h->errinfo_pool,
7455 h->errinfo_pool_dhandle);
7456 h->errinfo_pool = NULL;
7457 h->errinfo_pool_dhandle = 0;
7458 }
7459}
7460
7461static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
6412{ 7462{
6413 h->cmd_pool_bits = kzalloc( 7463 h->cmd_pool_bits = kzalloc(
6414 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * 7464 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
@@ -6425,34 +7475,13 @@ static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
6425 dev_err(&h->pdev->dev, "out of memory in %s", __func__); 7475 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
6426 goto clean_up; 7476 goto clean_up;
6427 } 7477 }
7478 hpsa_preinitialize_commands(h);
6428 return 0; 7479 return 0;
6429clean_up: 7480clean_up:
6430 hpsa_free_cmd_pool(h); 7481 hpsa_free_cmd_pool(h);
6431 return -ENOMEM; 7482 return -ENOMEM;
6432} 7483}
6433 7484
6434static void hpsa_free_cmd_pool(struct ctlr_info *h)
6435{
6436 kfree(h->cmd_pool_bits);
6437 if (h->cmd_pool)
6438 pci_free_consistent(h->pdev,
6439 h->nr_cmds * sizeof(struct CommandList),
6440 h->cmd_pool, h->cmd_pool_dhandle);
6441 if (h->ioaccel2_cmd_pool)
6442 pci_free_consistent(h->pdev,
6443 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
6444 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
6445 if (h->errinfo_pool)
6446 pci_free_consistent(h->pdev,
6447 h->nr_cmds * sizeof(struct ErrorInfo),
6448 h->errinfo_pool,
6449 h->errinfo_pool_dhandle);
6450 if (h->ioaccel_cmd_pool)
6451 pci_free_consistent(h->pdev,
6452 h->nr_cmds * sizeof(struct io_accel1_cmd),
6453 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
6454}
6455
6456static void hpsa_irq_affinity_hints(struct ctlr_info *h) 7485static void hpsa_irq_affinity_hints(struct ctlr_info *h)
6457{ 7486{
6458 int i, cpu; 7487 int i, cpu;
@@ -6474,12 +7503,14 @@ static void hpsa_free_irqs(struct ctlr_info *h)
6474 i = h->intr_mode; 7503 i = h->intr_mode;
6475 irq_set_affinity_hint(h->intr[i], NULL); 7504 irq_set_affinity_hint(h->intr[i], NULL);
6476 free_irq(h->intr[i], &h->q[i]); 7505 free_irq(h->intr[i], &h->q[i]);
7506 h->q[i] = 0;
6477 return; 7507 return;
6478 } 7508 }
6479 7509
6480 for (i = 0; i < h->msix_vector; i++) { 7510 for (i = 0; i < h->msix_vector; i++) {
6481 irq_set_affinity_hint(h->intr[i], NULL); 7511 irq_set_affinity_hint(h->intr[i], NULL);
6482 free_irq(h->intr[i], &h->q[i]); 7512 free_irq(h->intr[i], &h->q[i]);
7513 h->q[i] = 0;
6483 } 7514 }
6484 for (; i < MAX_REPLY_QUEUES; i++) 7515 for (; i < MAX_REPLY_QUEUES; i++)
6485 h->q[i] = 0; 7516 h->q[i] = 0;
@@ -6502,8 +7533,9 @@ static int hpsa_request_irqs(struct ctlr_info *h,
6502 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) { 7533 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
6503 /* If performant mode and MSI-X, use multiple reply queues */ 7534 /* If performant mode and MSI-X, use multiple reply queues */
6504 for (i = 0; i < h->msix_vector; i++) { 7535 for (i = 0; i < h->msix_vector; i++) {
7536 sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
6505 rc = request_irq(h->intr[i], msixhandler, 7537 rc = request_irq(h->intr[i], msixhandler,
6506 0, h->devname, 7538 0, h->intrname[i],
6507 &h->q[i]); 7539 &h->q[i]);
6508 if (rc) { 7540 if (rc) {
6509 int j; 7541 int j;
@@ -6524,18 +7556,30 @@ static int hpsa_request_irqs(struct ctlr_info *h,
6524 } else { 7556 } else {
6525 /* Use single reply pool */ 7557 /* Use single reply pool */
6526 if (h->msix_vector > 0 || h->msi_vector) { 7558 if (h->msix_vector > 0 || h->msi_vector) {
7559 if (h->msix_vector)
7560 sprintf(h->intrname[h->intr_mode],
7561 "%s-msix", h->devname);
7562 else
7563 sprintf(h->intrname[h->intr_mode],
7564 "%s-msi", h->devname);
6527 rc = request_irq(h->intr[h->intr_mode], 7565 rc = request_irq(h->intr[h->intr_mode],
6528 msixhandler, 0, h->devname, 7566 msixhandler, 0,
7567 h->intrname[h->intr_mode],
6529 &h->q[h->intr_mode]); 7568 &h->q[h->intr_mode]);
6530 } else { 7569 } else {
7570 sprintf(h->intrname[h->intr_mode],
7571 "%s-intx", h->devname);
6531 rc = request_irq(h->intr[h->intr_mode], 7572 rc = request_irq(h->intr[h->intr_mode],
6532 intxhandler, IRQF_SHARED, h->devname, 7573 intxhandler, IRQF_SHARED,
7574 h->intrname[h->intr_mode],
6533 &h->q[h->intr_mode]); 7575 &h->q[h->intr_mode]);
6534 } 7576 }
7577 irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
6535 } 7578 }
6536 if (rc) { 7579 if (rc) {
6537 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n", 7580 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
6538 h->intr[h->intr_mode], h->devname); 7581 h->intr[h->intr_mode], h->devname);
7582 hpsa_free_irqs(h);
6539 return -ENODEV; 7583 return -ENODEV;
6540 } 7584 }
6541 return 0; 7585 return 0;
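
[Editor's note] Each MSI-X vector is now registered under its own name, built from the device name plus the vector index, so /proc/interrupts can attribute load per reply queue; the driver sprintf's into fixed 16-byte intrname slots. A sketch of the naming step, using snprintf as the bounds-checked equivalent:

#include <stdio.h>

#define MAX_REPLY_QUEUES 64

int main(void)
{
	char intrname[MAX_REPLY_QUEUES][16];
	const char *devname = "hpsa0";
	int i;

	for (i = 0; i < 4; i++) {
		snprintf(intrname[i], sizeof(intrname[i]),
			 "%s-msix%d", devname, i);
		printf("%s\n", intrname[i]);
	}
	return 0;
}
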
@@ -6543,42 +7587,27 @@ static int hpsa_request_irqs(struct ctlr_info *h,
6543 7587
6544static int hpsa_kdump_soft_reset(struct ctlr_info *h) 7588static int hpsa_kdump_soft_reset(struct ctlr_info *h)
6545{ 7589{
6546 if (hpsa_send_host_reset(h, RAID_CTLR_LUNID, 7590 int rc;
6547 HPSA_RESET_TYPE_CONTROLLER)) { 7591 hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
6548 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
6549 return -EIO;
6550 }
6551 7592
6552 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); 7593 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
6553 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) { 7594 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
7595 if (rc) {
6554 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); 7596 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
6555 return -1; 7597 return rc;
6556 } 7598 }
6557 7599
6558 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); 7600 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
6559 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) { 7601 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7602 if (rc) {
6560 dev_warn(&h->pdev->dev, "Board failed to become ready " 7603 dev_warn(&h->pdev->dev, "Board failed to become ready "
6561 "after soft reset.\n"); 7604 "after soft reset.\n");
6562 return -1; 7605 return rc;
6563 } 7606 }
6564 7607
6565 return 0; 7608 return 0;
6566} 7609}
6567 7610
6568static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
6569{
6570 hpsa_free_irqs(h);
6571#ifdef CONFIG_PCI_MSI
6572 if (h->msix_vector) {
6573 if (h->pdev->msix_enabled)
6574 pci_disable_msix(h->pdev);
6575 } else if (h->msi_vector) {
6576 if (h->pdev->msi_enabled)
6577 pci_disable_msi(h->pdev);
6578 }
6579#endif /* CONFIG_PCI_MSI */
6580}
6581
6582static void hpsa_free_reply_queues(struct ctlr_info *h) 7611static void hpsa_free_reply_queues(struct ctlr_info *h)
6583{ 7612{
6584 int i; 7613 int i;
@@ -6586,30 +7615,36 @@ static void hpsa_free_reply_queues(struct ctlr_info *h)
6586 for (i = 0; i < h->nreply_queues; i++) { 7615 for (i = 0; i < h->nreply_queues; i++) {
6587 if (!h->reply_queue[i].head) 7616 if (!h->reply_queue[i].head)
6588 continue; 7617 continue;
6589 pci_free_consistent(h->pdev, h->reply_queue_size, 7618 pci_free_consistent(h->pdev,
6590 h->reply_queue[i].head, h->reply_queue[i].busaddr); 7619 h->reply_queue_size,
7620 h->reply_queue[i].head,
7621 h->reply_queue[i].busaddr);
6591 h->reply_queue[i].head = NULL; 7622 h->reply_queue[i].head = NULL;
6592 h->reply_queue[i].busaddr = 0; 7623 h->reply_queue[i].busaddr = 0;
6593 } 7624 }
7625 h->reply_queue_size = 0;
6594} 7626}
6595 7627
6596static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) 7628static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
6597{ 7629{
6598 hpsa_free_irqs_and_disable_msix(h); 7630 hpsa_free_performant_mode(h); /* init_one 7 */
6599 hpsa_free_sg_chain_blocks(h); 7631 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
6600 hpsa_free_cmd_pool(h); 7632 hpsa_free_cmd_pool(h); /* init_one 5 */
6601 kfree(h->ioaccel1_blockFetchTable); 7633 hpsa_free_irqs(h); /* init_one 4 */
6602 kfree(h->blockFetchTable); 7634 scsi_host_put(h->scsi_host); /* init_one 3 */
6603 hpsa_free_reply_queues(h); 7635 h->scsi_host = NULL; /* init_one 3 */
6604 if (h->vaddr) 7636 hpsa_free_pci_init(h); /* init_one 2_5 */
6605 iounmap(h->vaddr); 7637 free_percpu(h->lockup_detected); /* init_one 2 */
6606 if (h->transtable) 7638 h->lockup_detected = NULL; /* init_one 2 */
6607 iounmap(h->transtable); 7639 if (h->resubmit_wq) {
6608 if (h->cfgtable) 7640 destroy_workqueue(h->resubmit_wq); /* init_one 1 */
6609 iounmap(h->cfgtable); 7641 h->resubmit_wq = NULL;
6610 pci_disable_device(h->pdev); 7642 }
6611 pci_release_regions(h->pdev); 7643 if (h->rescan_ctlr_wq) {
6612 kfree(h); 7644 destroy_workqueue(h->rescan_ctlr_wq);
7645 h->rescan_ctlr_wq = NULL;
7646 }
7647 kfree(h); /* init_one 1 */
6613} 7648}
6614 7649
6615/* Called when controller lockup detected. */ 7650/* Called when controller lockup detected. */
@@ -6617,17 +7652,22 @@ static void fail_all_outstanding_cmds(struct ctlr_info *h)
6617{ 7652{
6618 int i, refcount; 7653 int i, refcount;
6619 struct CommandList *c; 7654 struct CommandList *c;
7655 int failcount = 0;
6620 7656
6621 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */ 7657 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
6622 for (i = 0; i < h->nr_cmds; i++) { 7658 for (i = 0; i < h->nr_cmds; i++) {
6623 c = h->cmd_pool + i; 7659 c = h->cmd_pool + i;
6624 refcount = atomic_inc_return(&c->refcount); 7660 refcount = atomic_inc_return(&c->refcount);
6625 if (refcount > 1) { 7661 if (refcount > 1) {
6626 c->err_info->CommandStatus = CMD_HARDWARE_ERR; 7662 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
6627 finish_cmd(c); 7663 finish_cmd(c);
7664 atomic_dec(&h->commands_outstanding);
7665 failcount++;
6628 } 7666 }
6629 cmd_free(h, c); 7667 cmd_free(h, c);
6630 } 7668 }
7669 dev_warn(&h->pdev->dev,
7670 "failed %d commands in fail_all\n", failcount);
6631} 7671}
6632 7672
6633static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) 7673static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
@@ -6653,18 +7693,19 @@ static void controller_lockup_detected(struct ctlr_info *h)
6653 if (!lockup_detected) { 7693 if (!lockup_detected) {
6654 /* no heartbeat, but controller gave us a zero. */ 7694 /* no heartbeat, but controller gave us a zero. */
6655 dev_warn(&h->pdev->dev, 7695 dev_warn(&h->pdev->dev,
6656 "lockup detected but scratchpad register is zero\n"); 7696 "lockup detected after %d but scratchpad register is zero\n",
7697 h->heartbeat_sample_interval / HZ);
6657 lockup_detected = 0xffffffff; 7698 lockup_detected = 0xffffffff;
6658 } 7699 }
6659 set_lockup_detected_for_all_cpus(h, lockup_detected); 7700 set_lockup_detected_for_all_cpus(h, lockup_detected);
6660 spin_unlock_irqrestore(&h->lock, flags); 7701 spin_unlock_irqrestore(&h->lock, flags);
6661 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n", 7702 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d seconds\n",
6662 lockup_detected); 7703 lockup_detected, h->heartbeat_sample_interval / HZ);
6663 pci_disable_device(h->pdev); 7704 pci_disable_device(h->pdev);
6664 fail_all_outstanding_cmds(h); 7705 fail_all_outstanding_cmds(h);
6665} 7706}
6666 7707
6667static void detect_controller_lockup(struct ctlr_info *h) 7708static int detect_controller_lockup(struct ctlr_info *h)
6668{ 7709{
6669 u64 now; 7710 u64 now;
6670 u32 heartbeat; 7711 u32 heartbeat;
@@ -6674,7 +7715,7 @@ static void detect_controller_lockup(struct ctlr_info *h)
6674 /* If we've received an interrupt recently, we're ok. */ 7715 /* If we've received an interrupt recently, we're ok. */
6675 if (time_after64(h->last_intr_timestamp + 7716 if (time_after64(h->last_intr_timestamp +
6676 (h->heartbeat_sample_interval), now)) 7717 (h->heartbeat_sample_interval), now))
6677 return; 7718 return false;
6678 7719
6679 /* 7720 /*
6680 * If we've already checked the heartbeat recently, we're ok. 7721 * If we've already checked the heartbeat recently, we're ok.
@@ -6683,7 +7724,7 @@ static void detect_controller_lockup(struct ctlr_info *h)
6683 */ 7724 */
6684 if (time_after64(h->last_heartbeat_timestamp + 7725 if (time_after64(h->last_heartbeat_timestamp +
6685 (h->heartbeat_sample_interval), now)) 7726 (h->heartbeat_sample_interval), now))
6686 return; 7727 return false;
6687 7728
6688 /* If heartbeat has not changed since we last looked, we're not ok. */ 7729 /* If heartbeat has not changed since we last looked, we're not ok. */
6689 spin_lock_irqsave(&h->lock, flags); 7730 spin_lock_irqsave(&h->lock, flags);
@@ -6691,12 +7732,13 @@ static void detect_controller_lockup(struct ctlr_info *h)
6691 spin_unlock_irqrestore(&h->lock, flags); 7732 spin_unlock_irqrestore(&h->lock, flags);
6692 if (h->last_heartbeat == heartbeat) { 7733 if (h->last_heartbeat == heartbeat) {
6693 controller_lockup_detected(h); 7734 controller_lockup_detected(h);
6694 return; 7735 return true;
6695 } 7736 }
6696 7737
6697 /* We're ok. */ 7738 /* We're ok. */
6698 h->last_heartbeat = heartbeat; 7739 h->last_heartbeat = heartbeat;
6699 h->last_heartbeat_timestamp = now; 7740 h->last_heartbeat_timestamp = now;
7741 return false;
6700} 7742}
6701 7743
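
[Editor's note] detect_controller_lockup() now reports its result. The logic is a rate-limited watchdog: sample the firmware heartbeat counter at most once per interval, and declare lockup only when the counter has failed to advance across a whole interval. A minimal sketch of that check (the driver additionally trusts a recent interrupt as proof of life):

#include <stdbool.h>
#include <stdint.h>

struct hb_state {
	uint64_t last_sample_time;
	uint32_t last_heartbeat;
	uint64_t interval;
};

/* True only when the heartbeat failed to advance across one whole
 * sampling interval; a sketch of detect_controller_lockup(). */
static bool heartbeat_locked_up(struct hb_state *s, uint64_t now,
				uint32_t heartbeat)
{
	if (now < s->last_sample_time + s->interval)
		return false; /* sampled too recently to judge */
	if (heartbeat == s->last_heartbeat)
		return true; /* no progress in a full interval */
	s->last_heartbeat = heartbeat;
	s->last_sample_time = now;
	return false;
}
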
6702static void hpsa_ack_ctlr_events(struct ctlr_info *h) 7744static void hpsa_ack_ctlr_events(struct ctlr_info *h)
@@ -6843,11 +7885,18 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6843 struct ctlr_info *h; 7885 struct ctlr_info *h;
6844 int try_soft_reset = 0; 7886 int try_soft_reset = 0;
6845 unsigned long flags; 7887 unsigned long flags;
7888 u32 board_id;
6846 7889
6847 if (number_of_controllers == 0) 7890 if (number_of_controllers == 0)
6848 printk(KERN_INFO DRIVER_NAME "\n"); 7891 printk(KERN_INFO DRIVER_NAME "\n");
6849 7892
6850 rc = hpsa_init_reset_devices(pdev); 7893 rc = hpsa_lookup_board_id(pdev, &board_id);
7894 if (rc < 0) {
7895 dev_warn(&pdev->dev, "Board ID not found\n");
7896 return rc;
7897 }
7898
7899 rc = hpsa_init_reset_devices(pdev, board_id);
6851 if (rc) { 7900 if (rc) {
6852 if (rc != -ENOTSUPP) 7901 if (rc != -ENOTSUPP)
6853 return rc; 7902 return rc;
@@ -6868,42 +7917,41 @@ reinit_after_soft_reset:
6868 */ 7917 */
6869 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); 7918 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
6870 h = kzalloc(sizeof(*h), GFP_KERNEL); 7919 h = kzalloc(sizeof(*h), GFP_KERNEL);
6871 if (!h) 7920 if (!h) {
7921 dev_err(&pdev->dev, "Failed to allocate controller head\n");
6872 return -ENOMEM; 7922 return -ENOMEM;
7923 }
6873 7924
6874 h->pdev = pdev; 7925 h->pdev = pdev;
7926
6875 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; 7927 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
6876 INIT_LIST_HEAD(&h->offline_device_list); 7928 INIT_LIST_HEAD(&h->offline_device_list);
6877 spin_lock_init(&h->lock); 7929 spin_lock_init(&h->lock);
6878 spin_lock_init(&h->offline_device_lock); 7930 spin_lock_init(&h->offline_device_lock);
6879 spin_lock_init(&h->scan_lock); 7931 spin_lock_init(&h->scan_lock);
6880 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS); 7932 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
6881 7933 atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);
6882 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
6883 if (!h->rescan_ctlr_wq) {
6884 rc = -ENOMEM;
6885 goto clean1;
6886 }
6887
6888 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
6889 if (!h->resubmit_wq) {
6890 rc = -ENOMEM;
6891 goto clean1;
6892 }
6893 7934
6894 /* Allocate and clear per-cpu variable lockup_detected */ 7935 /* Allocate and clear per-cpu variable lockup_detected */
6895 h->lockup_detected = alloc_percpu(u32); 7936 h->lockup_detected = alloc_percpu(u32);
6896 if (!h->lockup_detected) { 7937 if (!h->lockup_detected) {
7938 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
6897 rc = -ENOMEM; 7939 rc = -ENOMEM;
6898 goto clean1; 7940 goto clean1; /* aer/h */
6899 } 7941 }
6900 set_lockup_detected_for_all_cpus(h, 0); 7942 set_lockup_detected_for_all_cpus(h, 0);
6901 7943
6902 rc = hpsa_pci_init(h); 7944 rc = hpsa_pci_init(h);
6903 if (rc != 0) 7945 if (rc)
6904 goto clean1; 7946 goto clean2; /* lu, aer/h */
7947
7948 /* relies on h-> settings made by hpsa_pci_init, including
7949 * interrupt_mode h->intr */
7950 rc = hpsa_scsi_host_alloc(h);
7951 if (rc)
7952 goto clean2_5; /* pci, lu, aer/h */
6905 7953
6906 sprintf(h->devname, HPSA "%d", number_of_controllers); 7954 sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
6907 h->ctlr = number_of_controllers; 7955 h->ctlr = number_of_controllers;
6908 number_of_controllers++; 7956 number_of_controllers++;
6909 7957
@@ -6917,34 +7965,57 @@ reinit_after_soft_reset:
6917 dac = 0; 7965 dac = 0;
6918 } else { 7966 } else {
6919 dev_err(&pdev->dev, "no suitable DMA available\n"); 7967 dev_err(&pdev->dev, "no suitable DMA available\n");
6920 goto clean1; 7968 goto clean3; /* shost, pci, lu, aer/h */
6921 } 7969 }
6922 } 7970 }
6923 7971
6924 /* make sure the board interrupts are off */ 7972 /* make sure the board interrupts are off */
6925 h->access.set_intr_mask(h, HPSA_INTR_OFF); 7973 h->access.set_intr_mask(h, HPSA_INTR_OFF);
6926 7974
6927 if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx)) 7975 rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
6928 goto clean2; 7976 if (rc)
6929 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", 7977 goto clean3; /* shost, pci, lu, aer/h */
6930 h->devname, pdev->device, 7978 rc = hpsa_alloc_cmd_pool(h);
6931 h->intr[h->intr_mode], dac ? "" : " not");
6932 rc = hpsa_allocate_cmd_pool(h);
6933 if (rc) 7979 if (rc)
6934 goto clean2_and_free_irqs; 7980 goto clean4; /* irq, shost, pci, lu, aer/h */
6935 if (hpsa_allocate_sg_chain_blocks(h)) 7981 rc = hpsa_alloc_sg_chain_blocks(h);
6936 goto clean4; 7982 if (rc)
7983 goto clean5; /* cmd, irq, shost, pci, lu, aer/h */
6937 init_waitqueue_head(&h->scan_wait_queue); 7984 init_waitqueue_head(&h->scan_wait_queue);
7985 init_waitqueue_head(&h->abort_cmd_wait_queue);
7986 init_waitqueue_head(&h->event_sync_wait_queue);
7987 mutex_init(&h->reset_mutex);
6938 h->scan_finished = 1; /* no scan currently in progress */ 7988 h->scan_finished = 1; /* no scan currently in progress */
6939 7989
6940 pci_set_drvdata(pdev, h); 7990 pci_set_drvdata(pdev, h);
6941 h->ndevices = 0; 7991 h->ndevices = 0;
6942 h->hba_mode_enabled = 0; 7992 h->hba_mode_enabled = 0;
6943 h->scsi_host = NULL; 7993
6944 spin_lock_init(&h->devlock); 7994 spin_lock_init(&h->devlock);
6945 hpsa_put_ctlr_into_performant_mode(h); 7995 rc = hpsa_put_ctlr_into_performant_mode(h);
7996 if (rc)
7997 goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
7998
7999 /* hook into SCSI subsystem */
8000 rc = hpsa_scsi_add_host(h);
8001 if (rc)
8002 goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
8003
8004 /* create the resubmit workqueue */
8005 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
8006 if (!h->rescan_ctlr_wq) {
8007 rc = -ENOMEM;
8008 goto clean7;
8009 }
6946 8010
6947 /* At this point, the controller is ready to take commands. 8011 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
8012 if (!h->resubmit_wq) {
8013 rc = -ENOMEM;
8014 goto clean7; /* aer/h */
8015 }
8016
8017 /*
8018 * At this point, the controller is ready to take commands.
6948 * Now, if reset_devices and the hard reset didn't work, try 8019 * Now, if reset_devices and the hard reset didn't work, try
6949 * the soft reset and see if that works. 8020 * the soft reset and see if that works.
6950 */ 8021 */
@@ -6966,13 +8037,24 @@ reinit_after_soft_reset:
6966 if (rc) { 8037 if (rc) {
6967 dev_warn(&h->pdev->dev, 8038 dev_warn(&h->pdev->dev,
6968 "Failed to request_irq after soft reset.\n"); 8039 "Failed to request_irq after soft reset.\n");
6969 goto clean4; 8040 /*
8041 * cannot goto clean7 or free_irqs will be called
8042 * again. Instead, do its work
8043 */
8044 hpsa_free_performant_mode(h); /* clean7 */
8045 hpsa_free_sg_chain_blocks(h); /* clean6 */
8046 hpsa_free_cmd_pool(h); /* clean5 */
8047 /*
8048 * skip hpsa_free_irqs(h) clean4 since that
8049 * was just called before request_irqs failed
8050 */
8051 goto clean3;
6970 } 8052 }
6971 8053
6972 rc = hpsa_kdump_soft_reset(h); 8054 rc = hpsa_kdump_soft_reset(h);
6973 if (rc) 8055 if (rc)
6974 /* Neither hard nor soft reset worked, we're hosed. */ 8056 /* Neither hard nor soft reset worked, we're hosed. */
6975 goto clean4; 8057 goto clean9;
6976 8058
6977 dev_info(&h->pdev->dev, "Board READY.\n"); 8059 dev_info(&h->pdev->dev, "Board READY.\n");
6978 dev_info(&h->pdev->dev, 8060 dev_info(&h->pdev->dev,
@@ -6993,21 +8075,20 @@ reinit_after_soft_reset:
6993 hpsa_undo_allocations_after_kdump_soft_reset(h); 8075 hpsa_undo_allocations_after_kdump_soft_reset(h);
6994 try_soft_reset = 0; 8076 try_soft_reset = 0;
6995 if (rc) 8077 if (rc)
6996 /* don't go to clean4, we already unallocated */ 8078 /* don't goto clean, we already unallocated */
6997 return -ENODEV; 8079 return -ENODEV;
6998 8080
6999 goto reinit_after_soft_reset; 8081 goto reinit_after_soft_reset;
7000 } 8082 }
7001 8083
7002 /* Enable Accelerated IO path at driver layer */ 8084 /* Enable Accelerated IO path at driver layer */
7003 h->acciopath_status = 1; 8085 h->acciopath_status = 1;
7004 8086
7005 8087
7006 /* Turn the interrupts on so we can service requests */ 8088 /* Turn the interrupts on so we can service requests */
7007 h->access.set_intr_mask(h, HPSA_INTR_ON); 8089 h->access.set_intr_mask(h, HPSA_INTR_ON);
7008 8090
7009 hpsa_hba_inquiry(h); 8091 hpsa_hba_inquiry(h);
7010 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
7011 8092
7012 /* Monitor the controller for firmware lockups */ 8093 /* Monitor the controller for firmware lockups */
7013 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; 8094 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
@@ -7019,19 +8100,36 @@ reinit_after_soft_reset:
7019 h->heartbeat_sample_interval); 8100 h->heartbeat_sample_interval);
7020 return 0; 8101 return 0;
7021 8102
7022clean4: 8103clean9: /* wq, sh, perf, sg, cmd, irq, shost, pci, lu, aer/h */
8104 kfree(h->hba_inquiry_data);
8105clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
8106 hpsa_free_performant_mode(h);
8107 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8108clean6: /* sg, cmd, irq, shost, pci, lu, aer/h */
7023 hpsa_free_sg_chain_blocks(h); 8109 hpsa_free_sg_chain_blocks(h);
8110clean5: /* cmd, irq, shost, pci, lu, aer/h */
7024 hpsa_free_cmd_pool(h); 8111 hpsa_free_cmd_pool(h);
7025clean2_and_free_irqs: 8112clean4: /* irq, shost, pci, lu, aer/h */
7026 hpsa_free_irqs(h); 8113 hpsa_free_irqs(h);
7027clean2: 8114clean3: /* shost, pci, lu, aer/h */
7028clean1: 8115 scsi_host_put(h->scsi_host);
7029 if (h->resubmit_wq) 8116 h->scsi_host = NULL;
8117clean2_5: /* pci, lu, aer/h */
8118 hpsa_free_pci_init(h);
8119clean2: /* lu, aer/h */
8120 if (h->lockup_detected) {
8121 free_percpu(h->lockup_detected);
8122 h->lockup_detected = NULL;
8123 }
8124clean1: /* wq/aer/h */
8125 if (h->resubmit_wq) {
7030 destroy_workqueue(h->resubmit_wq); 8126 destroy_workqueue(h->resubmit_wq);
7031 if (h->rescan_ctlr_wq) 8127 h->resubmit_wq = NULL;
8128 }
8129 if (h->rescan_ctlr_wq) {
7032 destroy_workqueue(h->rescan_ctlr_wq); 8130 destroy_workqueue(h->rescan_ctlr_wq);
7033 if (h->lockup_detected) 8131 h->rescan_ctlr_wq = NULL;
7034 free_percpu(h->lockup_detected); 8132 }
7035 kfree(h); 8133 kfree(h);
7036 return rc; 8134 return rc;
7037} 8135}
@@ -7040,8 +8138,8 @@ static void hpsa_flush_cache(struct ctlr_info *h)
7040{ 8138{
7041 char *flush_buf; 8139 char *flush_buf;
7042 struct CommandList *c; 8140 struct CommandList *c;
8141 int rc;
7043 8142
7044 /* Don't bother trying to flush the cache if locked up */
7045 if (unlikely(lockup_detected(h))) 8143 if (unlikely(lockup_detected(h)))
7046 return; 8144 return;
7047 flush_buf = kzalloc(4, GFP_KERNEL); 8145 flush_buf = kzalloc(4, GFP_KERNEL);
@@ -7049,21 +8147,20 @@ static void hpsa_flush_cache(struct ctlr_info *h)
7049 return; 8147 return;
7050 8148
7051 c = cmd_alloc(h); 8149 c = cmd_alloc(h);
7052 if (!c) { 8150
7053 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
7054 goto out_of_memory;
7055 }
7056 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, 8151 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
7057 RAID_CTLR_LUNID, TYPE_CMD)) { 8152 RAID_CTLR_LUNID, TYPE_CMD)) {
7058 goto out; 8153 goto out;
7059 } 8154 }
7060 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE); 8155 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8156 PCI_DMA_TODEVICE, NO_TIMEOUT);
8157 if (rc)
8158 goto out;
7061 if (c->err_info->CommandStatus != 0) 8159 if (c->err_info->CommandStatus != 0)
7062out: 8160out:
7063 dev_warn(&h->pdev->dev, 8161 dev_warn(&h->pdev->dev,
7064 "error flushing cache on controller\n"); 8162 "error flushing cache on controller\n");
7065 cmd_free(h, c); 8163 cmd_free(h, c);
7066out_of_memory:
7067 kfree(flush_buf); 8164 kfree(flush_buf);
7068} 8165}
7069 8166
@@ -7078,15 +8175,18 @@ static void hpsa_shutdown(struct pci_dev *pdev)
7078 */ 8175 */
7079 hpsa_flush_cache(h); 8176 hpsa_flush_cache(h);
7080 h->access.set_intr_mask(h, HPSA_INTR_OFF); 8177 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7081 hpsa_free_irqs_and_disable_msix(h); 8178 hpsa_free_irqs(h); /* init_one 4 */
8179 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
7082} 8180}
7083 8181
7084static void hpsa_free_device_info(struct ctlr_info *h) 8182static void hpsa_free_device_info(struct ctlr_info *h)
7085{ 8183{
7086 int i; 8184 int i;
7087 8185
7088 for (i = 0; i < h->ndevices; i++) 8186 for (i = 0; i < h->ndevices; i++) {
7089 kfree(h->dev[i]); 8187 kfree(h->dev[i]);
8188 h->dev[i] = NULL;
8189 }
7090} 8190}
7091 8191
7092static void hpsa_remove_one(struct pci_dev *pdev) 8192static void hpsa_remove_one(struct pci_dev *pdev)
@@ -7108,29 +8208,34 @@ static void hpsa_remove_one(struct pci_dev *pdev)
7108 cancel_delayed_work_sync(&h->rescan_ctlr_work); 8208 cancel_delayed_work_sync(&h->rescan_ctlr_work);
7109 destroy_workqueue(h->rescan_ctlr_wq); 8209 destroy_workqueue(h->rescan_ctlr_wq);
7110 destroy_workqueue(h->resubmit_wq); 8210 destroy_workqueue(h->resubmit_wq);
7111 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ 8211
8212 /* includes hpsa_free_irqs - init_one 4 */
8213 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
7112 hpsa_shutdown(pdev); 8214 hpsa_shutdown(pdev);
7113 iounmap(h->vaddr); 8215
7114 iounmap(h->transtable); 8216 hpsa_free_device_info(h); /* scan */
7115 iounmap(h->cfgtable); 8217
7116 hpsa_free_device_info(h); 8218 kfree(h->hba_inquiry_data); /* init_one 10 */
7117 hpsa_free_sg_chain_blocks(h); 8219 h->hba_inquiry_data = NULL; /* init_one 10 */
7118 pci_free_consistent(h->pdev, 8220 if (h->scsi_host)
7119 h->nr_cmds * sizeof(struct CommandList), 8221 scsi_remove_host(h->scsi_host); /* init_one 8 */
7120 h->cmd_pool, h->cmd_pool_dhandle); 8222 hpsa_free_ioaccel2_sg_chain_blocks(h);
7121 pci_free_consistent(h->pdev, 8223 hpsa_free_performant_mode(h); /* init_one 7 */
7122 h->nr_cmds * sizeof(struct ErrorInfo), 8224 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
7123 h->errinfo_pool, h->errinfo_pool_dhandle); 8225 hpsa_free_cmd_pool(h); /* init_one 5 */
7124 hpsa_free_reply_queues(h); 8226
7125 kfree(h->cmd_pool_bits); 8227 /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */
7126 kfree(h->blockFetchTable); 8228
7127 kfree(h->ioaccel1_blockFetchTable); 8229 scsi_host_put(h->scsi_host); /* init_one 3 */
7128 kfree(h->ioaccel2_blockFetchTable); 8230 h->scsi_host = NULL; /* init_one 3 */
7129 kfree(h->hba_inquiry_data); 8231
7130 pci_disable_device(pdev); 8232 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
7131 pci_release_regions(pdev); 8233 hpsa_free_pci_init(h); /* init_one 2.5 */
7132 free_percpu(h->lockup_detected); 8234
7133 kfree(h); 8235 free_percpu(h->lockup_detected); /* init_one 2 */
8236 h->lockup_detected = NULL; /* init_one 2 */
8237 /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
8238 kfree(h); /* init_one 1 */
7134} 8239}
7135 8240
7136static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, 8241static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
@@ -7188,7 +8293,10 @@ static void calc_bucket_map(int bucket[], int num_buckets,
7188 } 8293 }
7189} 8294}
7190 8295
7191/* return -ENODEV or other reason on error, 0 on success */ 8296/*
8297 * return -ENODEV on err, 0 on success (or no action)
8298 * allocates numerous items that must be freed later
8299 */
7192static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) 8300static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
7193{ 8301{
7194 int i; 8302 int i;
@@ -7370,7 +8478,23 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
7370 return 0; 8478 return 0;
7371} 8479}
7372 8480
7373static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h) 8481/* Free ioaccel1 mode command blocks and block fetch table */
8482static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
8483{
8484 if (h->ioaccel_cmd_pool) {
8485 pci_free_consistent(h->pdev,
8486 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
8487 h->ioaccel_cmd_pool,
8488 h->ioaccel_cmd_pool_dhandle);
8489 h->ioaccel_cmd_pool = NULL;
8490 h->ioaccel_cmd_pool_dhandle = 0;
8491 }
8492 kfree(h->ioaccel1_blockFetchTable);
8493 h->ioaccel1_blockFetchTable = NULL;
8494}
8495
8496/* Allocate ioaccel1 mode command blocks and block fetch table */
8497static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
7374{ 8498{
7375 h->ioaccel_maxsg = 8499 h->ioaccel_maxsg =
7376 readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); 8500 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
@@ -7401,16 +8525,32 @@ static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
7401 return 0; 8525 return 0;
7402 8526
7403clean_up: 8527clean_up:
7404 if (h->ioaccel_cmd_pool) 8528 hpsa_free_ioaccel1_cmd_and_bft(h);
8529 return -ENOMEM;
8530}
8531
8532/* Free ioaccel2 mode command blocks and block fetch table */
8533static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
8534{
8535 hpsa_free_ioaccel2_sg_chain_blocks(h);
8536
8537 if (h->ioaccel2_cmd_pool) {
7405 pci_free_consistent(h->pdev, 8538 pci_free_consistent(h->pdev,
7406 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), 8539 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7407 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); 8540 h->ioaccel2_cmd_pool,
7408 kfree(h->ioaccel1_blockFetchTable); 8541 h->ioaccel2_cmd_pool_dhandle);
7409 return 1; 8542 h->ioaccel2_cmd_pool = NULL;
8543 h->ioaccel2_cmd_pool_dhandle = 0;
8544 }
8545 kfree(h->ioaccel2_blockFetchTable);
8546 h->ioaccel2_blockFetchTable = NULL;
7410} 8547}
7411 8548
7412static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h) 8549/* Allocate ioaccel2 mode command blocks and block fetch table */
8550static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
7413{ 8551{
8552 int rc;
8553
7414 /* Allocate ioaccel2 mode command blocks and block fetch table */ 8554 /* Allocate ioaccel2 mode command blocks and block fetch table */
7415 8555
7416 h->ioaccel_maxsg = 8556 h->ioaccel_maxsg =
@@ -7430,7 +8570,13 @@ static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
7430 sizeof(u32)), GFP_KERNEL); 8570 sizeof(u32)), GFP_KERNEL);
7431 8571
7432 if ((h->ioaccel2_cmd_pool == NULL) || 8572 if ((h->ioaccel2_cmd_pool == NULL) ||
7433 (h->ioaccel2_blockFetchTable == NULL)) 8573 (h->ioaccel2_blockFetchTable == NULL)) {
8574 rc = -ENOMEM;
8575 goto clean_up;
8576 }
8577
8578 rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
8579 if (rc)
7434 goto clean_up; 8580 goto clean_up;
7435 8581
7436 memset(h->ioaccel2_cmd_pool, 0, 8582 memset(h->ioaccel2_cmd_pool, 0,
@@ -7438,41 +8584,50 @@ static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
7438 return 0; 8584 return 0;
7439 8585
7440clean_up: 8586clean_up:
7441 if (h->ioaccel2_cmd_pool) 8587 hpsa_free_ioaccel2_cmd_and_bft(h);
7442 pci_free_consistent(h->pdev, 8588 return rc;
7443 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7444 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
7445 kfree(h->ioaccel2_blockFetchTable);
7446 return 1;
7447} 8589}
7448 8590
7449static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) 8591/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
8592static void hpsa_free_performant_mode(struct ctlr_info *h)
8593{
8594 kfree(h->blockFetchTable);
8595 h->blockFetchTable = NULL;
8596 hpsa_free_reply_queues(h);
8597 hpsa_free_ioaccel1_cmd_and_bft(h);
8598 hpsa_free_ioaccel2_cmd_and_bft(h);
8599}
8600
8601/* return -ENODEV on error, 0 on success (or no action)
8602 * allocates numerous items that must be freed later
8603 */
8604static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
7450{ 8605{
7451 u32 trans_support; 8606 u32 trans_support;
7452 unsigned long transMethod = CFGTBL_Trans_Performant | 8607 unsigned long transMethod = CFGTBL_Trans_Performant |
7453 CFGTBL_Trans_use_short_tags; 8608 CFGTBL_Trans_use_short_tags;
7454 int i; 8609 int i, rc;
7455 8610
7456 if (hpsa_simple_mode) 8611 if (hpsa_simple_mode)
7457 return; 8612 return 0;
7458 8613
7459 trans_support = readl(&(h->cfgtable->TransportSupport)); 8614 trans_support = readl(&(h->cfgtable->TransportSupport));
7460 if (!(trans_support & PERFORMANT_MODE)) 8615 if (!(trans_support & PERFORMANT_MODE))
7461 return; 8616 return 0;
7462 8617
7463 /* Check for I/O accelerator mode support */ 8618 /* Check for I/O accelerator mode support */
7464 if (trans_support & CFGTBL_Trans_io_accel1) { 8619 if (trans_support & CFGTBL_Trans_io_accel1) {
7465 transMethod |= CFGTBL_Trans_io_accel1 | 8620 transMethod |= CFGTBL_Trans_io_accel1 |
7466 CFGTBL_Trans_enable_directed_msix; 8621 CFGTBL_Trans_enable_directed_msix;
7467 if (hpsa_alloc_ioaccel_cmd_and_bft(h)) 8622 rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
7468 goto clean_up; 8623 if (rc)
7469 } else { 8624 return rc;
7470 if (trans_support & CFGTBL_Trans_io_accel2) { 8625 } else if (trans_support & CFGTBL_Trans_io_accel2) {
7471 transMethod |= CFGTBL_Trans_io_accel2 | 8626 transMethod |= CFGTBL_Trans_io_accel2 |
7472 CFGTBL_Trans_enable_directed_msix; 8627 CFGTBL_Trans_enable_directed_msix;
7473 if (ioaccel2_alloc_cmds_and_bft(h)) 8628 rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
7474 goto clean_up; 8629 if (rc)
7475 } 8630 return rc;
7476 } 8631 }
7477 8632
7478 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1; 8633 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
@@ -7484,8 +8639,10 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
7484 h->reply_queue[i].head = pci_alloc_consistent(h->pdev, 8639 h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
7485 h->reply_queue_size, 8640 h->reply_queue_size,
7486 &(h->reply_queue[i].busaddr)); 8641 &(h->reply_queue[i].busaddr));
7487 if (!h->reply_queue[i].head) 8642 if (!h->reply_queue[i].head) {
7488 goto clean_up; 8643 rc = -ENOMEM;
8644 goto clean1; /* rq, ioaccel */
8645 }
7489 h->reply_queue[i].size = h->max_commands; 8646 h->reply_queue[i].size = h->max_commands;
7490 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */ 8647 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
7491 h->reply_queue[i].current_entry = 0; 8648 h->reply_queue[i].current_entry = 0;
@@ -7494,15 +8651,24 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
7494 /* Need a block fetch table for performant mode */ 8651 /* Need a block fetch table for performant mode */
7495 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) * 8652 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
7496 sizeof(u32)), GFP_KERNEL); 8653 sizeof(u32)), GFP_KERNEL);
7497 if (!h->blockFetchTable) 8654 if (!h->blockFetchTable) {
7498 goto clean_up; 8655 rc = -ENOMEM;
8656 goto clean1; /* rq, ioaccel */
8657 }
7499 8658
7500 hpsa_enter_performant_mode(h, trans_support); 8659 rc = hpsa_enter_performant_mode(h, trans_support);
7501 return; 8660 if (rc)
8661 goto clean2; /* bft, rq, ioaccel */
8662 return 0;
7502 8663
7503clean_up: 8664clean2: /* bft, rq, ioaccel */
7504 hpsa_free_reply_queues(h);
7505 kfree(h->blockFetchTable); 8665 kfree(h->blockFetchTable);
8666 h->blockFetchTable = NULL;
8667clean1: /* rq, ioaccel */
8668 hpsa_free_reply_queues(h);
8669 hpsa_free_ioaccel1_cmd_and_bft(h);
8670 hpsa_free_ioaccel2_cmd_and_bft(h);
8671 return rc;
7506} 8672}
7507 8673
7508static int is_accelerated_cmd(struct CommandList *c) 8674static int is_accelerated_cmd(struct CommandList *c)
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 657713050349..6ee4da6b1153 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -47,6 +47,7 @@ struct hpsa_scsi_dev_t {
47 unsigned char raid_level; /* from inquiry page 0xC1 */ 47 unsigned char raid_level; /* from inquiry page 0xC1 */
48 unsigned char volume_offline; /* discovered via TUR or VPD */ 48 unsigned char volume_offline; /* discovered via TUR or VPD */
49 u16 queue_depth; /* max queue_depth for this device */ 49 u16 queue_depth; /* max queue_depth for this device */
50 atomic_t reset_cmds_out; /* Count of commands to-be affected */
50 atomic_t ioaccel_cmds_out; /* Only used for physical devices 51 atomic_t ioaccel_cmds_out; /* Only used for physical devices
51 * counts commands sent to physical 52 * counts commands sent to physical
52 * device via "ioaccel" path. 53 * device via "ioaccel" path.
@@ -54,6 +55,8 @@ struct hpsa_scsi_dev_t {
54 u32 ioaccel_handle; 55 u32 ioaccel_handle;
55 int offload_config; /* I/O accel RAID offload configured */ 56 int offload_config; /* I/O accel RAID offload configured */
56 int offload_enabled; /* I/O accel RAID offload enabled */ 57 int offload_enabled; /* I/O accel RAID offload enabled */
58 int offload_to_be_enabled;
59 int hba_ioaccel_enabled;
57 int offload_to_mirror; /* Send next I/O accelerator RAID 60 int offload_to_mirror; /* Send next I/O accelerator RAID
58 * offload request to mirror drive 61 * offload request to mirror drive
59 */ 62 */
@@ -68,6 +71,13 @@ struct hpsa_scsi_dev_t {
68 * devices in order to honor physical device queue depth limits. 71 * devices in order to honor physical device queue depth limits.
69 */ 72 */
70 struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES]; 73 struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
74 int nphysical_disks;
75 int supports_aborts;
76#define HPSA_DO_NOT_EXPOSE 0x0
77#define HPSA_SG_ATTACH 0x1
78#define HPSA_ULD_ATTACH 0x2
79#define HPSA_SCSI_ADD (HPSA_SG_ATTACH | HPSA_ULD_ATTACH)
80 u8 expose_state;
71}; 81};
72 82
73struct reply_queue_buffer { 83struct reply_queue_buffer {
@@ -133,7 +143,6 @@ struct ctlr_info {
133 struct CfgTable __iomem *cfgtable; 143 struct CfgTable __iomem *cfgtable;
134 int interrupts_enabled; 144 int interrupts_enabled;
135 int max_commands; 145 int max_commands;
136 int last_allocation;
137 atomic_t commands_outstanding; 146 atomic_t commands_outstanding;
138# define PERF_MODE_INT 0 147# define PERF_MODE_INT 0
139# define DOORBELL_INT 1 148# define DOORBELL_INT 1
@@ -154,6 +163,7 @@ struct ctlr_info {
154 u8 max_cmd_sg_entries; 163 u8 max_cmd_sg_entries;
155 int chainsize; 164 int chainsize;
156 struct SGDescriptor **cmd_sg_list; 165 struct SGDescriptor **cmd_sg_list;
166 struct ioaccel2_sg_element **ioaccel2_cmd_sg_list;
157 167
158 /* pointers to command and error info pool */ 168 /* pointers to command and error info pool */
159 struct CommandList *cmd_pool; 169 struct CommandList *cmd_pool;
@@ -211,6 +221,7 @@ struct ctlr_info {
211 int remove_in_progress; 221 int remove_in_progress;
212 /* Address of h->q[x] is passed to intr handler to know which queue */ 222 /* Address of h->q[x] is passed to intr handler to know which queue */
213 u8 q[MAX_REPLY_QUEUES]; 223 u8 q[MAX_REPLY_QUEUES];
224 char intrname[MAX_REPLY_QUEUES][16]; /* "hpsa0-msix00" names */
214 u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */ 225 u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
215#define HPSATMF_BITS_SUPPORTED (1 << 0) 226#define HPSATMF_BITS_SUPPORTED (1 << 0)
216#define HPSATMF_PHYS_LUN_RESET (1 << 1) 227#define HPSATMF_PHYS_LUN_RESET (1 << 1)
@@ -222,6 +233,7 @@ struct ctlr_info {
222#define HPSATMF_PHYS_QRY_TASK (1 << 7) 233#define HPSATMF_PHYS_QRY_TASK (1 << 7)
223#define HPSATMF_PHYS_QRY_TSET (1 << 8) 234#define HPSATMF_PHYS_QRY_TSET (1 << 8)
224#define HPSATMF_PHYS_QRY_ASYNC (1 << 9) 235#define HPSATMF_PHYS_QRY_ASYNC (1 << 9)
236#define HPSATMF_IOACCEL_ENABLED (1 << 15)
225#define HPSATMF_MASK_SUPPORTED (1 << 16) 237#define HPSATMF_MASK_SUPPORTED (1 << 16)
226#define HPSATMF_LOG_LUN_RESET (1 << 17) 238#define HPSATMF_LOG_LUN_RESET (1 << 17)
227#define HPSATMF_LOG_NEX_RESET (1 << 18) 239#define HPSATMF_LOG_NEX_RESET (1 << 18)
@@ -251,8 +263,13 @@ struct ctlr_info {
251 struct list_head offline_device_list; 263 struct list_head offline_device_list;
252 int acciopath_status; 264 int acciopath_status;
253 int raid_offload_debug; 265 int raid_offload_debug;
266 int needs_abort_tags_swizzled;
254 struct workqueue_struct *resubmit_wq; 267 struct workqueue_struct *resubmit_wq;
255 struct workqueue_struct *rescan_ctlr_wq; 268 struct workqueue_struct *rescan_ctlr_wq;
269 atomic_t abort_cmds_available;
270 wait_queue_head_t abort_cmd_wait_queue;
271 wait_queue_head_t event_sync_wait_queue;
272 struct mutex reset_mutex;
256}; 273};
257 274
258struct offline_device_entry { 275struct offline_device_entry {
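The new expose_state field above is a small bitmask: HPSA_SCSI_ADD is simply the two attach bits ORed together. An illustrative test of those bits (the helper is hypothetical, not from the driver):

	/* Hypothetical helper: tests the expose_state bits defined above. */
	static inline int device_is_exposed(u8 expose_state)
	{
		/* nonzero if either HPSA_SG_ATTACH or HPSA_ULD_ATTACH is set */
		return (expose_state & HPSA_SCSI_ADD) != 0;
	}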
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 3a621c74b76f..c601622cc98e 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -42,8 +42,22 @@
42#define CMD_UNSOLICITED_ABORT 0x000A 42#define CMD_UNSOLICITED_ABORT 0x000A
43#define CMD_TIMEOUT 0x000B 43#define CMD_TIMEOUT 0x000B
44#define CMD_UNABORTABLE 0x000C 44#define CMD_UNABORTABLE 0x000C
45#define CMD_TMF_STATUS 0x000D
45#define CMD_IOACCEL_DISABLED 0x000E 46#define CMD_IOACCEL_DISABLED 0x000E
47#define CMD_CTLR_LOCKUP 0xffff
48/* Note: CMD_CTLR_LOCKUP is not a value defined by the CISS spec;
49 * it is a value defined by the driver that commands can be marked
50 * with when a controller lockup has been detected by the driver
51 */
46 52
53/* TMF function status values */
54#define CISS_TMF_COMPLETE 0x00
55#define CISS_TMF_INVALID_FRAME 0x02
56#define CISS_TMF_NOT_SUPPORTED 0x04
57#define CISS_TMF_FAILED 0x05
58#define CISS_TMF_SUCCESS 0x08
59#define CISS_TMF_WRONG_LUN 0x09
60#define CISS_TMF_OVERLAPPED_TAG 0x0a
47 61
48/* Unit Attentions ASC's as defined for the MSA2012sa */ 62/* Unit Attentions ASC's as defined for the MSA2012sa */
49#define POWER_OR_RESET 0x29 63#define POWER_OR_RESET 0x29
@@ -240,6 +254,7 @@ struct ReportLUNdata {
240 254
241struct ext_report_lun_entry { 255struct ext_report_lun_entry {
242 u8 lunid[8]; 256 u8 lunid[8];
257#define MASKED_DEVICE(x) ((x)[3] & 0xC0)
243#define GET_BMIC_BUS(lunid) ((lunid)[7] & 0x3F) 258#define GET_BMIC_BUS(lunid) ((lunid)[7] & 0x3F)
244#define GET_BMIC_LEVEL_TWO_TARGET(lunid) ((lunid)[6]) 259#define GET_BMIC_LEVEL_TWO_TARGET(lunid) ((lunid)[6])
245#define GET_BMIC_DRIVE_NUMBER(lunid) (((GET_BMIC_BUS((lunid)) - 1) << 8) + \ 260#define GET_BMIC_DRIVE_NUMBER(lunid) (((GET_BMIC_BUS((lunid)) - 1) << 8) + \
@@ -247,6 +262,8 @@ struct ext_report_lun_entry {
247 u8 wwid[8]; 262 u8 wwid[8];
248 u8 device_type; 263 u8 device_type;
249 u8 device_flags; 264 u8 device_flags;
265#define NON_DISK_PHYS_DEV(x) ((x)[17] & 0x01)
266#define PHYS_IOACCEL(x) ((x)[17] & 0x08)
250 u8 lun_count; /* multi-lun device, how many luns */ 267 u8 lun_count; /* multi-lun device, how many luns */
251 u8 redundant_paths; 268 u8 redundant_paths;
252 u32 ioaccel_handle; /* ioaccel1 only uses lower 16 bits */ 269 u32 ioaccel_handle; /* ioaccel1 only uses lower 16 bits */
@@ -379,6 +396,7 @@ struct ErrorInfo {
379#define CMD_SCSI 0x03 396#define CMD_SCSI 0x03
380#define CMD_IOACCEL1 0x04 397#define CMD_IOACCEL1 0x04
381#define CMD_IOACCEL2 0x05 398#define CMD_IOACCEL2 0x05
399#define IOACCEL2_TMF 0x06
382 400
383#define DIRECT_LOOKUP_SHIFT 4 401#define DIRECT_LOOKUP_SHIFT 4
384#define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1)) 402#define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1))
@@ -421,7 +439,10 @@ struct CommandList {
421 * not used. 439 * not used.
422 */ 440 */
423 struct hpsa_scsi_dev_t *phys_disk; 441 struct hpsa_scsi_dev_t *phys_disk;
424 atomic_t refcount; /* Must be last to avoid memset in cmd_alloc */
442
443 int abort_pending;
444 struct hpsa_scsi_dev_t *reset_pending;
445 atomic_t refcount; /* Must be last to avoid memset in hpsa_cmd_init() */
425} __aligned(COMMANDLIST_ALIGNMENT); 446} __aligned(COMMANDLIST_ALIGNMENT);
426 447
427/* Max S/G elements in I/O accelerator command */ 448/* Max S/G elements in I/O accelerator command */
@@ -515,6 +536,12 @@ struct io_accel2_scsi_response {
515#define IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL 0x28 536#define IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL 0x28
516#define IOACCEL2_STATUS_SR_TASK_COMP_ABORTED 0x40 537#define IOACCEL2_STATUS_SR_TASK_COMP_ABORTED 0x40
517#define IOACCEL2_STATUS_SR_IOACCEL_DISABLED 0x0E 538#define IOACCEL2_STATUS_SR_IOACCEL_DISABLED 0x0E
539#define IOACCEL2_STATUS_SR_IO_ERROR 0x01
540#define IOACCEL2_STATUS_SR_IO_ABORTED 0x02
541#define IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE 0x03
542#define IOACCEL2_STATUS_SR_INVALID_DEVICE 0x04
543#define IOACCEL2_STATUS_SR_UNDERRUN 0x51
544#define IOACCEL2_STATUS_SR_OVERRUN 0x75
518 u8 data_present; /* low 2 bits */ 545 u8 data_present; /* low 2 bits */
519#define IOACCEL2_NO_DATAPRESENT 0x000 546#define IOACCEL2_NO_DATAPRESENT 0x000
520#define IOACCEL2_RESPONSE_DATAPRESENT 0x001 547#define IOACCEL2_RESPONSE_DATAPRESENT 0x001
@@ -567,6 +594,7 @@ struct io_accel2_cmd {
567#define IOACCEL2_DIR_NO_DATA 0x00 594#define IOACCEL2_DIR_NO_DATA 0x00
568#define IOACCEL2_DIR_DATA_IN 0x01 595#define IOACCEL2_DIR_DATA_IN 0x01
569#define IOACCEL2_DIR_DATA_OUT 0x02 596#define IOACCEL2_DIR_DATA_OUT 0x02
597#define IOACCEL2_TMF_ABORT 0x01
570/* 598/*
571 * SCSI Task Management Request format for Accelerator Mode 2 599 * SCSI Task Management Request format for Accelerator Mode 2
572 */ 600 */
@@ -575,13 +603,13 @@ struct hpsa_tmf_struct {
575 u8 reply_queue; /* Reply Queue ID */ 603 u8 reply_queue; /* Reply Queue ID */
576 u8 tmf; /* Task Management Function */ 604 u8 tmf; /* Task Management Function */
577 u8 reserved1; /* byte 3 Reserved */ 605 u8 reserved1; /* byte 3 Reserved */
578 u32 it_nexus; /* SCSI I-T Nexus */
606 __le32 it_nexus; /* SCSI I-T Nexus */
579 u8 lun_id[8]; /* LUN ID for TMF request */ 607 u8 lun_id[8]; /* LUN ID for TMF request */
580 __le64 tag; /* cciss tag associated w/ request */ 608 __le64 tag; /* cciss tag associated w/ request */
581 __le64 abort_tag; /* cciss tag of SCSI cmd or TMF to abort */ 609 __le64 abort_tag; /* cciss tag of SCSI cmd or TMF to abort */
582 __le64 error_ptr; /* Error Pointer */ 610 __le64 error_ptr; /* Error Pointer */
583 __le32 error_len; /* Error Length */ 611 __le32 error_len; /* Error Length */
584};
612} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
585 613
586/* Configuration Table Structure */ 614/* Configuration Table Structure */
587struct HostWrite { 615struct HostWrite {
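The CISS_TMF_* completion codes added to hpsa_cmd.h suggest a straightforward translation into errno values when a task management function completes. A hedged sketch; the mapping choices are illustrative, not taken from the driver:

	/* Illustrative mapping of the CISS TMF status values defined above. */
	static int ciss_tmf_status_to_errno(u8 tmf_status)
	{
		switch (tmf_status) {
		case CISS_TMF_COMPLETE:		/* 0x00 */
		case CISS_TMF_SUCCESS:		/* 0x08 */
			return 0;
		case CISS_TMF_NOT_SUPPORTED:
			return -EOPNOTSUPP;
		default:	/* invalid frame, failed, wrong LUN, overlapped tag */
			return -EIO;
		}
	}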
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 89a8266560d0..4e1a632ccf16 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -1109,7 +1109,6 @@ static struct scsi_host_template imm_template = {
1109 .bios_param = imm_biosparam, 1109 .bios_param = imm_biosparam,
1110 .this_id = 7, 1110 .this_id = 7,
1111 .sg_tablesize = SG_ALL, 1111 .sg_tablesize = SG_ALL,
1112 .cmd_per_lun = 1,
1113 .use_clustering = ENABLE_CLUSTERING, 1112 .use_clustering = ENABLE_CLUSTERING,
1114 .can_queue = 1, 1113 .can_queue = 1,
1115 .slave_alloc = imm_adjust_queue, 1114 .slave_alloc = imm_adjust_queue,
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index e5dae7b54d9a..6a926bae76b2 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2833,7 +2833,6 @@ static struct scsi_host_template initio_template = {
2833 .can_queue = MAX_TARGETS * i91u_MAXQUEUE, 2833 .can_queue = MAX_TARGETS * i91u_MAXQUEUE,
2834 .this_id = 1, 2834 .this_id = 1,
2835 .sg_tablesize = SG_ALL, 2835 .sg_tablesize = SG_ALL,
2836 .cmd_per_lun = 1,
2837 .use_clustering = ENABLE_CLUSTERING, 2836 .use_clustering = ENABLE_CLUSTERING,
2838}; 2837};
2839 2838
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 47412cf4eaac..73790a1d0969 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -272,7 +272,7 @@
272#define IPR_RUNTIME_RESET 0x40000000 272#define IPR_RUNTIME_RESET 0x40000000
273 273
274#define IPR_IPL_INIT_MIN_STAGE_TIME 5 274#define IPR_IPL_INIT_MIN_STAGE_TIME 5
275#define IPR_IPL_INIT_DEFAULT_STAGE_TIME 15
275#define IPR_IPL_INIT_DEFAULT_STAGE_TIME 30
276#define IPR_IPL_INIT_STAGE_UNKNOWN 0x0 276#define IPR_IPL_INIT_STAGE_UNKNOWN 0x0
277#define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000 277#define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000
278#define IPR_IPL_INIT_STAGE_MASK 0xff000000 278#define IPR_IPL_INIT_STAGE_MASK 0xff000000
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 7542f11d3fcd..02cb76fd4420 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -206,10 +206,6 @@ module_param(ips, charp, 0);
206#define IPS_VERSION_HIGH IPS_VER_MAJOR_STRING "." IPS_VER_MINOR_STRING 206#define IPS_VERSION_HIGH IPS_VER_MAJOR_STRING "." IPS_VER_MINOR_STRING
207#define IPS_VERSION_LOW "." IPS_VER_BUILD_STRING " " 207#define IPS_VERSION_LOW "." IPS_VER_BUILD_STRING " "
208 208
209#if !defined(__i386__) && !defined(__ia64__) && !defined(__x86_64__)
210#warning "This driver has only been tested on the x86/ia64/x86_64 platforms"
211#endif
212
213#define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \ 209#define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
214 DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \ 210 DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
215 PCI_DMA_BIDIRECTIONAL : \ 211 PCI_DMA_BIDIRECTIONAL : \
@@ -6788,6 +6784,11 @@ ips_remove_device(struct pci_dev *pci_dev)
6788static int __init 6784static int __init
6789ips_module_init(void) 6785ips_module_init(void)
6790{ 6786{
6787#if !defined(__i386__) && !defined(__ia64__) && !defined(__x86_64__)
6788 printk(KERN_ERR "ips: This driver has only been tested on the x86/ia64/x86_64 platforms\n");
6789 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
6790#endif
6791
6791 if (pci_register_driver(&ips_pci_driver) < 0) 6792 if (pci_register_driver(&ips_pci_driver) < 0)
6792 return -ENODEV; 6793 return -ENODEV;
6793 ips_driver_template.module = THIS_MODULE; 6794 ips_driver_template.module = THIS_MODULE;
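The ips change above swaps a compile-time #warning for a load-time message plus a taint, so the notice also reaches users running prebuilt kernels on other architectures. The same pattern in isolation, for a hypothetical driver:

	#include <linux/kernel.h>
	#include <linux/module.h>

	static int __init example_init(void)
	{
	#if !defined(__i386__) && !defined(__ia64__) && !defined(__x86_64__)
		/* Warn at module load rather than at build time, and taint
		 * the kernel so the condition shows up in bug reports.
		 */
		printk(KERN_ERR "example: untested on this platform\n");
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
	#endif
		return 0;
	}
	module_init(example_init);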
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index cd41b63a2f10..0dfcabe3ca7c 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -160,7 +160,6 @@ static struct scsi_host_template isci_sht = {
160 .change_queue_depth = sas_change_queue_depth, 160 .change_queue_depth = sas_change_queue_depth,
161 .bios_param = sas_bios_param, 161 .bios_param = sas_bios_param,
162 .can_queue = ISCI_CAN_QUEUE_VAL, 162 .can_queue = ISCI_CAN_QUEUE_VAL,
163 .cmd_per_lun = 1,
164 .this_id = -1, 163 .this_id = -1,
165 .sg_tablesize = SG_ALL, 164 .sg_tablesize = SG_ALL,
166 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 165 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 9b81a34d7449..a5a56fa31e70 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -230,6 +230,8 @@ struct lpfc_stats {
230 uint32_t elsRcvRRQ; 230 uint32_t elsRcvRRQ;
231 uint32_t elsRcvRTV; 231 uint32_t elsRcvRTV;
232 uint32_t elsRcvECHO; 232 uint32_t elsRcvECHO;
233 uint32_t elsRcvLCB;
234 uint32_t elsRcvRDP;
233 uint32_t elsXmitFLOGI; 235 uint32_t elsXmitFLOGI;
234 uint32_t elsXmitFDISC; 236 uint32_t elsXmitFDISC;
235 uint32_t elsXmitPLOGI; 237 uint32_t elsXmitPLOGI;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 587e3e962f2b..b0e6fe46448d 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -498,3 +498,5 @@ bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
498bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *, 498bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
499 struct lpfc_name *, uint64_t *, struct lpfc_name *, 499 struct lpfc_name *, uint64_t *, struct lpfc_name *,
500 struct lpfc_name *, uint64_t *, uint32_t *); 500 struct lpfc_name *, uint64_t *, uint32_t *);
501int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox);
502void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb);
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 6977027979be..361f5b3d9d93 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -79,7 +79,6 @@ struct lpfc_nodelist {
79 struct lpfc_name nlp_portname; 79 struct lpfc_name nlp_portname;
80 struct lpfc_name nlp_nodename; 80 struct lpfc_name nlp_nodename;
81 uint32_t nlp_flag; /* entry flags */ 81 uint32_t nlp_flag; /* entry flags */
82 uint32_t nlp_add_flag; /* additional flags */
83 uint32_t nlp_DID; /* FC D_ID of entry */ 82 uint32_t nlp_DID; /* FC D_ID of entry */
84 uint32_t nlp_last_elscmd; /* Last ELS cmd sent */ 83 uint32_t nlp_last_elscmd; /* Last ELS cmd sent */
85 uint16_t nlp_type; 84 uint16_t nlp_type;
@@ -147,6 +146,7 @@ struct lpfc_node_rrq {
147#define NLP_LOGO_ACC 0x00100000 /* Process LOGO after ACC completes */ 146#define NLP_LOGO_ACC 0x00100000 /* Process LOGO after ACC completes */
148#define NLP_TGT_NO_SCSIID 0x00200000 /* good PRLI but no binding for scsid */ 147#define NLP_TGT_NO_SCSIID 0x00200000 /* good PRLI but no binding for scsid */
149#define NLP_ISSUE_LOGO 0x00400000 /* waiting to issue a LOGO */ 148#define NLP_ISSUE_LOGO 0x00400000 /* waiting to issue a LOGO */
149#define NLP_IN_DEV_LOSS 0x00800000 /* devloss in progress */
150#define NLP_ACC_REGLOGIN 0x01000000 /* Issue Reg Login after successful 150#define NLP_ACC_REGLOGIN 0x01000000 /* Issue Reg Login after successful
151 ACC */ 151 ACC */
152#define NLP_NPR_ADISC 0x02000000 /* Issue ADISC when dq'ed from 152#define NLP_NPR_ADISC 0x02000000 /* Issue ADISC when dq'ed from
@@ -158,8 +158,6 @@ struct lpfc_node_rrq {
158#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */ 158#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */
159#define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */ 159#define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */
160 160
161/* Defines for nlp_add_flag (uint32) */
162#define NLP_IN_DEV_LOSS 0x00000001 /* Dev Loss processing in progress */
163 161
164/* ndlp usage management macros */ 162/* ndlp usage management macros */
165#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \ 163#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \
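With NLP_IN_DEV_LOSS moved from nlp_add_flag into nlp_flag, the bit is set and tested under the SCSI host lock in the lpfc_els.c and lpfc_hbadisc.c hunks below. A condensed sketch of that locking idiom (the helper itself is hypothetical):

	/* Hypothetical helper showing the host_lock-guarded flag test
	 * used by the hunks below.
	 */
	static bool ndlp_in_dev_loss(struct Scsi_Host *shost,
				     struct lpfc_nodelist *ndlp)
	{
		bool in_loss;

		spin_lock_irq(shost->host_lock);	/* nlp_flag guarded by host_lock */
		in_loss = (ndlp->nlp_flag & NLP_IN_DEV_LOSS) != 0;
		spin_unlock_irq(shost->host_lock);
		return in_loss;
	}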
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 851e8efe364e..36bf58ba750a 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1509,12 +1509,14 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1509 struct lpfc_nodelist *ndlp) 1509 struct lpfc_nodelist *ndlp)
1510{ 1510{
1511 struct lpfc_vport *vport = ndlp->vport; 1511 struct lpfc_vport *vport = ndlp->vport;
1512 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1512 struct lpfc_nodelist *new_ndlp; 1513 struct lpfc_nodelist *new_ndlp;
1513 struct lpfc_rport_data *rdata; 1514 struct lpfc_rport_data *rdata;
1514 struct fc_rport *rport; 1515 struct fc_rport *rport;
1515 struct serv_parm *sp; 1516 struct serv_parm *sp;
1516 uint8_t name[sizeof(struct lpfc_name)]; 1517 uint8_t name[sizeof(struct lpfc_name)];
1517 uint32_t rc, keepDID = 0; 1518 uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
1519 uint16_t keep_nlp_state;
1518 int put_node; 1520 int put_node;
1519 int put_rport; 1521 int put_rport;
1520 unsigned long *active_rrqs_xri_bitmap = NULL; 1522 unsigned long *active_rrqs_xri_bitmap = NULL;
@@ -1603,11 +1605,14 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1603 ndlp->active_rrqs_xri_bitmap, 1605 ndlp->active_rrqs_xri_bitmap,
1604 phba->cfg_rrq_xri_bitmap_sz); 1606 phba->cfg_rrq_xri_bitmap_sz);
1605 1607
1606 if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
1607 new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1608 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1608 spin_lock_irq(shost->host_lock);
1609 keep_nlp_flag = new_ndlp->nlp_flag;
1610 new_ndlp->nlp_flag = ndlp->nlp_flag;
1611 ndlp->nlp_flag = keep_nlp_flag;
1612 spin_unlock_irq(shost->host_lock);
1609 1613
1610 /* Set state will put new_ndlp on to node list if not already done */
1614 /* Set nlp_states accordingly */
1615 keep_nlp_state = new_ndlp->nlp_state;
1611 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 1616 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
1612 1617
1613 /* Move this back to NPR state */ 1618 /* Move this back to NPR state */
@@ -1624,8 +1629,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1624 if (rport) { 1629 if (rport) {
1625 rdata = rport->dd_data; 1630 rdata = rport->dd_data;
1626 if (rdata->pnode == ndlp) { 1631 if (rdata->pnode == ndlp) {
1627 lpfc_nlp_put(ndlp);
1632 /* break the link before dropping the ref */
1628 ndlp->rport = NULL; 1633 ndlp->rport = NULL;
1634 lpfc_nlp_put(ndlp);
1629 rdata->pnode = lpfc_nlp_get(new_ndlp); 1635 rdata->pnode = lpfc_nlp_get(new_ndlp);
1630 new_ndlp->rport = rport; 1636 new_ndlp->rport = rport;
1631 } 1637 }
@@ -1648,7 +1654,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1648 memcpy(ndlp->active_rrqs_xri_bitmap, 1654 memcpy(ndlp->active_rrqs_xri_bitmap,
1649 active_rrqs_xri_bitmap, 1655 active_rrqs_xri_bitmap,
1650 phba->cfg_rrq_xri_bitmap_sz); 1656 phba->cfg_rrq_xri_bitmap_sz);
1651 lpfc_drop_node(vport, ndlp);
1657
1658 if (!NLP_CHK_NODE_ACT(ndlp))
1659 lpfc_drop_node(vport, ndlp);
1652 } 1660 }
1653 else { 1661 else {
1654 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1662 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -1665,20 +1673,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1665 active_rrqs_xri_bitmap, 1673 active_rrqs_xri_bitmap,
1666 phba->cfg_rrq_xri_bitmap_sz); 1674 phba->cfg_rrq_xri_bitmap_sz);
1667 1675
1668 /* Since we are swapping the ndlp passed in with the new one
1669 * and the did has already been swapped, copy over state.
1670 * The new WWNs are already in new_ndlp since thats what
1671 * we looked it up by in the begining of this routine.
1672 */
1673 new_ndlp->nlp_state = ndlp->nlp_state;
1674
1675 /* Since we are switching over to the new_ndlp, the old
1676 * ndlp should be put in the NPR state, unless we have
1677 * already started re-discovery on it.
1676 /* Since we are switching over to the new_ndlp,
1677 * reset the old ndlp state
1678 */ 1678 */
1679 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 1679 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
1680 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 1680 (ndlp->nlp_state == NLP_STE_MAPPED_NODE))
1681 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1681 keep_nlp_state = NLP_STE_NPR_NODE;
1682 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
1682 1683
1683 /* Fix up the rport accordingly */ 1684 /* Fix up the rport accordingly */
1684 rport = ndlp->rport; 1685 rport = ndlp->rport;
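The hunk above replaces selective copying of NLP_NPR_2B_DISC with a full exchange of nlp_flag between the two nodes, done while the host lock is held so both words move together. The underlying swap-under-lock idiom, as a generic sketch (not lpfc code):

	/* Generic sketch of the swap-under-lock idiom used above. */
	static void swap_flags_locked(spinlock_t *lock, uint32_t *a, uint32_t *b)
	{
		uint32_t tmp;

		spin_lock_irq(lock);	/* both updates must land atomically */
		tmp = *b;
		*b = *a;
		*a = tmp;
		spin_unlock_irq(lock);
	}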
@@ -3667,15 +3668,6 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3667 * At this point, the driver is done so release the IOCB 3668 * At this point, the driver is done so release the IOCB
3668 */ 3669 */
3669 lpfc_els_free_iocb(phba, cmdiocb); 3670 lpfc_els_free_iocb(phba, cmdiocb);
3670
3671 /*
3672 * Remove the ndlp reference if it's a fabric node that has
3673 * sent us an unsolicted LOGO.
3674 */
3675 if (ndlp->nlp_type & NLP_FABRIC)
3676 lpfc_nlp_put(ndlp);
3677
3678 return;
3679} 3671}
3680 3672
3681/** 3673/**
@@ -4020,7 +4012,9 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
4020 ndlp->nlp_rpi, vport->fc_flag); 4012 ndlp->nlp_rpi, vport->fc_flag);
4021 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 4013 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
4022 spin_lock_irq(shost->host_lock); 4014 spin_lock_irq(shost->host_lock);
4023 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
4015 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
4016 ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
4017 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
4024 spin_unlock_irq(shost->host_lock); 4018 spin_unlock_irq(shost->host_lock);
4025 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc; 4019 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
4026 } else { 4020 } else {
@@ -4587,16 +4581,16 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
4587 if (!NLP_CHK_NODE_ACT(ndlp)) 4581 if (!NLP_CHK_NODE_ACT(ndlp))
4588 continue; 4582 continue;
4589 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 4583 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
4590 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 4584 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
4591 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 4585 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
4592 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 4586 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
4593 ndlp->nlp_prev_state = ndlp->nlp_state; 4587 ndlp->nlp_prev_state = ndlp->nlp_state;
4594 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4588 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4595 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 4589 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
4596 sentplogi++; 4590 sentplogi++;
4597 vport->num_disc_nodes++; 4591 vport->num_disc_nodes++;
4598 if (vport->num_disc_nodes >= 4592 if (vport->num_disc_nodes >=
4599 vport->cfg_discovery_threads) { 4593 vport->cfg_discovery_threads) {
4600 spin_lock_irq(shost->host_lock); 4594 spin_lock_irq(shost->host_lock);
4601 vport->fc_flag |= FC_NLP_MORE; 4595 vport->fc_flag |= FC_NLP_MORE;
4602 spin_unlock_irq(shost->host_lock); 4596 spin_unlock_irq(shost->host_lock);
@@ -4615,6 +4609,660 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
4615 return sentplogi; 4609 return sentplogi;
4616} 4610}
4617 4611
4612void
4613lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
4614 uint32_t word0)
4615{
4616
4617 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG);
4618 desc->payload.els_req = word0;
4619 desc->length = cpu_to_be32(sizeof(desc->payload));
4620}
4621
4622void
4623lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
4624 uint8_t *page_a0, uint8_t *page_a2)
4625{
4626 uint16_t wavelength;
4627 uint16_t temperature;
4628 uint16_t rx_power;
4629 uint16_t tx_bias;
4630 uint16_t tx_power;
4631 uint16_t vcc;
4632 uint16_t flag = 0;
4633 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4;
4634 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5;
4635
4636 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG);
4637
4638 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *)
4639 &page_a0[SSF_TRANSCEIVER_CODE_B4];
4640 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *)
4641 &page_a0[SSF_TRANSCEIVER_CODE_B5];
4642
4643 if ((trasn_code_byte4->fc_sw_laser) ||
4644 (trasn_code_byte5->fc_sw_laser_sl) ||
4645 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */
4646 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT);
4647 } else if (trasn_code_byte4->fc_lw_laser) {
4648 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) |
4649 page_a0[SSF_WAVELENGTH_B0];
4650 if (wavelength == SFP_WAVELENGTH_LC1310)
4651 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT;
4652 if (wavelength == SFP_WAVELENGTH_LL1550)
4653 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT;
4654 }
4655 /* check if its SFP+ */
4656 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ?
4657 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN)
4658 << SFP_FLAG_CT_SHIFT;
4659
4660 /* check if its OPTICAL */
4661 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ?
4662 SFP_FLAG_IS_OPTICAL_PORT : 0)
4663 << SFP_FLAG_IS_OPTICAL_SHIFT;
4664
4665 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 |
4666 page_a2[SFF_TEMPERATURE_B0]);
4667 vcc = (page_a2[SFF_VCC_B1] << 8 |
4668 page_a2[SFF_VCC_B0]);
4669 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 |
4670 page_a2[SFF_TXPOWER_B0]);
4671 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 |
4672 page_a2[SFF_TX_BIAS_CURRENT_B0]);
4673 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 |
4674 page_a2[SFF_RXPOWER_B0]);
4675 desc->sfp_info.temperature = cpu_to_be16(temperature);
4676 desc->sfp_info.rx_power = cpu_to_be16(rx_power);
4677 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias);
4678 desc->sfp_info.tx_power = cpu_to_be16(tx_power);
4679 desc->sfp_info.vcc = cpu_to_be16(vcc);
4680
4681 desc->sfp_info.flags = cpu_to_be16(flag);
4682 desc->length = cpu_to_be32(sizeof(desc->sfp_info));
4683}
4684
4685void
4686lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
4687 READ_LNK_VAR *stat)
4688{
4689 uint32_t type;
4690
4691 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG);
4692
4693 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT;
4694
4695 desc->info.port_type = cpu_to_be32(type);
4696
4697 desc->info.link_status.link_failure_cnt =
4698 cpu_to_be32(stat->linkFailureCnt);
4699 desc->info.link_status.loss_of_synch_cnt =
4700 cpu_to_be32(stat->lossSyncCnt);
4701 desc->info.link_status.loss_of_signal_cnt =
4702 cpu_to_be32(stat->lossSignalCnt);
4703 desc->info.link_status.primitive_seq_proto_err =
4704 cpu_to_be32(stat->primSeqErrCnt);
4705 desc->info.link_status.invalid_trans_word =
4706 cpu_to_be32(stat->invalidXmitWord);
4707 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt);
4708
4709 desc->length = cpu_to_be32(sizeof(desc->info));
4710}
4711
4712void
4713lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
4714{
4715 uint16_t rdp_cap = 0;
4716 uint16_t rdp_speed;
4717
4718 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG);
4719
4720 switch (phba->sli4_hba.link_state.speed) {
4721 case LPFC_FC_LA_SPEED_1G:
4722 rdp_speed = RDP_PS_1GB;
4723 break;
4724 case LPFC_FC_LA_SPEED_2G:
4725 rdp_speed = RDP_PS_2GB;
4726 break;
4727 case LPFC_FC_LA_SPEED_4G:
4728 rdp_speed = RDP_PS_4GB;
4729 break;
4730 case LPFC_FC_LA_SPEED_8G:
4731 rdp_speed = RDP_PS_8GB;
4732 break;
4733 case LPFC_FC_LA_SPEED_10G:
4734 rdp_speed = RDP_PS_10GB;
4735 break;
4736 case LPFC_FC_LA_SPEED_16G:
4737 rdp_speed = RDP_PS_16GB;
4738 break;
4739 case LPFC_FC_LA_SPEED_32G:
4740 rdp_speed = RDP_PS_32GB;
4741 break;
4742 default:
4743 rdp_speed = RDP_PS_UNKNOWN;
4744 break;
4745 }
4746
4747 desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
4748
4749 if (phba->lmt & LMT_16Gb)
4750 rdp_cap |= RDP_PS_16GB;
4751 if (phba->lmt & LMT_10Gb)
4752 rdp_cap |= RDP_PS_10GB;
4753 if (phba->lmt & LMT_8Gb)
4754 rdp_cap |= RDP_PS_8GB;
4755 if (phba->lmt & LMT_4Gb)
4756 rdp_cap |= RDP_PS_4GB;
4757 if (phba->lmt & LMT_2Gb)
4758 rdp_cap |= RDP_PS_2GB;
4759 if (phba->lmt & LMT_1Gb)
4760 rdp_cap |= RDP_PS_1GB;
4761
4762 if (rdp_cap == 0)
4763 rdp_cap = RDP_CAP_UNKNOWN;
4764
4765 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap);
4766 desc->length = cpu_to_be32(sizeof(desc->info));
4767}
4768
4769void
4770lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
4771 struct lpfc_hba *phba)
4772{
4773
4774 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
4775
4776 memcpy(desc->port_names.wwnn, phba->wwnn,
4777 sizeof(desc->port_names.wwnn));
4778
4779 memcpy(desc->port_names.wwpn, &phba->wwpn,
4780 sizeof(desc->port_names.wwpn));
4781
4782 desc->length = cpu_to_be32(sizeof(desc->port_names));
4783}
4784
4785void
4786lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
4787 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4788{
4789
4790 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
4791 if (vport->fc_flag & FC_FABRIC) {
4792 memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
4793 sizeof(desc->port_names.wwnn));
4794
4795 memcpy(desc->port_names.wwpn, &vport->fabric_portname,
4796 sizeof(desc->port_names.wwpn));
4797 } else { /* Point to Point */
4798 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename,
4799 sizeof(desc->port_names.wwnn));
4800
4801 memcpy(desc->port_names.wwnn, &ndlp->nlp_portname,
4802 sizeof(desc->port_names.wwpn));
4803 }
4804
4805 desc->length = cpu_to_be32(sizeof(desc->port_names));
4806}
4807
4808void
4809lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
4810 int status)
4811{
4812 struct lpfc_nodelist *ndlp = rdp_context->ndlp;
4813 struct lpfc_vport *vport = ndlp->vport;
4814 struct lpfc_iocbq *elsiocb;
4815 IOCB_t *icmd;
4816 uint8_t *pcmd;
4817 struct ls_rjt *stat;
4818 struct fc_rdp_res_frame *rdp_res;
4819 uint32_t cmdsize;
4820 int rc;
4821
4822 if (status != SUCCESS)
4823 goto error;
4824 cmdsize = sizeof(struct fc_rdp_res_frame);
4825
4826 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize,
4827 lpfc_max_els_tries, rdp_context->ndlp,
4828 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
4829 lpfc_nlp_put(ndlp);
4830 if (!elsiocb)
4831 goto free_rdp_context;
4832
4833 icmd = &elsiocb->iocb;
4834 icmd->ulpContext = rdp_context->rx_id;
4835 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
4836
4837 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4838 "2171 Xmit RDP response tag x%x xri x%x, "
4839 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x",
4840 elsiocb->iotag, elsiocb->iocb.ulpContext,
4841 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4842 ndlp->nlp_rpi);
4843 rdp_res = (struct fc_rdp_res_frame *)
4844 (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4845 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4846 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame));
4847 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4848
4849 /* For RDP payload */
4850 lpfc_rdp_res_link_service(&rdp_res->link_service_desc, ELS_CMD_RDP);
4851
4852 lpfc_rdp_res_sfp_desc(&rdp_res->sfp_desc,
4853 rdp_context->page_a0, rdp_context->page_a2);
4854 lpfc_rdp_res_speed(&rdp_res->portspeed_desc, phba);
4855 lpfc_rdp_res_link_error(&rdp_res->link_error_desc,
4856 &rdp_context->link_stat);
4857 lpfc_rdp_res_diag_port_names(&rdp_res->diag_port_names_desc, phba);
4858 lpfc_rdp_res_attach_port_names(&rdp_res->attached_port_names_desc,
4859 vport, ndlp);
4860 rdp_res->length = cpu_to_be32(RDP_DESC_PAYLOAD_SIZE);
4861
4862 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4863
4864 phba->fc_stat.elsXmitACC++;
4865 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4866 if (rc == IOCB_ERROR)
4867 lpfc_els_free_iocb(phba, elsiocb);
4868
4869 kfree(rdp_context);
4870
4871 return;
4872error:
4873 cmdsize = 2 * sizeof(uint32_t);
4874 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries,
4875 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT);
4876 lpfc_nlp_put(ndlp);
4877 if (!elsiocb)
4878 goto free_rdp_context;
4879
4880 icmd = &elsiocb->iocb;
4881 icmd->ulpContext = rdp_context->rx_id;
4882 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
4883 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4884
4885 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
4886 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
4887 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4888
4889 phba->fc_stat.elsXmitLSRJT++;
4890 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4891 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4892
4893 if (rc == IOCB_ERROR)
4894 lpfc_els_free_iocb(phba, elsiocb);
4895free_rdp_context:
4896 kfree(rdp_context);
4897}
4898
4899int
4900lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
4901{
4902 LPFC_MBOXQ_t *mbox = NULL;
4903 int rc;
4904
4905 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4906 if (!mbox) {
4907 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS,
4908 "7105 failed to allocate mailbox memory");
4909 return 1;
4910 }
4911
4912 if (lpfc_sli4_dump_page_a0(phba, mbox))
4913 goto prep_mbox_fail;
4914 mbox->vport = rdp_context->ndlp->vport;
4915 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
4916 mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
4917 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4918 if (rc == MBX_NOT_FINISHED)
4919 goto issue_mbox_fail;
4920
4921 return 0;
4922
4923prep_mbox_fail:
4924issue_mbox_fail:
4925 mempool_free(mbox, phba->mbox_mem_pool);
4926 return 1;
4927}
4928
4929/*
4930 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS.
4931 * @vport: pointer to a host virtual N_Port data structure.
4932 * @cmdiocb: pointer to lpfc command iocb data structure.
4933 * @ndlp: pointer to a node-list data structure.
4934 *
4935 * This routine processes an unsolicited RDP (Read Diagnostic Parameters)
4936 * IOCB. First, the payload of the unsolicited RDP is checked.
4937 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3
4938 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2,
4939 * (3) send MBX_READ_LNK_STAT to get link stat, (4) call lpfc_els_rdp_cmpl to
4940 * gather all data and send the RDP response.
4941 *
4942 * Return code
4943 * 0 - Sent the acc response
4944 * 1 - Sent the reject response.
4945 */
4946static int
4947lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4948 struct lpfc_nodelist *ndlp)
4949{
4950 struct lpfc_hba *phba = vport->phba;
4951 struct lpfc_dmabuf *pcmd;
4952 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE;
4953 struct fc_rdp_req_frame *rdp_req;
4954 struct lpfc_rdp_context *rdp_context;
4955 IOCB_t *cmd = NULL;
4956 struct ls_rjt stat;
4957
4958 if (phba->sli_rev < LPFC_SLI_REV4 ||
4959 (bf_get(lpfc_sli_intf_if_type,
4960 &phba->sli4_hba.sli_intf) !=
4961 LPFC_SLI_INTF_IF_TYPE_2)) {
4962 rjt_err = LSRJT_UNABLE_TPC;
4963 rjt_expl = LSEXP_REQ_UNSUPPORTED;
4964 goto error;
4965 }
4966
4967 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) {
4968 rjt_err = LSRJT_UNABLE_TPC;
4969 rjt_expl = LSEXP_REQ_UNSUPPORTED;
4970 goto error;
4971 }
4972
4973 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4974 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt;
4975
4976
4977 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4978 "2422 ELS RDP Request "
4979 "dec len %d tag x%x port_id %d len %d\n",
4980 be32_to_cpu(rdp_req->rdp_des_length),
4981 be32_to_cpu(rdp_req->nport_id_desc.tag),
4982 be32_to_cpu(rdp_req->nport_id_desc.nport_id),
4983 be32_to_cpu(rdp_req->nport_id_desc.length));
4984
4985 if (sizeof(struct fc_rdp_nport_desc) !=
4986 be32_to_cpu(rdp_req->rdp_des_length))
4987 goto rjt_logerr;
4988 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag))
4989 goto rjt_logerr;
4990 if (RDP_NPORT_ID_SIZE !=
4991 be32_to_cpu(rdp_req->nport_id_desc.length))
4992 goto rjt_logerr;
4993 rdp_context = kmalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL);
4994 if (!rdp_context) {
4995 rjt_err = LSRJT_UNABLE_TPC;
4996 goto error;
4997 }
4998
4999 memset(rdp_context, 0, sizeof(struct lpfc_rdp_context));
5000 cmd = &cmdiocb->iocb;
5001 rdp_context->ndlp = lpfc_nlp_get(ndlp);
5002 rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id;
5003 rdp_context->rx_id = cmd->ulpContext;
5004 rdp_context->cmpl = lpfc_els_rdp_cmpl;
5005 if (lpfc_get_rdp_info(phba, rdp_context)) {
5006 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS,
5007 "2423 Unable to send mailbox");
5008 kfree(rdp_context);
5009 rjt_err = LSRJT_UNABLE_TPC;
5010 lpfc_nlp_put(ndlp);
5011 goto error;
5012 }
5013
5014 return 0;
5015
5016rjt_logerr:
5017 rjt_err = LSRJT_LOGICAL_ERR;
5018
5019error:
5020 memset(&stat, 0, sizeof(stat));
5021 stat.un.b.lsRjtRsnCode = rjt_err;
5022 stat.un.b.lsRjtRsnCodeExp = rjt_expl;
5023 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5024 return 1;
5025}
5026
5027
5028static void
5029lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5030{
5031 MAILBOX_t *mb;
5032 IOCB_t *icmd;
5033 uint8_t *pcmd;
5034 struct lpfc_iocbq *elsiocb;
5035 struct lpfc_nodelist *ndlp;
5036 struct ls_rjt *stat;
5037 union lpfc_sli4_cfg_shdr *shdr;
5038 struct lpfc_lcb_context *lcb_context;
5039 struct fc_lcb_res_frame *lcb_res;
5040 uint32_t cmdsize, shdr_status, shdr_add_status;
5041 int rc;
5042
5043 mb = &pmb->u.mb;
5044 lcb_context = (struct lpfc_lcb_context *)pmb->context1;
5045 ndlp = lcb_context->ndlp;
5046 pmb->context1 = NULL;
5047 pmb->context2 = NULL;
5048
5049 shdr = (union lpfc_sli4_cfg_shdr *)
5050 &pmb->u.mqe.un.beacon_config.header.cfg_shdr;
5051 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5052 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5053
5054 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX,
5055 "0194 SET_BEACON_CONFIG mailbox "
5056 "completed with status x%x add_status x%x,"
5057 " mbx status x%x\n",
5058 shdr_status, shdr_add_status, mb->mbxStatus);
5059
5060 if (mb->mbxStatus && !(shdr_status &&
5061 shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)) {
5062 mempool_free(pmb, phba->mbox_mem_pool);
5063 goto error;
5064 }
5065
5066 mempool_free(pmb, phba->mbox_mem_pool);
5067 cmdsize = sizeof(struct fc_lcb_res_frame);
5068 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5069 lpfc_max_els_tries, ndlp,
5070 ndlp->nlp_DID, ELS_CMD_ACC);
5071
5072 /* Decrement the ndlp reference count from previous mbox command */
5073 lpfc_nlp_put(ndlp);
5074
5075 if (!elsiocb)
5076 goto free_lcb_context;
5077
5078 lcb_res = (struct fc_lcb_res_frame *)
5079 (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
5080
5081 icmd = &elsiocb->iocb;
5082 icmd->ulpContext = lcb_context->rx_id;
5083 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
5084
5085 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
5086 *((uint32_t *)(pcmd)) = ELS_CMD_ACC;
5087 lcb_res->lcb_sub_command = lcb_context->sub_command;
5088 lcb_res->lcb_type = lcb_context->type;
5089 lcb_res->lcb_frequency = lcb_context->frequency;
5090 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5091 phba->fc_stat.elsXmitACC++;
5092 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5093 if (rc == IOCB_ERROR)
5094 lpfc_els_free_iocb(phba, elsiocb);
5095
5096 kfree(lcb_context);
5097 return;
5098
5099error:
5100 cmdsize = sizeof(struct fc_lcb_res_frame);
5101 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5102 lpfc_max_els_tries, ndlp,
5103 ndlp->nlp_DID, ELS_CMD_LS_RJT);
5104 lpfc_nlp_put(ndlp);
5105 if (!elsiocb)
5106 goto free_lcb_context;
5107
5108 icmd = &elsiocb->iocb;
5109 icmd->ulpContext = lcb_context->rx_id;
5110 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
5111 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
5112
5113 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT;
5114 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
5115 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5116
5117 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5118 phba->fc_stat.elsXmitLSRJT++;
5119 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5120 if (rc == IOCB_ERROR)
5121 lpfc_els_free_iocb(phba, elsiocb);
5122free_lcb_context:
5123 kfree(lcb_context);
5124}
5125
5126static int
5127lpfc_sli4_set_beacon(struct lpfc_vport *vport,
5128 struct lpfc_lcb_context *lcb_context,
5129 uint32_t beacon_state)
5130{
5131 struct lpfc_hba *phba = vport->phba;
5132 LPFC_MBOXQ_t *mbox = NULL;
5133 uint32_t len;
5134 int rc;
5135
5136 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5137 if (!mbox)
5138 return 1;
5139
5140 len = sizeof(struct lpfc_mbx_set_beacon_config) -
5141 sizeof(struct lpfc_sli4_cfg_mhdr);
5142 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5143 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len,
5144 LPFC_SLI4_MBX_EMBED);
5145 mbox->context1 = (void *)lcb_context;
5146 mbox->vport = phba->pport;
5147 mbox->mbox_cmpl = lpfc_els_lcb_rsp;
5148 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config,
5149 phba->sli4_hba.physical_port);
5150 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config,
5151 beacon_state);
5152 bf_set(lpfc_mbx_set_beacon_port_type, &mbox->u.mqe.un.beacon_config, 1);
5153 bf_set(lpfc_mbx_set_beacon_duration, &mbox->u.mqe.un.beacon_config, 0);
5154 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5155 if (rc == MBX_NOT_FINISHED) {
5156 mempool_free(mbox, phba->mbox_mem_pool);
5157 return 1;
5158 }
5159
5160 return 0;
5161}
5162
5163
5164/**
5165 * lpfc_els_rcv_lcb - Process an unsolicited LCB
5166 * @vport: pointer to a host virtual N_Port data structure.
5167 * @cmdiocb: pointer to lpfc command iocb data structure.
5168 * @ndlp: pointer to a node-list data structure.
5169 *
5170 * This routine processes an unsolicited LCB (Link Cable Beacon) IOCB.
5171 * First, the payload of the unsolicited LCB is checked.
5172 * Then, based on the subcommand, the beacon is turned either on or off.
5173 *
5174 * Return code
5175 * 0 - Sent the acc response
5176 * 1 - Sent the reject response.
5177 **/
5178static int
5179lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5180 struct lpfc_nodelist *ndlp)
5181{
5182 struct lpfc_hba *phba = vport->phba;
5183 struct lpfc_dmabuf *pcmd;
5184 IOCB_t *icmd;
5185 uint8_t *lp;
5186 struct fc_lcb_request_frame *beacon;
5187 struct lpfc_lcb_context *lcb_context;
5188 uint8_t state, rjt_err;
5189 struct ls_rjt stat;
5190
5191 icmd = &cmdiocb->iocb;
5192 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
5193 lp = (uint8_t *)pcmd->virt;
5194 beacon = (struct fc_lcb_request_frame *)pcmd->virt;
5195
5196 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5197 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x "
5198 "type x%x frequency %x duration x%x\n",
5199 lp[0], lp[1], lp[2],
5200 beacon->lcb_command,
5201 beacon->lcb_sub_command,
5202 beacon->lcb_type,
5203 beacon->lcb_frequency,
5204 be16_to_cpu(beacon->lcb_duration));
5205
5206 if (phba->sli_rev < LPFC_SLI_REV4 ||
5207 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
5208 LPFC_SLI_INTF_IF_TYPE_2)) {
5209 rjt_err = LSRJT_CMD_UNSUPPORTED;
5210 goto rjt;
5211 }
5212 lcb_context = kmalloc(sizeof(struct lpfc_lcb_context), GFP_KERNEL);
5213
5214 if (phba->hba_flag & HBA_FCOE_MODE) {
5215 rjt_err = LSRJT_CMD_UNSUPPORTED;
5216 goto rjt;
5217 }
5218 if (beacon->lcb_frequency == 0) {
5219 rjt_err = LSRJT_CMD_UNSUPPORTED;
5220 goto rjt;
5221 }
5222 if ((beacon->lcb_type != LPFC_LCB_GREEN) &&
5223 (beacon->lcb_type != LPFC_LCB_AMBER)) {
5224 rjt_err = LSRJT_CMD_UNSUPPORTED;
5225 goto rjt;
5226 }
5227 if ((beacon->lcb_sub_command != LPFC_LCB_ON) &&
5228 (beacon->lcb_sub_command != LPFC_LCB_OFF)) {
5229 rjt_err = LSRJT_CMD_UNSUPPORTED;
5230 goto rjt;
5231 }
5232 if ((beacon->lcb_sub_command == LPFC_LCB_ON) &&
5233 (beacon->lcb_type != LPFC_LCB_GREEN) &&
5234 (beacon->lcb_type != LPFC_LCB_AMBER)) {
5235 rjt_err = LSRJT_CMD_UNSUPPORTED;
5236 goto rjt;
5237 }
5238 if (be16_to_cpu(beacon->lcb_duration) != 0) {
5239 rjt_err = LSRJT_CMD_UNSUPPORTED;
5240 goto rjt;
5241 }
5242
5243 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0;
5244 lcb_context->sub_command = beacon->lcb_sub_command;
5245 lcb_context->type = beacon->lcb_type;
5246 lcb_context->frequency = beacon->lcb_frequency;
5247 lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
5248 lcb_context->rx_id = cmdiocb->iocb.ulpContext;
5249 lcb_context->ndlp = lpfc_nlp_get(ndlp);
5250 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) {
5251 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
5252 LOG_ELS, "0193 failed to send mail box");
5253 lpfc_nlp_put(ndlp);
5254 rjt_err = LSRJT_UNABLE_TPC;
5255 goto rjt;
5256 }
5257 return 0;
5258rjt:
5259 memset(&stat, 0, sizeof(stat));
5260 stat.un.b.lsRjtRsnCode = rjt_err;
5261 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5262 return 1;
5263}
5264
5265
4618/** 5266/**
4619 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 5267 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
4620 * @vport: pointer to a host virtual N_Port data structure. 5268 * @vport: pointer to a host virtual N_Port data structure.
@@ -6706,8 +7354,13 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6706 * Do not process any unsolicited ELS commands 7354 * Do not process any unsolicited ELS commands
6707 * if the ndlp is in DEV_LOSS 7355 * if the ndlp is in DEV_LOSS
6708 */ 7356 */
6709 if (ndlp->nlp_add_flag & NLP_IN_DEV_LOSS)
7357 shost = lpfc_shost_from_vport(vport);
7358 spin_lock_irq(shost->host_lock);
7359 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
7360 spin_unlock_irq(shost->host_lock);
6710 goto dropit; 7361 goto dropit;
7362 }
7363 spin_unlock_irq(shost->host_lock);
6711 7364
6712 elsiocb->context1 = lpfc_nlp_get(ndlp); 7365 elsiocb->context1 = lpfc_nlp_get(ndlp);
6713 elsiocb->vport = vport; 7366 elsiocb->vport = vport;
@@ -6751,7 +7404,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6751 rjt_exp = LSEXP_NOTHING_MORE; 7404 rjt_exp = LSEXP_NOTHING_MORE;
6752 break; 7405 break;
6753 } 7406 }
6754 shost = lpfc_shost_from_vport(vport);
6755 if (vport->port_state < LPFC_DISC_AUTH) { 7407 if (vport->port_state < LPFC_DISC_AUTH) {
6756 if (!(phba->pport->fc_flag & FC_PT2PT) || 7408 if (!(phba->pport->fc_flag & FC_PT2PT) ||
6757 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 7409 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -6821,6 +7473,14 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6821 } 7473 }
6822 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 7474 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
6823 break; 7475 break;
7476 case ELS_CMD_LCB:
7477 phba->fc_stat.elsRcvLCB++;
7478 lpfc_els_rcv_lcb(vport, elsiocb, ndlp);
7479 break;
7480 case ELS_CMD_RDP:
7481 phba->fc_stat.elsRcvRDP++;
7482 lpfc_els_rcv_rdp(vport, elsiocb, ndlp);
7483 break;
6824 case ELS_CMD_RSCN: 7484 case ELS_CMD_RSCN:
6825 phba->fc_stat.elsRcvRSCN++; 7485 phba->fc_stat.elsRcvRSCN++;
6826 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 7486 lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
@@ -7586,7 +8246,8 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7586 lpfc_do_scr_ns_plogi(phba, vport); 8246 lpfc_do_scr_ns_plogi(phba, vport);
7587 goto out; 8247 goto out;
7588fdisc_failed: 8248fdisc_failed:
7589 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
8249 if (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)
8250 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
7590 /* Cancel discovery timer */ 8251 /* Cancel discovery timer */
7591 lpfc_can_disctmo(vport); 8252 lpfc_can_disctmo(vport);
7592 lpfc_nlp_put(ndlp); 8253 lpfc_nlp_put(ndlp);
@@ -7739,8 +8400,10 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7739 8400
7740 if (irsp->ulpStatus == IOSTAT_SUCCESS) { 8401 if (irsp->ulpStatus == IOSTAT_SUCCESS) {
7741 spin_lock_irq(shost->host_lock); 8402 spin_lock_irq(shost->host_lock);
8403 vport->fc_flag &= ~FC_NDISC_ACTIVE;
7742 vport->fc_flag &= ~FC_FABRIC; 8404 vport->fc_flag &= ~FC_FABRIC;
7743 spin_unlock_irq(shost->host_lock); 8405 spin_unlock_irq(shost->host_lock);
8406 lpfc_can_disctmo(vport);
7744 } 8407 }
7745} 8408}
7746 8409
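lpfc_get_rdp_info above follows the driver's usual asynchronous mailbox shape: allocate from the mempool, attach a completion handler and a context pointer, issue with MBX_NOWAIT, and free the mailbox on any failure path, since ownership only transfers on success. A stripped-down sketch of that shape (issue_async_mbox and its parameters are illustrative):

	/* Illustrative reduction of the async mailbox pattern above. */
	static int issue_async_mbox(struct lpfc_hba *phba, void *ctx,
				    void (*done)(struct lpfc_hba *, LPFC_MBOXQ_t *))
	{
		LPFC_MBOXQ_t *mbox;

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			return 1;
		mbox->mbox_cmpl = done;	/* runs when the mailbox completes */
		mbox->context2 = ctx;	/* recovered again in the completion */
		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			return 1;
		}
		return 0;	/* mailbox now owned by the completion handler */
	}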
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2500f15d437f..ce96d5bf8ae7 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -106,6 +106,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
106 struct lpfc_rport_data *rdata; 106 struct lpfc_rport_data *rdata;
107 struct lpfc_nodelist * ndlp; 107 struct lpfc_nodelist * ndlp;
108 struct lpfc_vport *vport; 108 struct lpfc_vport *vport;
109 struct Scsi_Host *shost;
109 struct lpfc_hba *phba; 110 struct lpfc_hba *phba;
110 struct lpfc_work_evt *evtp; 111 struct lpfc_work_evt *evtp;
111 int put_node; 112 int put_node;
@@ -146,48 +147,32 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
146 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) 147 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
147 return; 148 return;
148 149
149 if (ndlp->nlp_type & NLP_FABRIC) {
150
151 /* If the WWPN of the rport and ndlp don't match, ignore it */
152 if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) {
153 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
154 "6789 rport name %lx != node port name %lx",
155 (unsigned long)rport->port_name,
156 (unsigned long)wwn_to_u64(
157 ndlp->nlp_portname.u.wwn));
158 put_node = rdata->pnode != NULL;
159 put_rport = ndlp->rport != NULL;
160 rdata->pnode = NULL;
161 ndlp->rport = NULL;
162 if (put_node)
163 lpfc_nlp_put(ndlp);
164 put_device(&rport->dev);
165 return;
166 }
167
168 put_node = rdata->pnode != NULL;
169 put_rport = ndlp->rport != NULL;
170 rdata->pnode = NULL;
171 ndlp->rport = NULL;
172 if (put_node)
173 lpfc_nlp_put(ndlp);
174 if (put_rport)
175 put_device(&rport->dev);
176 return;
177 }
150 if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
151 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
152 "6789 rport name %llx != node port name %llx",
153 rport->port_name,
154 wwn_to_u64(ndlp->nlp_portname.u.wwn));
178 155
179 evtp = &ndlp->dev_loss_evt; 156 evtp = &ndlp->dev_loss_evt;
180 157
181 if (!list_empty(&evtp->evt_listp))
158 if (!list_empty(&evtp->evt_listp)) {
159 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
160 "6790 rport name %llx dev_loss_evt pending",
161 rport->port_name);
182 return; 162 return;
163 }
183 164
184 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
185 ndlp->nlp_add_flag |= NLP_IN_DEV_LOSS;
165 shost = lpfc_shost_from_vport(vport);
166 spin_lock_irq(shost->host_lock);
167 ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
168 spin_unlock_irq(shost->host_lock);
186 169
187 spin_lock_irq(&phba->hbalock);
188 /* We need to hold the node by incrementing the reference 170 /* We need to hold the node by incrementing the reference
189 * count until this queued work is done 171 * count until this queued work is done
190 */ 172 */
173 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
174
175 spin_lock_irq(&phba->hbalock);
191 if (evtp->evt_arg1) { 176 if (evtp->evt_arg1) {
192 evtp->evt = LPFC_EVT_DEV_LOSS; 177 evtp->evt = LPFC_EVT_DEV_LOSS;
193 list_add_tail(&evtp->evt_listp, &phba->work_list); 178 list_add_tail(&evtp->evt_listp, &phba->work_list);
@@ -215,22 +200,24 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
215 struct fc_rport *rport; 200 struct fc_rport *rport;
216 struct lpfc_vport *vport; 201 struct lpfc_vport *vport;
217 struct lpfc_hba *phba; 202 struct lpfc_hba *phba;
203 struct Scsi_Host *shost;
218 uint8_t *name; 204 uint8_t *name;
219 int put_node; 205 int put_node;
220 int put_rport;
221 int warn_on = 0; 206 int warn_on = 0;
222 int fcf_inuse = 0; 207 int fcf_inuse = 0;
223 208
224 rport = ndlp->rport; 209 rport = ndlp->rport;
210 vport = ndlp->vport;
211 shost = lpfc_shost_from_vport(vport);
212
213 spin_lock_irq(shost->host_lock);
214 ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
215 spin_unlock_irq(shost->host_lock);
225 216
226 if (!rport) {
217 if (!rport)
227 ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
228 return fcf_inuse; 218 return fcf_inuse;
229 }
230 219
231 rdata = rport->dd_data;
232 name = (uint8_t *) &ndlp->nlp_portname; 220 name = (uint8_t *) &ndlp->nlp_portname;
233 vport = ndlp->vport;
234 phba = vport->phba; 221 phba = vport->phba;
235 222
236 if (phba->sli_rev == LPFC_SLI_REV4) 223 if (phba->sli_rev == LPFC_SLI_REV4)
@@ -244,6 +231,13 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
244 "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n", 231 "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
245 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag); 232 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
246 233
234 /*
235 * lpfc_nlp_remove if reached with dangling rport drops the
236 * reference. To make sure that does not happen clear rport
237 * pointer in ndlp before lpfc_nlp_put.
238 */
239 rdata = rport->dd_data;
240
247 /* Don't defer this if we are in the process of deleting the vport 241 /* Don't defer this if we are in the process of deleting the vport
248 * or unloading the driver. The unload will cleanup the node 242 * or unloading the driver. The unload will cleanup the node
249 * appropriately we just need to cleanup the ndlp rport info here. 243 * appropriately we just need to cleanup the ndlp rport info here.
@@ -256,14 +250,12 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
256 ndlp->nlp_sid, 0, LPFC_CTX_TGT); 250 ndlp->nlp_sid, 0, LPFC_CTX_TGT);
257 } 251 }
258 put_node = rdata->pnode != NULL; 252 put_node = rdata->pnode != NULL;
259 put_rport = ndlp->rport != NULL;
260 rdata->pnode = NULL; 253 rdata->pnode = NULL;
261 ndlp->rport = NULL; 254 ndlp->rport = NULL;
262 ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
263 if (put_node) 255 if (put_node)
264 lpfc_nlp_put(ndlp); 256 lpfc_nlp_put(ndlp);
265 if (put_rport)
266 put_device(&rport->dev);
257 put_device(&rport->dev);
258
267 return fcf_inuse; 259 return fcf_inuse;
268 } 260 }
269 261
@@ -275,28 +267,21 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
275 *name, *(name+1), *(name+2), *(name+3), 267 *name, *(name+1), *(name+2), *(name+3),
276 *(name+4), *(name+5), *(name+6), *(name+7), 268 *(name+4), *(name+5), *(name+6), *(name+7),
277 ndlp->nlp_DID); 269 ndlp->nlp_DID);
278 ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
279 return fcf_inuse; 270 return fcf_inuse;
280 } 271 }
281 272
282 if (ndlp->nlp_type & NLP_FABRIC) {
283 /* We will clean up these Nodes in linkup */
284 put_node = rdata->pnode != NULL;
285 put_rport = ndlp->rport != NULL;
286 rdata->pnode = NULL;
287 ndlp->rport = NULL;
288 ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
289 if (put_node)
290 lpfc_nlp_put(ndlp);
291 if (put_rport)
292 put_device(&rport->dev);
273 put_node = rdata->pnode != NULL;
274 rdata->pnode = NULL;
275 ndlp->rport = NULL;
276 if (put_node)
277 lpfc_nlp_put(ndlp);
278 put_device(&rport->dev);
279
280 if (ndlp->nlp_type & NLP_FABRIC)
293 return fcf_inuse; 281 return fcf_inuse;
294 }
295 282
296 if (ndlp->nlp_sid != NLP_NO_SID) { 283 if (ndlp->nlp_sid != NLP_NO_SID) {
297 warn_on = 1; 284 warn_on = 1;
298 /* flush the target */
299 ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
300 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 285 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
301 ndlp->nlp_sid, 0, LPFC_CTX_TGT); 286 ndlp->nlp_sid, 0, LPFC_CTX_TGT);
302 } 287 }
@@ -321,16 +306,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
321 ndlp->nlp_state, ndlp->nlp_rpi); 306 ndlp->nlp_state, ndlp->nlp_rpi);
322 } 307 }
323 308
324 put_node = rdata->pnode != NULL;
325 put_rport = ndlp->rport != NULL;
326 rdata->pnode = NULL;
327 ndlp->rport = NULL;
328 ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
329 if (put_node)
330 lpfc_nlp_put(ndlp);
331 if (put_rport)
332 put_device(&rport->dev);
333
334 if (!(vport->load_flag & FC_UNLOADING) && 309 if (!(vport->load_flag & FC_UNLOADING) &&
335 !(ndlp->nlp_flag & NLP_DELAY_TMO) && 310 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
336 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && 311 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
@@ -1802,7 +1777,7 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
1802 dma_addr_t phys_addr; 1777 dma_addr_t phys_addr;
1803 struct lpfc_mbx_sge sge; 1778 struct lpfc_mbx_sge sge;
1804 struct lpfc_mbx_read_fcf_tbl *read_fcf; 1779 struct lpfc_mbx_read_fcf_tbl *read_fcf;
1805 uint32_t shdr_status, shdr_add_status; 1780 uint32_t shdr_status, shdr_add_status, if_type;
1806 union lpfc_sli4_cfg_shdr *shdr; 1781 union lpfc_sli4_cfg_shdr *shdr;
1807 struct fcf_record *new_fcf_record; 1782 struct fcf_record *new_fcf_record;
1808 1783
@@ -1823,9 +1798,11 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
1823 lpfc_sli_pcimem_bcopy(shdr, shdr, 1798 lpfc_sli_pcimem_bcopy(shdr, shdr,
1824 sizeof(union lpfc_sli4_cfg_shdr)); 1799 sizeof(union lpfc_sli4_cfg_shdr));
1825 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 1800 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1801 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1826 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 1802 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1827 if (shdr_status || shdr_add_status) { 1803 if (shdr_status || shdr_add_status) {
1828 if (shdr_status == STATUS_FCF_TABLE_EMPTY) 1804 if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
1805 if_type == LPFC_SLI_INTF_IF_TYPE_2)
1829 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 1806 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
1830 "2726 READ_FCF_RECORD Indicates empty " 1807 "2726 READ_FCF_RECORD Indicates empty "
1831 "FCF table.\n"); 1808 "FCF table.\n");
@@ -3868,11 +3845,11 @@ out:
3868 3845
3869 if (vport->port_state < LPFC_VPORT_READY) { 3846 if (vport->port_state < LPFC_VPORT_READY) {
3870 /* Link up discovery requires Fabric registration. */ 3847 /* Link up discovery requires Fabric registration. */
3871 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
3872 lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0); 3848 lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
3873 lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0); 3849 lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
3874 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); 3850 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
3875 lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0); 3851 lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
3852 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0);
3876 3853
3877 /* Issue SCR just before NameServer GID_FT Query */ 3854 /* Issue SCR just before NameServer GID_FT Query */
3878 lpfc_issue_els_scr(vport, SCR_DID, 0); 3855 lpfc_issue_els_scr(vport, SCR_DID, 0);
@@ -3918,9 +3895,17 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3918 * registered port, drop the reference that we took the last time we 3895 * registered port, drop the reference that we took the last time we
3919 * registered the port. 3896 * registered the port.
3920 */ 3897 */
3921 if (ndlp->rport && ndlp->rport->dd_data && 3898 rport = ndlp->rport;
3922 ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) 3899 if (rport) {
3923 lpfc_nlp_put(ndlp); 3900 rdata = rport->dd_data;
3901 /* break the link before dropping the ref */
3902 ndlp->rport = NULL;
3903 if (rdata && rdata->pnode == ndlp)
3904 lpfc_nlp_put(ndlp);
3905 rdata->pnode = NULL;
3906 /* drop the reference taken for the earlier registration */
3907 put_device(&rport->dev);
3908 }
3924 3909
3925 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 3910 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
3926 "rport add: did:x%x flg:x%x type x%x", 3911 "rport add: did:x%x flg:x%x type x%x",
@@ -4296,9 +4281,9 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4296 if (vport->phba->sli_rev == LPFC_SLI_REV4) { 4281 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4297 lpfc_cleanup_vports_rrqs(vport, ndlp); 4282 lpfc_cleanup_vports_rrqs(vport, ndlp);
4298 lpfc_unreg_rpi(vport, ndlp); 4283 lpfc_unreg_rpi(vport, ndlp);
4299 } else {
4300 lpfc_nlp_put(ndlp);
4301 } 4284 }
4285
4286 lpfc_nlp_put(ndlp);
4302 return; 4287 return;
4303} 4288}
4304 4289
@@ -4510,7 +4495,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4510{ 4495{
4511 struct lpfc_hba *phba = vport->phba; 4496 struct lpfc_hba *phba = vport->phba;
4512 LPFC_MBOXQ_t *mbox; 4497 LPFC_MBOXQ_t *mbox;
4513 int rc; 4498 int rc, acc_plogi = 1;
4514 uint16_t rpi; 4499 uint16_t rpi;
4515 4500
4516 if (ndlp->nlp_flag & NLP_RPI_REGISTERED || 4501 if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
@@ -4543,14 +4528,20 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4543 mbox->context1 = lpfc_nlp_get(ndlp); 4528 mbox->context1 = lpfc_nlp_get(ndlp);
4544 mbox->mbox_cmpl = 4529 mbox->mbox_cmpl =
4545 lpfc_sli4_unreg_rpi_cmpl_clr; 4530 lpfc_sli4_unreg_rpi_cmpl_clr;
4531 /*
4532 * accept PLOGIs after unreg_rpi_cmpl
4533 */
4534 acc_plogi = 0;
4546 } else 4535 } else
4547 mbox->mbox_cmpl = 4536 mbox->mbox_cmpl =
4548 lpfc_sli_def_mbox_cmpl; 4537 lpfc_sli_def_mbox_cmpl;
4549 } 4538 }
4550 4539
4551 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4540 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4552 if (rc == MBX_NOT_FINISHED) 4541 if (rc == MBX_NOT_FINISHED) {
4553 mempool_free(mbox, phba->mbox_mem_pool); 4542 mempool_free(mbox, phba->mbox_mem_pool);
4543 acc_plogi = 1;
4544 }
4554 } 4545 }
4555 lpfc_no_rpi(phba, ndlp); 4546 lpfc_no_rpi(phba, ndlp);
4556 4547
@@ -4558,8 +4549,11 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4558 ndlp->nlp_rpi = 0; 4549 ndlp->nlp_rpi = 0;
4559 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 4550 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
4560 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 4551 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
4552 if (acc_plogi)
4553 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
4561 return 1; 4554 return 1;
4562 } 4555 }
4556 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
4563 return 0; 4557 return 0;
4564} 4558}
4565 4559
@@ -4761,6 +4755,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4761{ 4755{
4762 struct lpfc_hba *phba = vport->phba; 4756 struct lpfc_hba *phba = vport->phba;
4763 struct lpfc_rport_data *rdata; 4757 struct lpfc_rport_data *rdata;
4758 struct fc_rport *rport;
4764 LPFC_MBOXQ_t *mbox; 4759 LPFC_MBOXQ_t *mbox;
4765 int rc; 4760 int rc;
4766 4761
@@ -4798,14 +4793,24 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4798 lpfc_cleanup_node(vport, ndlp); 4793 lpfc_cleanup_node(vport, ndlp);
4799 4794
4800 /* 4795 /*
4801 * We can get here with a non-NULL ndlp->rport because when we 4796 * ndlp->rport must be set to NULL before we reach here,
4802 * unregister a rport we don't break the rport/node linkage. So if 4797 * i.e. break the rport/node link before doing lpfc_nlp_put
4803 * we do, make sure we aren't leaving any dangling pointers behind. 4798 * for the registered rport, then drop the rport reference.
4804 */ 4799 */
4805 if (ndlp->rport) { 4800 if (ndlp->rport) {
4806 rdata = ndlp->rport->dd_data; 4801 /*
4802 * an extra lpfc_nlp_put dropped the ndlp reference taken
4803 * for the registered rport, so clean up the rport here
4804 */
4805 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4806 "0940 removed node x%p DID x%x "
4807 " rport not null %p\n",
4808 ndlp, ndlp->nlp_DID, ndlp->rport);
4809 rport = ndlp->rport;
4810 rdata = rport->dd_data;
4807 rdata->pnode = NULL; 4811 rdata->pnode = NULL;
4808 ndlp->rport = NULL; 4812 ndlp->rport = NULL;
4813 put_device(&rport->dev);
4809 } 4814 }
4810} 4815}
4811 4816
@@ -4833,9 +4838,19 @@ lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4833 if (matchdid.un.b.id == ndlpdid.un.b.id) { 4838 if (matchdid.un.b.id == ndlpdid.un.b.id) {
4834 if ((mydid.un.b.domain == matchdid.un.b.domain) && 4839 if ((mydid.un.b.domain == matchdid.un.b.domain) &&
4835 (mydid.un.b.area == matchdid.un.b.area)) { 4840 (mydid.un.b.area == matchdid.un.b.area)) {
4841 /* This code is supposed to match the ID
4842 * for a private loop device that is
4843 * connected to fl_port. But we need to
4844 * check that the port did not just go
4845 * from pt2pt to fabric, or we could end
4846 * up matching ndlp->nlp_DID 000001 to
4847 * fabric DID 0x20101.
4848 */
4836 if ((ndlpdid.un.b.domain == 0) && 4849 if ((ndlpdid.un.b.domain == 0) &&
4837 (ndlpdid.un.b.area == 0)) { 4850 (ndlpdid.un.b.area == 0)) {
4838 if (ndlpdid.un.b.id) 4851 if (ndlpdid.un.b.id &&
4852 vport->phba->fc_topology ==
4853 LPFC_TOPOLOGY_LOOP)
4839 return 1; 4854 return 1;
4840 } 4855 }
4841 return 0; 4856 return 0;
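
The new lpfc_matchdid comment reads more easily with the 24-bit FC address layout in hand: domain in bits 23:16, area in bits 15:8, port id in bits 7:0. A private loop device carries domain == 0 and area == 0, which is why DID 0x000001 must no longer match fabric DID 0x020101 once the topology has flipped from loop to fabric. A small illustrative helper follows; the field names are mine, not the driver's union.

#include <stdint.h>
#include <stdio.h>

/* 24-bit Fibre Channel address: domain.area.id */
struct fc_did {
    uint8_t domain;   /* bits 23:16 */
    uint8_t area;     /* bits 15:8  */
    uint8_t id;       /* bits 7:0   */
};

static struct fc_did did_decode(uint32_t did)
{
    struct fc_did d = {
        .domain = (did >> 16) & 0xff,
        .area   = (did >> 8) & 0xff,
        .id     = did & 0xff,
    };
    return d;
}

/* A node looks like a private-loop device when domain and area are 0. */
static int is_private_loop_did(uint32_t did)
{
    struct fc_did d = did_decode(did);
    return d.domain == 0 && d.area == 0 && d.id != 0;
}

int main(void)
{
    printf("0x000001 private loop? %d\n", is_private_loop_did(0x000001)); /* 1 */
    printf("0x020101 private loop? %d\n", is_private_loop_did(0x020101)); /* 0 */
    return 0;
}
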
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 37beb9dc1311..892c5257d87c 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -543,6 +543,7 @@ struct fc_vft_header {
543#define ELS_CMD_TEST 0x11000000 543#define ELS_CMD_TEST 0x11000000
544#define ELS_CMD_RRQ 0x12000000 544#define ELS_CMD_RRQ 0x12000000
545#define ELS_CMD_REC 0x13000000 545#define ELS_CMD_REC 0x13000000
546#define ELS_CMD_RDP 0x18000000
546#define ELS_CMD_PRLI 0x20100014 547#define ELS_CMD_PRLI 0x20100014
547#define ELS_CMD_PRLO 0x21100014 548#define ELS_CMD_PRLO 0x21100014
548#define ELS_CMD_PRLO_ACC 0x02100014 549#define ELS_CMD_PRLO_ACC 0x02100014
@@ -558,6 +559,7 @@ struct fc_vft_header {
558#define ELS_CMD_SCR 0x62000000 559#define ELS_CMD_SCR 0x62000000
559#define ELS_CMD_RNID 0x78000000 560#define ELS_CMD_RNID 0x78000000
560#define ELS_CMD_LIRR 0x7A000000 561#define ELS_CMD_LIRR 0x7A000000
562#define ELS_CMD_LCB 0x81000000
561#else /* __LITTLE_ENDIAN_BITFIELD */ 563#else /* __LITTLE_ENDIAN_BITFIELD */
562#define ELS_CMD_MASK 0xffff 564#define ELS_CMD_MASK 0xffff
563#define ELS_RSP_MASK 0xff 565#define ELS_RSP_MASK 0xff
@@ -580,6 +582,7 @@ struct fc_vft_header {
580#define ELS_CMD_TEST 0x11 582#define ELS_CMD_TEST 0x11
581#define ELS_CMD_RRQ 0x12 583#define ELS_CMD_RRQ 0x12
582#define ELS_CMD_REC 0x13 584#define ELS_CMD_REC 0x13
585#define ELS_CMD_RDP 0x18
583#define ELS_CMD_PRLI 0x14001020 586#define ELS_CMD_PRLI 0x14001020
584#define ELS_CMD_PRLO 0x14001021 587#define ELS_CMD_PRLO 0x14001021
585#define ELS_CMD_PRLO_ACC 0x14001002 588#define ELS_CMD_PRLO_ACC 0x14001002
@@ -595,6 +598,7 @@ struct fc_vft_header {
595#define ELS_CMD_SCR 0x62 598#define ELS_CMD_SCR 0x62
596#define ELS_CMD_RNID 0x78 599#define ELS_CMD_RNID 0x78
597#define ELS_CMD_LIRR 0x7A 600#define ELS_CMD_LIRR 0x7A
601#define ELS_CMD_LCB 0x81
598#endif 602#endif
599 603
600/* 604/*
@@ -1010,6 +1014,198 @@ typedef struct _ELS_PKT { /* Structure is in Big Endian format */
1010 } un; 1014 } un;
1011} ELS_PKT; 1015} ELS_PKT;
1012 1016
1017/*
1018 * Link Cable Beacon (LCB) ELS Frame
1019 */
1020
1021struct fc_lcb_request_frame {
1022 uint32_t lcb_command; /* ELS command opcode (0x81) */
1023 uint8_t lcb_sub_command;/* LCB Payload Word 1, bit 24:31 */
1024#define LPFC_LCB_ON 0x1
1025#define LPFC_LCB_OFF 0x2
1026 uint8_t reserved[3];
1027
1028 uint8_t lcb_type; /* LCB Payload Word 2, bit 24:31 */
1029#define LPFC_LCB_GREEN 0x1
1030#define LPFC_LCB_AMBER 0x2
1031 uint8_t lcb_frequency; /* LCB Payload Word 2, bit 16:23 */
1032 uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */
1033};
1034
1035/*
1036 * Link Cable Beacon (LCB) ELS Response Frame
1037 */
1038struct fc_lcb_res_frame {
1039 uint32_t lcb_ls_acc; /* Acceptance of LCB request (0x02) */
1040 uint8_t lcb_sub_command;/* LCB Payload Word 1, bit 24:31 */
1041 uint8_t reserved[3];
1042 uint8_t lcb_type; /* LCB Payload Word 2, bit 24:31 */
1043 uint8_t lcb_frequency; /* LCB Payload Word 2, bit 16:23 */
1044 uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */
1045};
1046
1047/*
1048 * Read Diagnostic Parameters (RDP) ELS frame.
1049 */
1050#define SFF_PG0_IDENT_SFP 0x3
1051
1052#define SFP_FLAG_PT_OPTICAL 0x0
1053#define SFP_FLAG_PT_SWLASER 0x01
1054#define SFP_FLAG_PT_LWLASER_LC1310 0x02
1055#define SFP_FLAG_PT_LWLASER_LL1550 0x03
1056#define SFP_FLAG_PT_MASK 0x0F
1057#define SFP_FLAG_PT_SHIFT 0
1058
1059#define SFP_FLAG_IS_OPTICAL_PORT 0x01
1060#define SFP_FLAG_IS_OPTICAL_MASK 0x010
1061#define SFP_FLAG_IS_OPTICAL_SHIFT 4
1062
1063#define SFP_FLAG_IS_DESC_VALID 0x01
1064#define SFP_FLAG_IS_DESC_VALID_MASK 0x020
1065#define SFP_FLAG_IS_DESC_VALID_SHIFT 5
1066
1067#define SFP_FLAG_CT_UNKNOWN 0x0
1068#define SFP_FLAG_CT_SFP_PLUS 0x01
1069#define SFP_FLAG_CT_MASK 0x3C
1070#define SFP_FLAG_CT_SHIFT 6
1071
1072struct fc_rdp_port_name_info {
1073 uint8_t wwnn[8];
1074 uint8_t wwpn[8];
1075};
1076
1077
1078/*
1079 * Link Error Status Block Structure (FC-FS-3) for RDP
1080 * This similar to RPS ELS
1081 */
1082struct fc_link_status {
1083 uint32_t link_failure_cnt;
1084 uint32_t loss_of_synch_cnt;
1085 uint32_t loss_of_signal_cnt;
1086 uint32_t primitive_seq_proto_err;
1087 uint32_t invalid_trans_word;
1088 uint32_t invalid_crc_cnt;
1089
1090};
1091
1092#define RDP_PORT_NAMES_DESC_TAG 0x00010003
1093struct fc_rdp_port_name_desc {
1094 uint32_t tag; /* 0001 0003h */
1095 uint32_t length; /* set to size of payload struct */
1096 struct fc_rdp_port_name_info port_names;
1097};
1098
1099
1100struct fc_rdp_link_error_status_payload_info {
1101 struct fc_link_status link_status; /* 24 bytes */
1102 uint32_t port_type; /* bits 31-30 only */
1103};
1104
1105#define RDP_LINK_ERROR_STATUS_DESC_TAG 0x00010002
1106struct fc_rdp_link_error_status_desc {
1107 uint32_t tag; /* 0001 0002h */
1108 uint32_t length; /* set to size of payload struct */
1109 struct fc_rdp_link_error_status_payload_info info;
1110};
1111
1112#define VN_PT_PHY_UNKNOWN 0x00
1113#define VN_PT_PHY_PF_PORT 0x01
1114#define VN_PT_PHY_ETH_MAC 0x10
1115#define VN_PT_PHY_SHIFT 30
1116
1117#define RDP_PS_1GB 0x8000
1118#define RDP_PS_2GB 0x4000
1119#define RDP_PS_4GB 0x2000
1120#define RDP_PS_10GB 0x1000
1121#define RDP_PS_8GB 0x0800
1122#define RDP_PS_16GB 0x0400
1123#define RDP_PS_32GB 0x0200
1124
1125#define RDP_CAP_UNKNOWN 0x0001
1126#define RDP_PS_UNKNOWN 0x0002
1127#define RDP_PS_NOT_ESTABLISHED 0x0001
1128
1129struct fc_rdp_port_speed {
1130 uint16_t capabilities;
1131 uint16_t speed;
1132};
1133
1134struct fc_rdp_port_speed_info {
1135 struct fc_rdp_port_speed port_speed;
1136};
1137
1138#define RDP_PORT_SPEED_DESC_TAG 0x00010001
1139struct fc_rdp_port_speed_desc {
1140 uint32_t tag; /* 00010001h */
1141 uint32_t length; /* set to size of payload struct */
1142 struct fc_rdp_port_speed_info info;
1143};
1144
1145#define RDP_NPORT_ID_SIZE 4
1146#define RDP_N_PORT_DESC_TAG 0x00000003
1147struct fc_rdp_nport_desc {
1148 uint32_t tag; /* 0000 0003h, big endian */
1149 uint32_t length; /* size of RDP_N_PORT_ID struct */
1150 uint32_t nport_id : 12;
1151 uint32_t reserved : 8;
1152};
1153
1154
1155struct fc_rdp_link_service_info {
1156 uint32_t els_req; /* Request payload word 0 value.*/
1157};
1158
1159#define RDP_LINK_SERVICE_DESC_TAG 0x00000001
1160struct fc_rdp_link_service_desc {
1161 uint32_t tag; /* Descriptor tag 1 */
1162 uint32_t length; /* set to size of payload struct. */
1163 struct fc_rdp_link_service_info payload;
1164 /* must be ELS req Word 0(0x18) */
1165};
1166
1167struct fc_rdp_sfp_info {
1168 uint16_t temperature;
1169 uint16_t vcc;
1170 uint16_t tx_bias;
1171 uint16_t tx_power;
1172 uint16_t rx_power;
1173 uint16_t flags;
1174};
1175
1176#define RDP_SFP_DESC_TAG 0x00010000
1177struct fc_rdp_sfp_desc {
1178 uint32_t tag;
1179 uint32_t length; /* set to size of sfp_info struct */
1180 struct fc_rdp_sfp_info sfp_info;
1181};
1182
1183struct fc_rdp_req_frame {
1184 uint32_t rdp_command; /* ELS command opcode (0x18)*/
1185 uint32_t rdp_des_length; /* RDP Payload Word 1 */
1186 struct fc_rdp_nport_desc nport_id_desc; /* RDP Payload Word 2 - 4 */
1187};
1188
1189
1190struct fc_rdp_res_frame {
1191 uint32_t reply_sequence; /* FC word0 LS_ACC or LS_RJT */
1192 uint32_t length; /* FC Word 1 */
1193 struct fc_rdp_link_service_desc link_service_desc; /* Word 2 -4 */
1194 struct fc_rdp_sfp_desc sfp_desc; /* Word 5 -9 */
1195 struct fc_rdp_port_speed_desc portspeed_desc; /* Word 10-12 */
1196 struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13-21 */
1197 struct fc_rdp_port_name_desc diag_port_names_desc; /* Word 22-27 */
1198 struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28-33 */
1199};
1200
1201
1202#define RDP_DESC_PAYLOAD_SIZE (sizeof(struct fc_rdp_link_service_desc) \
1203 + sizeof(struct fc_rdp_sfp_desc) \
1204 + sizeof(struct fc_rdp_port_speed_desc) \
1205 + sizeof(struct fc_rdp_link_error_status_desc) \
1206 + (sizeof(struct fc_rdp_port_name_desc) * 2))
1207
1208
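
The word ranges annotated in fc_rdp_res_frame can be cross-checked against the descriptor definitions: 3 + 5 + 3 + 9 + 6 + 6 words after the two header words, so RDP_DESC_PAYLOAD_SIZE should evaluate to 128 bytes. A quick standalone check, assuming natural alignment of the structs declared above:

#include <assert.h>

/* descriptor sizes implied by the definitions above */
#define LINK_SERVICE_DESC_SZ  (4 + 4 + 4)            /* words 2-4   */
#define SFP_DESC_SZ           (4 + 4 + 12)           /* words 5-9   */
#define PORT_SPEED_DESC_SZ    (4 + 4 + 4)            /* words 10-12 */
#define LINK_ERR_DESC_SZ      (4 + 4 + 24 + 4)       /* words 13-21 */
#define PORT_NAME_DESC_SZ     (4 + 4 + 16)           /* 6 words each */

int main(void)
{
    int payload = LINK_SERVICE_DESC_SZ + SFP_DESC_SZ + PORT_SPEED_DESC_SZ
                + LINK_ERR_DESC_SZ + 2 * PORT_NAME_DESC_SZ;

    assert(payload == 128);          /* 32 words: words 2-33 of the reply */
    return 0;
}
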
1013/******** FDMI ********/ 1209/******** FDMI ********/
1014 1210
1015/* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */ 1211/* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */
@@ -1587,6 +1783,11 @@ typedef struct { /* FireFly BIU registers */
1587#define TEMPERATURE_OFFSET 0xB0 /* Slim offset for critical temperature event */ 1783#define TEMPERATURE_OFFSET 0xB0 /* Slim offset for critical temperature event */
1588 1784
1589/* 1785/*
1786 * Return code for failure
1787 */
1788#define FAILURE 1
1789
1790/*
1590 * Begin Structure Definitions for Mailbox Commands 1791 * Begin Structure Definitions for Mailbox Commands
1591 */ 1792 */
1592 1793
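
For orientation, the new LCB request added to lpfc_hw.h is a 12-byte, three-word payload: word 0 carries the ELS opcode, word 1 the sub-command, word 2 the type, frequency and duration. A hedged sketch that fills the struct exactly as declared above; byte-order handling is omitted (on the wire the words are big-endian), and any value beyond the defined ON/GREEN constants is illustrative.

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* copied from the lpfc_hw.h hunk above */
struct fc_lcb_request_frame {
    uint32_t lcb_command;
    uint8_t  lcb_sub_command;
#define LPFC_LCB_ON  0x1
#define LPFC_LCB_OFF 0x2
    uint8_t  reserved[3];
    uint8_t  lcb_type;
#define LPFC_LCB_GREEN 0x1
#define LPFC_LCB_AMBER 0x2
    uint8_t  lcb_frequency;
    uint16_t lcb_duration;
};

int main(void)
{
    struct fc_lcb_request_frame req;

    memset(&req, 0, sizeof(req));
    req.lcb_command     = 0x81000000;    /* ELS_CMD_LCB, big-endian bitfield form */
    req.lcb_sub_command = LPFC_LCB_ON;   /* turn the beacon on */
    req.lcb_type        = LPFC_LCB_GREEN;
    req.lcb_frequency   = 1;             /* illustrative blink frequency */
    req.lcb_duration    = 0;             /* illustrative; semantics per FC-LS */

    /* words 0-2 of the payload: 12 bytes, assuming natural alignment */
    assert(sizeof(req) == 12);
    return 0;
}
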
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 1813c45946f4..33ec4fa39ccb 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -291,7 +291,7 @@ struct sli4_bls_rsp {
291struct lpfc_eqe { 291struct lpfc_eqe {
292 uint32_t word0; 292 uint32_t word0;
293#define lpfc_eqe_resource_id_SHIFT 16 293#define lpfc_eqe_resource_id_SHIFT 16
294#define lpfc_eqe_resource_id_MASK 0x000000FF 294#define lpfc_eqe_resource_id_MASK 0x0000FFFF
295#define lpfc_eqe_resource_id_WORD word0 295#define lpfc_eqe_resource_id_WORD word0
296#define lpfc_eqe_minor_code_SHIFT 4 296#define lpfc_eqe_minor_code_SHIFT 4
297#define lpfc_eqe_minor_code_MASK 0x00000FFF 297#define lpfc_eqe_minor_code_MASK 0x00000FFF
@@ -914,6 +914,8 @@ struct mbox_header {
914#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D 914#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
915#define LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG 0x3E 915#define LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG 0x3E
916#define LPFC_MBOX_OPCODE_SET_BOOT_CONFIG 0x43 916#define LPFC_MBOX_OPCODE_SET_BOOT_CONFIG 0x43
917#define LPFC_MBOX_OPCODE_SET_BEACON_CONFIG 0x45
918#define LPFC_MBOX_OPCODE_GET_BEACON_CONFIG 0x46
917#define LPFC_MBOX_OPCODE_GET_PORT_NAME 0x4D 919#define LPFC_MBOX_OPCODE_GET_PORT_NAME 0x4D
918#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A 920#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
919#define LPFC_MBOX_OPCODE_GET_VPD_DATA 0x5B 921#define LPFC_MBOX_OPCODE_GET_VPD_DATA 0x5B
@@ -1479,6 +1481,26 @@ struct lpfc_mbx_query_fw_config {
1479 } rsp; 1481 } rsp;
1480}; 1482};
1481 1483
1484struct lpfc_mbx_set_beacon_config {
1485 struct mbox_header header;
1486 uint32_t word4;
1487#define lpfc_mbx_set_beacon_port_num_SHIFT 0
1488#define lpfc_mbx_set_beacon_port_num_MASK 0x0000003F
1489#define lpfc_mbx_set_beacon_port_num_WORD word4
1490#define lpfc_mbx_set_beacon_port_type_SHIFT 6
1491#define lpfc_mbx_set_beacon_port_type_MASK 0x00000003
1492#define lpfc_mbx_set_beacon_port_type_WORD word4
1493#define lpfc_mbx_set_beacon_state_SHIFT 8
1494#define lpfc_mbx_set_beacon_state_MASK 0x000000FF
1495#define lpfc_mbx_set_beacon_state_WORD word4
1496#define lpfc_mbx_set_beacon_duration_SHIFT 16
1497#define lpfc_mbx_set_beacon_duration_MASK 0x000000FF
1498#define lpfc_mbx_set_beacon_duration_WORD word4
1499#define lpfc_mbx_set_beacon_status_duration_SHIFT 24
1500#define lpfc_mbx_set_beacon_status_duration_MASK 0x000000FF
1501#define lpfc_mbx_set_beacon_status_duration_WORD word4
1502};
1503
1482struct lpfc_id_range { 1504struct lpfc_id_range {
1483 uint32_t word5; 1505 uint32_t word5;
1484#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0 1506#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0
@@ -1921,6 +1943,12 @@ struct lpfc_mbx_redisc_fcf_tbl {
1921#define STATUS_FCF_IN_USE 0x3a 1943#define STATUS_FCF_IN_USE 0x3a
1922#define STATUS_FCF_TABLE_EMPTY 0x43 1944#define STATUS_FCF_TABLE_EMPTY 0x43
1923 1945
1946/*
1947 * Additional status field for embedded SLI_CONFIG mailbox
1948 * command.
1949 */
1950#define ADD_STATUS_OPERATION_ALREADY_ACTIVE 0x67
1951
1924struct lpfc_mbx_sli4_config { 1952struct lpfc_mbx_sli4_config {
1925 struct mbox_header header; 1953 struct mbox_header header;
1926}; 1954};
@@ -2433,6 +2461,205 @@ struct lpfc_mbx_supp_pages {
2433#define LPFC_SLI4_PARAMETERS 2 2461#define LPFC_SLI4_PARAMETERS 2
2434}; 2462};
2435 2463
2464struct lpfc_mbx_memory_dump_type3 {
2465 uint32_t word1;
2466#define lpfc_mbx_memory_dump_type3_type_SHIFT 0
2467#define lpfc_mbx_memory_dump_type3_type_MASK 0x0000000f
2468#define lpfc_mbx_memory_dump_type3_type_WORD word1
2469#define lpfc_mbx_memory_dump_type3_link_SHIFT 24
2470#define lpfc_mbx_memory_dump_type3_link_MASK 0x000000ff
2471#define lpfc_mbx_memory_dump_type3_link_WORD word1
2472 uint32_t word2;
2473#define lpfc_mbx_memory_dump_type3_page_no_SHIFT 0
2474#define lpfc_mbx_memory_dump_type3_page_no_MASK 0x0000ffff
2475#define lpfc_mbx_memory_dump_type3_page_no_WORD word2
2476#define lpfc_mbx_memory_dump_type3_offset_SHIFT 16
2477#define lpfc_mbx_memory_dump_type3_offset_MASK 0x0000ffff
2478#define lpfc_mbx_memory_dump_type3_offset_WORD word2
2479 uint32_t word3;
2480#define lpfc_mbx_memory_dump_type3_length_SHIFT 0
2481#define lpfc_mbx_memory_dump_type3_length_MASK 0x00ffffff
2482#define lpfc_mbx_memory_dump_type3_length_WORD word3
2483 uint32_t addr_lo;
2484 uint32_t addr_hi;
2485 uint32_t return_len;
2486};
2487
2488#define DMP_PAGE_A0 0xa0
2489#define DMP_PAGE_A2 0xa2
2490#define DMP_SFF_PAGE_A0_SIZE 256
2491#define DMP_SFF_PAGE_A2_SIZE 256
2492
2493#define SFP_WAVELENGTH_LC1310 1310
2494#define SFP_WAVELENGTH_LL1550 1550
2495
2496
2497/*
2498 * SFF-8472 TABLE 3.4
2499 */
2500#define SFF_PG0_CONNECTOR_UNKNOWN 0x00 /* Unknown */
2501#define SFF_PG0_CONNECTOR_SC 0x01 /* SC */
2502#define SFF_PG0_CONNECTOR_FC_COPPER1 0x02 /* FC style 1 copper connector */
2503#define SFF_PG0_CONNECTOR_FC_COPPER2 0x03 /* FC style 2 copper connector */
2504#define SFF_PG0_CONNECTOR_BNC 0x04 /* BNC / TNC */
2505#define SFF_PG0_CONNECTOR__FC_COAX 0x05 /* FC coaxial headers */
2506#define SFF_PG0_CONNECTOR_FIBERJACK 0x06 /* FiberJack */
2507#define SFF_PG0_CONNECTOR_LC 0x07 /* LC */
2508#define SFF_PG0_CONNECTOR_MT 0x08 /* MT - RJ */
2509#define SFF_PG0_CONNECTOR_MU 0x09 /* MU */
2510#define SFF_PG0_CONNECTOR_SF 0x0A /* SG */
2511#define SFF_PG0_CONNECTOR_OPTICAL_PIGTAIL 0x0B /* Optical pigtail */
2512#define SFF_PG0_CONNECTOR_OPTICAL_PARALLEL 0x0C /* MPO Parallel Optic */
2513#define SFF_PG0_CONNECTOR_HSSDC_II 0x20 /* HSSDC II */
2514#define SFF_PG0_CONNECTOR_COPPER_PIGTAIL 0x21 /* Copper pigtail */
2515#define SFF_PG0_CONNECTOR_RJ45 0x22 /* RJ45 */
2516
2517/* SFF-8472 Table 3.1 Diagnostics: Data Fields Address/Page A0 */
2518
2519#define SSF_IDENTIFIER 0
2520#define SSF_EXT_IDENTIFIER 1
2521#define SSF_CONNECTOR 2
2522#define SSF_TRANSCEIVER_CODE_B0 3
2523#define SSF_TRANSCEIVER_CODE_B1 4
2524#define SSF_TRANSCEIVER_CODE_B2 5
2525#define SSF_TRANSCEIVER_CODE_B3 6
2526#define SSF_TRANSCEIVER_CODE_B4 7
2527#define SSF_TRANSCEIVER_CODE_B5 8
2528#define SSF_TRANSCEIVER_CODE_B6 9
2529#define SSF_TRANSCEIVER_CODE_B7 10
2530#define SSF_ENCODING 11
2531#define SSF_BR_NOMINAL 12
2532#define SSF_RATE_IDENTIFIER 13
2533#define SSF_LENGTH_9UM_KM 14
2534#define SSF_LENGTH_9UM 15
2535#define SSF_LENGTH_50UM_OM2 16
2536#define SSF_LENGTH_62UM_OM1 17
2537#define SFF_LENGTH_COPPER 18
2538#define SSF_LENGTH_50UM_OM3 19
2539#define SSF_VENDOR_NAME 20
2540#define SSF_VENDOR_OUI 36
2541#define SSF_VENDOR_PN 40
2542#define SSF_VENDOR_REV 56
2543#define SSF_WAVELENGTH_B1 60
2544#define SSF_WAVELENGTH_B0 61
2545#define SSF_CC_BASE 63
2546#define SSF_OPTIONS_B1 64
2547#define SSF_OPTIONS_B0 65
2548#define SSF_BR_MAX 66
2549#define SSF_BR_MIN 67
2550#define SSF_VENDOR_SN 68
2551#define SSF_DATE_CODE 84
2552#define SSF_MONITORING_TYPEDIAGNOSTIC 92
2553#define SSF_ENHANCED_OPTIONS 93
2554#define SFF_8472_COMPLIANCE 94
2555#define SSF_CC_EXT 95
2556#define SSF_A0_VENDOR_SPECIFIC 96
2557
2558/* SFF-8472 Table 3.1a Diagnostics: Data Fields Address/Page A2 */
2559
2560#define SSF_AW_THRESHOLDS 0
2561#define SSF_EXT_CAL_CONSTANTS 56
2562#define SSF_CC_DMI 95
2563#define SFF_TEMPERATURE_B1 96
2564#define SFF_TEMPERATURE_B0 97
2565#define SFF_VCC_B1 98
2566#define SFF_VCC_B0 99
2567#define SFF_TX_BIAS_CURRENT_B1 100
2568#define SFF_TX_BIAS_CURRENT_B0 101
2569#define SFF_TXPOWER_B1 102
2570#define SFF_TXPOWER_B0 103
2571#define SFF_RXPOWER_B1 104
2572#define SFF_RXPOWER_B0 105
2573#define SSF_STATUS_CONTROL 110
2574#define SSF_ALARM_FLAGS_B1 112
2575#define SSF_ALARM_FLAGS_B0 113
2576#define SSF_WARNING_FLAGS_B1 116
2577#define SSF_WARNING_FLAGS_B0 117
2578#define SSF_EXT_TATUS_CONTROL_B1 118
2579#define SSF_EXT_TATUS_CONTROL_B0 119
2580#define SSF_A2_VENDOR_SPECIFIC 120
2581#define SSF_USER_EEPROM 128
2582#define SSF_VENDOR_CONTROL 148
2583
2584
2585/*
2586 * Tranceiver codes Fibre Channel SFF-8472
2587 * Table 3.5.
2588 */
2589
2590struct sff_trasnceiver_codes_byte0 {
2591 uint8_t inifiband:4;
2592 uint8_t teng_ethernet:4;
2593};
2594
2595struct sff_trasnceiver_codes_byte1 {
2596 uint8_t sonet:6;
2597 uint8_t escon:2;
2598};
2599
2600struct sff_trasnceiver_codes_byte2 {
2601 uint8_t soNet:8;
2602};
2603
2604struct sff_trasnceiver_codes_byte3 {
2605 uint8_t ethernet:8;
2606};
2607
2608struct sff_trasnceiver_codes_byte4 {
2609 uint8_t fc_el_lo:1;
2610 uint8_t fc_lw_laser:1;
2611 uint8_t fc_sw_laser:1;
2612 uint8_t fc_md_distance:1;
2613 uint8_t fc_lg_distance:1;
2614 uint8_t fc_int_distance:1;
2615 uint8_t fc_short_distance:1;
2616 uint8_t fc_vld_distance:1;
2617};
2618
2619struct sff_trasnceiver_codes_byte5 {
2620 uint8_t reserved1:1;
2621 uint8_t reserved2:1;
2622 uint8_t fc_sfp_active:1; /* Active cable */
2623 uint8_t fc_sfp_passive:1; /* Passive cable */
2624 uint8_t fc_lw_laser:1; /* Longwave laser */
2625 uint8_t fc_sw_laser_sl:1;
2626 uint8_t fc_sw_laser_sn:1;
2627 uint8_t fc_el_hi:1; /* Electrical enclosure high bit */
2628};
2629
2630struct sff_trasnceiver_codes_byte6 {
2631 uint8_t fc_tm_sm:1; /* Single Mode */
2632 uint8_t reserved:1;
2633 uint8_t fc_tm_m6:1; /* Multimode, 62.5um (M6) */
2634 uint8_t fc_tm_tv:1; /* Video Coax (TV) */
2635 uint8_t fc_tm_mi:1; /* Miniature Coax (MI) */
2636 uint8_t fc_tm_tp:1; /* Twisted Pair (TP) */
2637 uint8_t fc_tm_tw:1; /* Twin Axial Pair */
2638};
2639
2640struct sff_trasnceiver_codes_byte7 {
2641 uint8_t fc_sp_100MB:1; /* 100 MB/sec */
2642 uint8_t reserve:1;
2643 uint8_t fc_sp_200mb:1; /* 200 MB/sec */
2644 uint8_t fc_sp_3200MB:1; /* 3200 MB/sec */
2645 uint8_t fc_sp_400MB:1; /* 400 MB/sec */
2646 uint8_t fc_sp_1600MB:1; /* 1600 MB/sec */
2647 uint8_t fc_sp_800MB:1; /* 800 MB/sec */
2648 uint8_t fc_sp_1200MB:1; /* 1200 MB/sec */
2649};
2650
2651/* User writable non-volatile memory, SFF-8472 Table 3.20 */
2652struct user_eeprom {
2653 uint8_t vendor_name[16];
2654 uint8_t vendor_oui[3];
2655 uint8_t vendor_pn[16];
2656 uint8_t vendor_rev[4];
2657 uint8_t vendor_sn[16];
2658 uint8_t datecode[6];
2659 uint8_t lot_code[2];
2660 uint8_t reserved191[57];
2661};
2662
2436struct lpfc_mbx_pc_sli4_params { 2663struct lpfc_mbx_pc_sli4_params {
2437 uint32_t word1; 2664 uint32_t word1;
2438#define qs_SHIFT 0 2665#define qs_SHIFT 0
@@ -3021,6 +3248,7 @@ struct lpfc_mqe {
3021 struct lpfc_mbx_request_features req_ftrs; 3248 struct lpfc_mbx_request_features req_ftrs;
3022 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; 3249 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
3023 struct lpfc_mbx_query_fw_config query_fw_cfg; 3250 struct lpfc_mbx_query_fw_config query_fw_cfg;
3251 struct lpfc_mbx_set_beacon_config beacon_config;
3024 struct lpfc_mbx_supp_pages supp_pages; 3252 struct lpfc_mbx_supp_pages supp_pages;
3025 struct lpfc_mbx_pc_sli4_params sli4_params; 3253 struct lpfc_mbx_pc_sli4_params sli4_params;
3026 struct lpfc_mbx_get_sli4_parameters get_sli4_parameters; 3254 struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
@@ -3031,6 +3259,7 @@ struct lpfc_mqe {
3031 struct lpfc_mbx_get_prof_cfg get_prof_cfg; 3259 struct lpfc_mbx_get_prof_cfg get_prof_cfg;
3032 struct lpfc_mbx_wr_object wr_object; 3260 struct lpfc_mbx_wr_object wr_object;
3033 struct lpfc_mbx_get_port_name get_port_name; 3261 struct lpfc_mbx_get_port_name get_port_name;
3262 struct lpfc_mbx_memory_dump_type3 mem_dump_type3;
3034 struct lpfc_mbx_nop nop; 3263 struct lpfc_mbx_nop nop;
3035 } un; 3264 } un;
3036}; 3265};
@@ -3041,8 +3270,8 @@ struct lpfc_mcqe {
3041#define lpfc_mcqe_status_MASK 0x0000FFFF 3270#define lpfc_mcqe_status_MASK 0x0000FFFF
3042#define lpfc_mcqe_status_WORD word0 3271#define lpfc_mcqe_status_WORD word0
3043#define lpfc_mcqe_ext_status_SHIFT 16 3272#define lpfc_mcqe_ext_status_SHIFT 16
3044#define lpfc_mcqe_ext_status_MASK 0x0000FFFF 3273#define lpfc_mcqe_ext_status_MASK 0x0000FFFF
3045#define lpfc_mcqe_ext_status_WORD word0 3274#define lpfc_mcqe_ext_status_WORD word0
3046 uint32_t mcqe_tag0; 3275 uint32_t mcqe_tag0;
3047 uint32_t mcqe_tag1; 3276 uint32_t mcqe_tag1;
3048 uint32_t trailer; 3277 uint32_t trailer;
@@ -3176,6 +3405,7 @@ struct lpfc_acqe_fc_la {
3176#define LPFC_FC_LA_SPEED_8G 0x8 3405#define LPFC_FC_LA_SPEED_8G 0x8
3177#define LPFC_FC_LA_SPEED_10G 0xA 3406#define LPFC_FC_LA_SPEED_10G 0xA
3178#define LPFC_FC_LA_SPEED_16G 0x10 3407#define LPFC_FC_LA_SPEED_16G 0x10
3408#define LPFC_FC_LA_SPEED_32G 0x20
3179#define lpfc_acqe_fc_la_topology_SHIFT 16 3409#define lpfc_acqe_fc_la_topology_SHIFT 16
3180#define lpfc_acqe_fc_la_topology_MASK 0x000000FF 3410#define lpfc_acqe_fc_la_topology_MASK 0x000000FF
3181#define lpfc_acqe_fc_la_topology_WORD word0 3411#define lpfc_acqe_fc_la_topology_WORD word0
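
All of the new _SHIFT/_MASK/_WORD triplets in lpfc_hw4.h are consumed through the driver's bf_set()/bf_get() token-pasting accessors. The sketch below reimplements that mechanism in userspace against the beacon-config fields; the macro bodies follow the usual lpfc pattern but should be read as a stand-in, not the header's exact text.

#include <assert.h>
#include <stdint.h>

/* field descriptors copied from the lpfc_mbx_set_beacon_config hunk */
#define lpfc_mbx_set_beacon_state_SHIFT    8
#define lpfc_mbx_set_beacon_state_MASK     0x000000FF
#define lpfc_mbx_set_beacon_state_WORD     word4
#define lpfc_mbx_set_beacon_duration_SHIFT 16
#define lpfc_mbx_set_beacon_duration_MASK  0x000000FF
#define lpfc_mbx_set_beacon_duration_WORD  word4

/* stand-ins for the driver's bf_set()/bf_get() accessors */
#define bf_set(name, ptr, value)                                        \
    ((ptr)->name##_WORD = (((ptr)->name##_WORD &                        \
                            ~(name##_MASK << name##_SHIFT)) |           \
                           (((value) & name##_MASK) << name##_SHIFT)))
#define bf_get(name, ptr)                                               \
    (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

struct beacon_cfg { uint32_t word4; };   /* toy container for word4 */

int main(void)
{
    struct beacon_cfg cfg = { 0 };

    bf_set(lpfc_mbx_set_beacon_state, &cfg, 1);     /* beacon on */
    bf_set(lpfc_mbx_set_beacon_duration, &cfg, 30); /* illustrative duration */
    assert(bf_get(lpfc_mbx_set_beacon_state, &cfg) == 1);
    assert(bf_get(lpfc_mbx_set_beacon_duration, &cfg) == 30);
    assert(cfg.word4 == ((30u << 16) | (1u << 8)));
    return 0;
}
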
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index e8c8c1ecc1f5..f962118da8ed 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3303,6 +3303,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
3303 shost->max_lun = vport->cfg_max_luns; 3303 shost->max_lun = vport->cfg_max_luns;
3304 shost->this_id = -1; 3304 shost->this_id = -1;
3305 shost->max_cmd_len = 16; 3305 shost->max_cmd_len = 16;
3306 shost->nr_hw_queues = phba->cfg_fcp_io_channel;
3306 if (phba->sli_rev == LPFC_SLI_REV4) { 3307 if (phba->sli_rev == LPFC_SLI_REV4) {
3307 shost->dma_boundary = 3308 shost->dma_boundary =
3308 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 3309 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
@@ -4483,7 +4484,13 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
4483 lpfc_destroy_vport_work_array(phba, vports); 4484 lpfc_destroy_vport_work_array(phba, vports);
4484 } 4485 }
4485 4486
4486 if (active_vlink_present) { 4487 /*
4488 * Don't re-instantiate if the vport is marked for deletion.
4489 * If we get here first, vport_delete is going to wait
4490 * for discovery to complete.
4491 */
4492 if (!(vport->load_flag & FC_UNLOADING) &&
4493 active_vlink_present) {
4487 /* 4494 /*
4488 * If there are other active VLinks present, 4495 * If there are other active VLinks present,
4489 * re-instantiate the Vlink using FDISC. 4496 * re-instantiate the Vlink using FDISC.
@@ -7500,6 +7507,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7500 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 7507 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
7501 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 7508 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
7502 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 7509 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
7510 phba->sli4_hba.physical_port =
7511 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
7503 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7512 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7504 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 7513 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
7505 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 7514 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
@@ -8367,7 +8376,7 @@ lpfc_sli_enable_msix(struct lpfc_hba *phba)
8367 8376
8368 /* vector-0 is associated to slow-path handler */ 8377 /* vector-0 is associated to slow-path handler */
8369 rc = request_irq(phba->msix_entries[0].vector, 8378 rc = request_irq(phba->msix_entries[0].vector,
8370 &lpfc_sli_sp_intr_handler, IRQF_SHARED, 8379 &lpfc_sli_sp_intr_handler, 0,
8371 LPFC_SP_DRIVER_HANDLER_NAME, phba); 8380 LPFC_SP_DRIVER_HANDLER_NAME, phba);
8372 if (rc) { 8381 if (rc) {
8373 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8382 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -8378,7 +8387,7 @@ lpfc_sli_enable_msix(struct lpfc_hba *phba)
8378 8387
8379 /* vector-1 is associated to fast-path handler */ 8388 /* vector-1 is associated to fast-path handler */
8380 rc = request_irq(phba->msix_entries[1].vector, 8389 rc = request_irq(phba->msix_entries[1].vector,
8381 &lpfc_sli_fp_intr_handler, IRQF_SHARED, 8390 &lpfc_sli_fp_intr_handler, 0,
8382 LPFC_FP_DRIVER_HANDLER_NAME, phba); 8391 LPFC_FP_DRIVER_HANDLER_NAME, phba);
8383 8392
8384 if (rc) { 8393 if (rc) {
@@ -8487,7 +8496,7 @@ lpfc_sli_enable_msi(struct lpfc_hba *phba)
8487 } 8496 }
8488 8497
8489 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 8498 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
8490 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 8499 0, LPFC_DRIVER_NAME, phba);
8491 if (rc) { 8500 if (rc) {
8492 pci_disable_msi(phba->pcidev); 8501 pci_disable_msi(phba->pcidev);
8493 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8502 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -8944,13 +8953,13 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
8944 if (phba->cfg_fof && (index == (vectors - 1))) 8953 if (phba->cfg_fof && (index == (vectors - 1)))
8945 rc = request_irq( 8954 rc = request_irq(
8946 phba->sli4_hba.msix_entries[index].vector, 8955 phba->sli4_hba.msix_entries[index].vector,
8947 &lpfc_sli4_fof_intr_handler, IRQF_SHARED, 8956 &lpfc_sli4_fof_intr_handler, 0,
8948 (char *)&phba->sli4_hba.handler_name[index], 8957 (char *)&phba->sli4_hba.handler_name[index],
8949 &phba->sli4_hba.fcp_eq_hdl[index]); 8958 &phba->sli4_hba.fcp_eq_hdl[index]);
8950 else 8959 else
8951 rc = request_irq( 8960 rc = request_irq(
8952 phba->sli4_hba.msix_entries[index].vector, 8961 phba->sli4_hba.msix_entries[index].vector,
8953 &lpfc_sli4_hba_intr_handler, IRQF_SHARED, 8962 &lpfc_sli4_hba_intr_handler, 0,
8954 (char *)&phba->sli4_hba.handler_name[index], 8963 (char *)&phba->sli4_hba.handler_name[index],
8955 &phba->sli4_hba.fcp_eq_hdl[index]); 8964 &phba->sli4_hba.fcp_eq_hdl[index]);
8956 if (rc) { 8965 if (rc) {
@@ -8972,7 +8981,8 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
8972 phba->cfg_fcp_io_channel = vectors; 8981 phba->cfg_fcp_io_channel = vectors;
8973 } 8982 }
8974 8983
8975 lpfc_sli4_set_affinity(phba, vectors); 8984 if (!shost_use_blk_mq(lpfc_shost_from_vport(phba->pport)))
8985 lpfc_sli4_set_affinity(phba, vectors);
8976 return rc; 8986 return rc;
8977 8987
8978cfg_fail_out: 8988cfg_fail_out:
@@ -9050,7 +9060,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
9050 } 9060 }
9051 9061
9052 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 9062 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
9053 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 9063 0, LPFC_DRIVER_NAME, phba);
9054 if (rc) { 9064 if (rc) {
9055 pci_disable_msi(phba->pcidev); 9065 pci_disable_msi(phba->pcidev);
9056 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 9066 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 816f596cda60..eb627724417e 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2255,6 +2255,158 @@ lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
2255 return 0; 2255 return 0;
2256} 2256}
2257 2257
2258void
2259lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2260{
2261 MAILBOX_t *mb;
2262 int rc = FAILURE;
2263 struct lpfc_rdp_context *rdp_context =
2264 (struct lpfc_rdp_context *)(mboxq->context2);
2265
2266 mb = &mboxq->u.mb;
2267 if (mb->mbxStatus)
2268 goto mbx_failed;
2269
2270 memcpy(&rdp_context->link_stat, &mb->un.varRdLnk, sizeof(READ_LNK_VAR));
2271
2272 rc = SUCCESS;
2273
2274mbx_failed:
2275 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2276 rdp_context->cmpl(phba, rdp_context, rc);
2277}
2278
2279void
2280lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
2281{
2282 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) mbox->context1;
2283 struct lpfc_rdp_context *rdp_context =
2284 (struct lpfc_rdp_context *)(mbox->context2);
2285
2286 if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
2287 goto error;
2288
2289 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2,
2290 DMP_SFF_PAGE_A2_SIZE);
2291
2292 /* We don't need the DMA buffer for the link stats. */
2293 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2294 kfree(mp);
2295
2296 memset(mbox, 0, sizeof(*mbox));
2297 lpfc_read_lnk_stat(phba, mbox);
2298 mbox->vport = rdp_context->ndlp->vport;
2299 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat;
2300 mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
2301 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
2302 goto error;
2303
2304 return;
2305
2306error:
2307 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2308 kfree(mp);
2309 lpfc_sli4_mbox_cmd_free(phba, mbox);
2310 rdp_context->cmpl(phba, rdp_context, FAILURE);
2311}
2312
2313void
2314lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
2315{
2316 int rc;
2317 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (mbox->context1);
2318 struct lpfc_rdp_context *rdp_context =
2319 (struct lpfc_rdp_context *)(mbox->context2);
2320
2321 if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
2322 goto error;
2323
2324 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0,
2325 DMP_SFF_PAGE_A0_SIZE);
2326
2327 memset(mbox, 0, sizeof(*mbox));
2328
2329 memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE);
2330 INIT_LIST_HEAD(&mp->list);
2331
2332 /* save address for completion */
2333 mbox->context1 = mp;
2334 mbox->vport = rdp_context->ndlp->vport;
2335
2336 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
2337 bf_set(lpfc_mbx_memory_dump_type3_type,
2338 &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
2339 bf_set(lpfc_mbx_memory_dump_type3_link,
2340 &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port);
2341 bf_set(lpfc_mbx_memory_dump_type3_page_no,
2342 &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2);
2343 bf_set(lpfc_mbx_memory_dump_type3_length,
2344 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE);
2345 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
2346 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
2347
2348 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a2;
2349 mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
2350 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
2351 if (rc == MBX_NOT_FINISHED)
2352 goto error;
2353
2354 return;
2355
2356error:
2357 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2358 kfree(mp);
2359 lpfc_sli4_mbox_cmd_free(phba, mbox);
2360 rdp_context->cmpl(phba, rdp_context, FAILURE);
2361}
2362
2363
2364/*
2365 * lpfc_sli4_dump_page_a0 - Dump SLI4 SFP diagnostic page A0.
2366 * @phba: pointer to the hba structure containing the physical port.
2367 * @mbox: pointer to the lpfc mbox command to initialize.
2368 *
2369 * This function creates a SLI4 dump mailbox command to dump memory
2370 * type 3, page 0xA0 (the SFF-8472 serial ID data).
2371 */
2372int
2373lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
2374{
2375 struct lpfc_dmabuf *mp = NULL;
2376
2377 memset(mbox, 0, sizeof(*mbox));
2378
2379 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2380 if (mp)
2381 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2382 if (!mp || !mp->virt) {
2383 kfree(mp);
2384 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
2385 "3569 dump type 3 page 0xA0 allocation failed\n");
2386 return 1;
2387 }
2388
2389 memset(mp->virt, 0, LPFC_BPL_SIZE);
2390 INIT_LIST_HEAD(&mp->list);
2391
2392 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
2393 /* save address for completion */
2394 mbox->context1 = mp;
2395
2396 bf_set(lpfc_mbx_memory_dump_type3_type,
2397 &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
2398 bf_set(lpfc_mbx_memory_dump_type3_link,
2399 &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port);
2400 bf_set(lpfc_mbx_memory_dump_type3_page_no,
2401 &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A0);
2402 bf_set(lpfc_mbx_memory_dump_type3_length,
2403 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE);
2404 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
2405 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
2406
2407 return 0;
2408}
2409
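
The three completion handlers added above form a chain: the page-A0 dump re-arms the same mailbox for page A2, which re-arms it for READ_LNK_STAT, and only the final step (or the first failure) invokes rdp_context->cmpl(). A compressed userspace model of that chained-callback pattern follows, with toy types in place of LPFC_MBOXQ_t and the DMA buffers, and synchronous calls standing in for asynchronous mailbox completions.

#include <stdio.h>

#define SUCCESS 0
#define FAILURE 1

struct rdp_ctx;
typedef void (*step_fn)(struct rdp_ctx *);

struct rdp_ctx {
    step_fn next;                          /* mbox->mbox_cmpl analogue */
    void (*cmpl)(struct rdp_ctx *, int);   /* rdp_context->cmpl analogue */
};

static void cmpl_link_stat(struct rdp_ctx *ctx)
{
    printf("step 3: READ_LNK_STAT done\n");
    ctx->cmpl(ctx, SUCCESS);               /* end of the chain */
}

static void cmpl_page_a2(struct rdp_ctx *ctx)
{
    printf("step 2: dumped SFF page A2, issuing link-stat mailbox\n");
    ctx->next = cmpl_link_stat;            /* re-arm for the next step */
    ctx->next(ctx);                        /* stands in for the async completion */
}

static void cmpl_page_a0(struct rdp_ctx *ctx)
{
    printf("step 1: dumped SFF page A0, issuing page-A2 dump\n");
    ctx->next = cmpl_page_a2;
    ctx->next(ctx);
}

static void rdp_done(struct rdp_ctx *ctx, int rc)
{
    (void)ctx;
    printf("RDP context complete, rc=%d\n", rc);
}

int main(void)
{
    struct rdp_ctx ctx = { .next = cmpl_page_a0, .cmpl = rdp_done };

    ctx.next(&ctx);                        /* kick off the chain */
    return 0;
}
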
2258/** 2410/**
2259 * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command 2411 * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command
2260 * @phba: pointer to the hba structure containing the FCF index and RQ ID. 2412 * @phba: pointer to the hba structure containing the FCF index and RQ ID.
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 4cb9882af157..af3b38aba65e 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -661,7 +661,13 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
661 lpfc_destroy_vport_work_array(phba, vports); 661 lpfc_destroy_vport_work_array(phba, vports);
662 } 662 }
663 663
664 if (active_vlink_present) { 664 /*
665 * Don't re-instantiate if the vport is marked for deletion.
666 * If we get here first, vport_delete is going to wait
667 * for discovery to complete.
668 */
669 if (!(vport->load_flag & FC_UNLOADING) &&
670 active_vlink_present) {
665 /* 671 /*
666 * If there are other active VLinks present, 672 * If there are other active VLinks present,
667 * re-instantiate the Vlink using FDISC. 673 * re-instantiate the Vlink using FDISC.
@@ -1868,7 +1874,7 @@ lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1868 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; 1874 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1869 1875
1870 spin_lock_irq(shost->host_lock); 1876 spin_lock_irq(shost->host_lock);
1871 ndlp->nlp_flag &= NLP_LOGO_ACC; 1877 ndlp->nlp_flag |= NLP_LOGO_ACC;
1872 spin_unlock_irq(shost->host_lock); 1878 spin_unlock_irq(shost->host_lock);
1873 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 1879 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
1874 return ndlp->nlp_state; 1880 return ndlp->nlp_state;
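
The one-character lpfc_rcv_logo_logo_issue fix deserves a note: `flag &= NLP_LOGO_ACC` masks the flag word down to that single bit, clearing every other flag (and, unless the bit was already set, leaving zero), whereas `flag |= NLP_LOGO_ACC` sets the bit and preserves the rest. A two-assert demonstration, with illustrative flag values rather than the driver's actual ones:

#include <assert.h>
#include <stdint.h>

#define NLP_NPR_2B_DISC 0x00000040   /* illustrative flag values */
#define NLP_LOGO_ACC    0x00100000

int main(void)
{
    uint32_t buggy = NLP_NPR_2B_DISC, fixed = NLP_NPR_2B_DISC;

    buggy &= NLP_LOGO_ACC;   /* wipes NPR_2B_DISC and leaves 0 */
    fixed |= NLP_LOGO_ACC;   /* sets LOGO_ACC, keeps NPR_2B_DISC */

    assert(buggy == 0);
    assert(fixed == (NLP_NPR_2B_DISC | NLP_LOGO_ACC));
    return 0;
}
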
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c140f99772ca..e5eb40d2c512 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3257,7 +3257,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3257 */ 3257 */
3258 3258
3259 nseg = scsi_dma_map(scsi_cmnd); 3259 nseg = scsi_dma_map(scsi_cmnd);
3260 if (unlikely(!nseg)) 3260 if (unlikely(nseg <= 0))
3261 return 1; 3261 return 1;
3262 sgl += 1; 3262 sgl += 1;
3263 /* clear the last flag in the fcp_rsp map entry */ 3263 /* clear the last flag in the fcp_rsp map entry */
@@ -3846,6 +3846,49 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
3846} 3846}
3847 3847
3848/** 3848/**
3849 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
3850 * @phba: Pointer to HBA context object.
3851 *
3852 * This routine performs round-robin distribution of SCSI commands over
3853 * the SLI4 FCP WQ indexes. It is called by __lpfc_sli_issue_iocb_s4() with
3854 * the hbalock held.
3855 * If scsi-mq is enabled, use the default block layer mapping of software
3856 * queues to hardware queues; this mapping is encoded in the request tag.
3857 *
3858 * Return: index into the SLI4 fast-path FCP queue array.
3859 **/
3860int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
3861 struct lpfc_scsi_buf *lpfc_cmd)
3862{
3863 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3864 struct lpfc_vector_map_info *cpup;
3865 int chann, cpu;
3866 uint32_t tag;
3867 uint16_t hwq;
3868
3869 if (shost_use_blk_mq(cmnd->device->host)) {
3870 tag = blk_mq_unique_tag(cmnd->request);
3871 hwq = blk_mq_unique_tag_to_hwq(tag);
3872
3873 return hwq;
3874 }
3875
3876 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
3877 && phba->cfg_fcp_io_channel > 1) {
3878 cpu = smp_processor_id();
3879 if (cpu < phba->sli4_hba.num_present_cpu) {
3880 cpup = phba->sli4_hba.cpu_map;
3881 cpup += cpu;
3882 return cpup->channel_id;
3883 }
3884 }
3885 chann = atomic_add_return(1, &phba->fcp_qidx);
3886 chann = (chann % phba->cfg_fcp_io_channel);
3887 return chann;
3888}
3889
3890
3891/**
3849 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine 3892 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
3850 * @phba: The Hba for which this call is being executed. 3893 * @phba: The Hba for which this call is being executed.
3851 * @pIocbIn: The command IOCBQ for the scsi cmnd. 3894 * @pIocbIn: The command IOCBQ for the scsi cmnd.
@@ -4537,7 +4580,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4537 if (lpfc_cmd == NULL) { 4580 if (lpfc_cmd == NULL) {
4538 lpfc_rampdown_queue_depth(phba); 4581 lpfc_rampdown_queue_depth(phba);
4539 4582
4540 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4583 lpfc_printf_vlog(vport, KERN_INFO, LOG_MISC,
4541 "0707 driver's buffer pool is empty, " 4584 "0707 driver's buffer pool is empty, "
4542 "IO busied\n"); 4585 "IO busied\n");
4543 goto out_host_busy; 4586 goto out_host_busy;
@@ -4968,13 +5011,16 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
4968 iocbq, iocbqrsp, lpfc_cmd->timeout); 5011 iocbq, iocbqrsp, lpfc_cmd->timeout);
4969 if ((status != IOCB_SUCCESS) || 5012 if ((status != IOCB_SUCCESS) ||
4970 (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) { 5013 (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
4971 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5014 if (status != IOCB_SUCCESS ||
4972 "0727 TMF %s to TGT %d LUN %llu failed (%d, %d) " 5015 iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
4973 "iocb_flag x%x\n", 5016 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4974 lpfc_taskmgmt_name(task_mgmt_cmd), 5017 "0727 TMF %s to TGT %d LUN %llu "
4975 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus, 5018 "failed (%d, %d) iocb_flag x%x\n",
4976 iocbqrsp->iocb.un.ulpWord[4], 5019 lpfc_taskmgmt_name(task_mgmt_cmd),
4977 iocbq->iocb_flag); 5020 tgt_id, lun_id,
5021 iocbqrsp->iocb.ulpStatus,
5022 iocbqrsp->iocb.un.ulpWord[4],
5023 iocbq->iocb_flag);
4978 /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */ 5024 /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
4979 if (status == IOCB_SUCCESS) { 5025 if (status == IOCB_SUCCESS) {
4980 if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 5026 if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
@@ -4988,7 +5034,6 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
4988 } else { 5034 } else {
4989 ret = FAILED; 5035 ret = FAILED;
4990 } 5036 }
4991 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4992 } else 5037 } else
4993 ret = SUCCESS; 5038 ret = SUCCESS;
4994 5039
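
When scsi-mq is not in use, the relocated lpfc_sli4_scmd_to_wqidx_distr() falls back to spreading commands across the FCP channels with an atomic counter taken modulo the channel count. A self-contained C11 sketch of that round-robin fallback (names are stand-ins for phba->fcp_qidx and cfg_fcp_io_channel):

#include <stdatomic.h>
#include <stdio.h>

#define NUM_FCP_IO_CHANNELS 4           /* cfg_fcp_io_channel stand-in */

static atomic_int fcp_qidx;             /* phba->fcp_qidx analogue */

static int next_wq_index(void)
{
    /* atomic_add_return(1, ...) followed by a modulo, as in the driver */
    int chann = atomic_fetch_add(&fcp_qidx, 1) + 1;
    return chann % NUM_FCP_IO_CHANNELS;
}

int main(void)
{
    for (int i = 0; i < 8; i++)
        printf("cmd %d -> WQ %d\n", i, next_wq_index());
    return 0;
}
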
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 474e30cdee6e..18b9260ccfac 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -184,3 +184,6 @@ struct lpfc_scsi_buf {
184#define FIND_FIRST_OAS_LUN 0 184#define FIND_FIRST_OAS_LUN 0
185#define NO_MORE_OAS_LUN -1 185#define NO_MORE_OAS_LUN -1
186#define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN 186#define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN
187
188int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
189 struct lpfc_scsi_buf *lpfc_cmd);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 56f73682d4bd..4feb9312a447 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -2249,7 +2249,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2249 vport->vpi, ndlp->nlp_rpi, 2249 vport->vpi, ndlp->nlp_rpi,
2250 ndlp->nlp_DID, 2250 ndlp->nlp_DID,
2251 ndlp->nlp_usg_map, ndlp); 2251 ndlp->nlp_usg_map, ndlp);
2252 2252 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2253 lpfc_nlp_put(ndlp); 2253 lpfc_nlp_put(ndlp);
2254 } 2254 }
2255 } 2255 }
@@ -8138,36 +8138,6 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
8138} 8138}
8139 8139
8140/** 8140/**
8141 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
8142 * @phba: Pointer to HBA context object.
8143 *
8144 * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
8145 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
8146 * held.
8147 *
8148 * Return: index into SLI4 fast-path FCP queue index.
8149 **/
8150static inline int
8151lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
8152{
8153 struct lpfc_vector_map_info *cpup;
8154 int chann, cpu;
8155
8156 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
8157 && phba->cfg_fcp_io_channel > 1) {
8158 cpu = smp_processor_id();
8159 if (cpu < phba->sli4_hba.num_present_cpu) {
8160 cpup = phba->sli4_hba.cpu_map;
8161 cpup += cpu;
8162 return cpup->channel_id;
8163 }
8164 }
8165 chann = atomic_add_return(1, &phba->fcp_qidx);
8166 chann = (chann % phba->cfg_fcp_io_channel);
8167 return chann;
8168}
8169
8170/**
8171 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry. 8141 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
8172 * @phba: Pointer to HBA context object. 8142 * @phba: Pointer to HBA context object.
8173 * @piocb: Pointer to command iocb. 8143 * @piocb: Pointer to command iocb.
@@ -8792,32 +8762,44 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8792 return 0; 8762 return 0;
8793} 8763}
8794 8764
8765/**
8766 * lpfc_sli_calc_ring - Calculates which ring to use
8767 * @phba: Pointer to HBA context object.
8768 * @ring_number: Initial ring
8769 * @piocb: Pointer to command iocb.
8770 *
8771 * For SLI4, FCP IO can be deferred to one of many WQs, based on
8772 * fcp_wqidx, so we need to calculate the corresponding ring.
8773 * Since ABORTs must go on the same WQ as the command they are
8774 * aborting, we use the command's fcp_wqidx.
8775 */
8795int 8776int
8796lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number, 8777lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number,
8797 struct lpfc_iocbq *piocb) 8778 struct lpfc_iocbq *piocb)
8798{ 8779{
8799 uint32_t idx; 8780 if (phba->sli_rev < LPFC_SLI_REV4)
8781 return ring_number;
8800 8782
8801 if (phba->sli_rev == LPFC_SLI_REV4) { 8783 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
8802 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) { 8784 if (!(phba->cfg_fof) ||
8785 (!(piocb->iocb_flag & LPFC_IO_FOF))) {
8786 if (unlikely(!phba->sli4_hba.fcp_wq))
8787 return LPFC_HBA_ERROR;
8803 /* 8788 /*
8804 * fcp_wqidx should already be setup based on what 8789 * for abort iocb fcp_wqidx should already
8805 * completion queue we want to use. 8790 * be setup based on what work queue we used.
8806 */ 8791 */
8807 if (!(phba->cfg_fof) || 8792 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
8808 (!(piocb->iocb_flag & LPFC_IO_FOF))) { 8793 piocb->fcp_wqidx =
8809 if (unlikely(!phba->sli4_hba.fcp_wq)) 8794 lpfc_sli4_scmd_to_wqidx_distr(phba,
8810 return LPFC_HBA_ERROR; 8795 piocb->context1);
8811 idx = lpfc_sli4_scmd_to_wqidx_distr(phba); 8796 ring_number = MAX_SLI3_CONFIGURED_RINGS +
8812 piocb->fcp_wqidx = idx; 8797 piocb->fcp_wqidx;
8813 ring_number = MAX_SLI3_CONFIGURED_RINGS + idx; 8798 } else {
8814 } else { 8799 if (unlikely(!phba->sli4_hba.oas_wq))
8815 if (unlikely(!phba->sli4_hba.oas_wq)) 8800 return LPFC_HBA_ERROR;
8816 return LPFC_HBA_ERROR; 8801 piocb->fcp_wqidx = 0;
8817 idx = 0; 8802 ring_number = LPFC_FCP_OAS_RING;
8818 piocb->fcp_wqidx = idx;
8819 ring_number = LPFC_FCP_OAS_RING;
8820 }
8821 } 8803 }
8822 } 8804 }
8823 return ring_number; 8805 return ring_number;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 6eca3b8124d3..d1a5b057c6f3 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -602,6 +602,7 @@ struct lpfc_sli4_hba {
602 struct lpfc_iov iov; 602 struct lpfc_iov iov;
603 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ 603 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
604 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */ 604 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
605 uint32_t physical_port;
605 606
606 /* CPU to vector mapping information */ 607 /* CPU to vector mapping information */
607 struct lpfc_vector_map_info *cpu_map; 608 struct lpfc_vector_map_info *cpu_map;
@@ -651,6 +652,26 @@ struct lpfc_rsrc_blks {
651 uint16_t rsrc_used; 652 uint16_t rsrc_used;
652}; 653};
653 654
655struct lpfc_rdp_context {
656 struct lpfc_nodelist *ndlp;
657 uint16_t ox_id;
658 uint16_t rx_id;
659 READ_LNK_VAR link_stat;
660 uint8_t page_a0[DMP_SFF_PAGE_A0_SIZE];
661 uint8_t page_a2[DMP_SFF_PAGE_A2_SIZE];
662 void (*cmpl)(struct lpfc_hba *, struct lpfc_rdp_context*, int);
663};
664
665struct lpfc_lcb_context {
666 uint8_t sub_command;
667 uint8_t type;
668 uint8_t frequency;
669 uint16_t ox_id;
670 uint16_t rx_id;
671 struct lpfc_nodelist *ndlp;
672};
673
674
654/* 675/*
655 * SLI4 specific function prototypes 676 * SLI4 specific function prototypes
656 */ 677 */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c37bb9f91c3b..6258d3d7722a 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "10.5.0.0." 21#define LPFC_DRIVER_VERSION "10.7.0.0."
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23 23
24/* Used for SLI 2/3 */ 24/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index a87ee33f4f2a..769012663a8f 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -567,8 +567,8 @@ int
567lpfc_vport_delete(struct fc_vport *fc_vport) 567lpfc_vport_delete(struct fc_vport *fc_vport)
568{ 568{
569 struct lpfc_nodelist *ndlp = NULL; 569 struct lpfc_nodelist *ndlp = NULL;
570 struct Scsi_Host *shost = (struct Scsi_Host *) fc_vport->shost;
571 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; 570 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
571 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
572 struct lpfc_hba *phba = vport->phba; 572 struct lpfc_hba *phba = vport->phba;
573 long timeout; 573 long timeout;
574 bool ns_ndlp_referenced = false; 574 bool ns_ndlp_referenced = false;
@@ -645,8 +645,8 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
645 } 645 }
646 646
647 /* Remove FC host and then SCSI host with the vport */ 647 /* Remove FC host and then SCSI host with the vport */
648 fc_remove_host(lpfc_shost_from_vport(vport)); 648 fc_remove_host(shost);
649 scsi_remove_host(lpfc_shost_from_vport(vport)); 649 scsi_remove_host(shost);
650 650
651 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 651 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
652 652
@@ -772,7 +772,8 @@ skip_logo:
772 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) 772 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
773 * does the scsi_host_put() to release the vport. 773 * does the scsi_host_put() to release the vport.
774 */ 774 */
775 if (lpfc_mbx_unreg_vpi(vport)) 775 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) ||
776 lpfc_mbx_unreg_vpi(vport))
776 scsi_host_put(shost); 777 scsi_host_put(shost);
777 } else 778 } else
778 scsi_host_put(shost); 779 scsi_host_put(shost);
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 0adb2e015597..141226631429 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -403,7 +403,6 @@ static struct scsi_host_template mac53c94_template = {
403 .can_queue = 1, 403 .can_queue = 1,
404 .this_id = 7, 404 .this_id = 7,
405 .sg_tablesize = SG_ALL, 405 .sg_tablesize = SG_ALL,
406 .cmd_per_lun = 1,
407 .use_clustering = DISABLE_CLUSTERING, 406 .use_clustering = DISABLE_CLUSTERING,
408}; 407};
409 408
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 14e5c7cea929..20c37541963f 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,7 +35,8 @@
35/* 35/*
36 * MegaRAID SAS Driver meta data 36 * MegaRAID SAS Driver meta data
37 */ 37 */
38#define MEGASAS_VERSION "06.806.08.00-rc1" 38#define MEGASAS_VERSION "06.807.10.00-rc1"
39#define MEGASAS_RELDATE "March 6, 2015"
39 40
40/* 41/*
41 * Device IDs 42 * Device IDs
@@ -153,6 +154,9 @@
153#define MFI_FRAME_DIR_BOTH 0x0018 154#define MFI_FRAME_DIR_BOTH 0x0018
154#define MFI_FRAME_IEEE 0x0020 155#define MFI_FRAME_IEEE 0x0020
155 156
157/* Driver internal */
158#define DRV_DCMD_POLLED_MODE 0x1
159
156/* 160/*
157 * Definition for cmd_status 161 * Definition for cmd_status
158 */ 162 */
@@ -408,7 +412,7 @@ enum MR_PD_STATE {
408 * defines the physical drive address structure 412 * defines the physical drive address structure
409 */ 413 */
410struct MR_PD_ADDRESS { 414struct MR_PD_ADDRESS {
411 u16 deviceId; 415 __le16 deviceId;
412 u16 enclDeviceId; 416 u16 enclDeviceId;
413 417
414 union { 418 union {
@@ -433,8 +437,8 @@ struct MR_PD_ADDRESS {
433 * defines the physical drive list structure 437 * defines the physical drive list structure
434 */ 438 */
435struct MR_PD_LIST { 439struct MR_PD_LIST {
436 u32 size; 440 __le32 size;
437 u32 count; 441 __le32 count;
438 struct MR_PD_ADDRESS addr[1]; 442 struct MR_PD_ADDRESS addr[1];
439} __packed; 443} __packed;
440 444
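
This hunk and the long run that follows annotate every firmware-visible field as fixed little-endian (__le16/__le32/__le64) so sparse can flag any access that skips the byte-swap helpers. A minimal userspace sketch of the discipline, using glibc's <endian.h> where in-kernel code would use cpu_to_le32()/le32_to_cpu(); the toy struct only loosely models MR_PD_LIST:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct toy_pd_list {            /* loosely models struct MR_PD_LIST */
	uint32_t size;          /* little-endian on the wire (__le32) */
	uint32_t count;         /* little-endian on the wire (__le32) */
} __attribute__((packed));

int main(void)
{
	struct toy_pd_list l;

	/* Driver side: convert CPU order to little-endian before posting. */
	l.size  = htole32(sizeof(l));
	l.count = htole32(2);

	/* Reader side: convert back before doing any arithmetic. */
	printf("count = %u\n", (unsigned)le32toh(l.count));
	return 0;
}

On a little-endian host both conversions compile to nothing; the annotations exist so big-endian builds stay correct and sparse can prove it.
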
@@ -451,28 +455,28 @@ union MR_LD_REF {
451 struct { 455 struct {
452 u8 targetId; 456 u8 targetId;
453 u8 reserved; 457 u8 reserved;
454 u16 seqNum; 458 __le16 seqNum;
455 }; 459 };
456 u32 ref; 460 __le32 ref;
457} __packed; 461} __packed;
458 462
459/* 463/*
460 * defines the logical drive list structure 464 * defines the logical drive list structure
461 */ 465 */
462struct MR_LD_LIST { 466struct MR_LD_LIST {
463 u32 ldCount; 467 __le32 ldCount;
464 u32 reserved; 468 __le32 reserved;
465 struct { 469 struct {
466 union MR_LD_REF ref; 470 union MR_LD_REF ref;
467 u8 state; 471 u8 state;
468 u8 reserved[3]; 472 u8 reserved[3];
469 u64 size; 473 __le64 size;
470 } ldList[MAX_LOGICAL_DRIVES_EXT]; 474 } ldList[MAX_LOGICAL_DRIVES_EXT];
471} __packed; 475} __packed;
472 476
473struct MR_LD_TARGETID_LIST { 477struct MR_LD_TARGETID_LIST {
474 u32 size; 478 __le32 size;
475 u32 count; 479 __le32 count;
476 u8 pad[3]; 480 u8 pad[3];
477 u8 targetId[MAX_LOGICAL_DRIVES_EXT]; 481 u8 targetId[MAX_LOGICAL_DRIVES_EXT];
478}; 482};
@@ -553,7 +557,7 @@ struct megasas_ctrl_prop {
553 } OnOffProperties; 557 } OnOffProperties;
554 u8 autoSnapVDSpace; 558 u8 autoSnapVDSpace;
555 u8 viewSpace; 559 u8 viewSpace;
556 u16 spinDownTime; 560 __le16 spinDownTime;
557 u8 reserved[24]; 561 u8 reserved[24];
558} __packed; 562} __packed;
559 563
@@ -567,10 +571,10 @@ struct megasas_ctrl_info {
567 */ 571 */
568 struct { 572 struct {
569 573
570 u16 vendor_id; 574 __le16 vendor_id;
571 u16 device_id; 575 __le16 device_id;
572 u16 sub_vendor_id; 576 __le16 sub_vendor_id;
573 u16 sub_device_id; 577 __le16 sub_device_id;
574 u8 reserved[24]; 578 u8 reserved[24];
575 579
576 } __attribute__ ((packed)) pci; 580 } __attribute__ ((packed)) pci;
@@ -611,8 +615,8 @@ struct megasas_ctrl_info {
611 /* 615 /*
612 * List of components residing in flash. All strings are null terminated 616
613 */ 617 */
614 u32 image_check_word; 618 __le32 image_check_word;
615 u32 image_component_count; 619 __le32 image_component_count;
616 620
617 struct { 621 struct {
618 622
@@ -629,7 +633,7 @@ struct megasas_ctrl_info {
629 * empty if a flash operation has not occurred. All strings are null 633
630 * terminated 634 * terminated
631 */ 635 */
632 u32 pending_image_component_count; 636 __le32 pending_image_component_count;
633 637
634 struct { 638 struct {
635 639
@@ -662,39 +666,39 @@ struct megasas_ctrl_info {
662 666
663 } __attribute__ ((packed)) hw_present; 667 } __attribute__ ((packed)) hw_present;
664 668
665 u32 current_fw_time; 669 __le32 current_fw_time;
666 670
667 /* 671 /*
668 * Maximum data transfer sizes 672 * Maximum data transfer sizes
669 */ 673 */
670 u16 max_concurrent_cmds; 674 __le16 max_concurrent_cmds;
671 u16 max_sge_count; 675 __le16 max_sge_count;
672 u32 max_request_size; 676 __le32 max_request_size;
673 677
674 /* 678 /*
675 * Logical and physical device counts 679 * Logical and physical device counts
676 */ 680 */
677 u16 ld_present_count; 681 __le16 ld_present_count;
678 u16 ld_degraded_count; 682 __le16 ld_degraded_count;
679 u16 ld_offline_count; 683 __le16 ld_offline_count;
680 684
681 u16 pd_present_count; 685 __le16 pd_present_count;
682 u16 pd_disk_present_count; 686 __le16 pd_disk_present_count;
683 u16 pd_disk_pred_failure_count; 687 __le16 pd_disk_pred_failure_count;
684 u16 pd_disk_failed_count; 688 __le16 pd_disk_failed_count;
685 689
686 /* 690 /*
687 * Memory size information 691 * Memory size information
688 */ 692 */
689 u16 nvram_size; 693 __le16 nvram_size;
690 u16 memory_size; 694 __le16 memory_size;
691 u16 flash_size; 695 __le16 flash_size;
692 696
693 /* 697 /*
694 * Error counters 698 * Error counters
695 */ 699 */
696 u16 mem_correctable_error_count; 700 __le16 mem_correctable_error_count;
697 u16 mem_uncorrectable_error_count; 701 __le16 mem_uncorrectable_error_count;
698 702
699 /* 703 /*
700 * Cluster information 704 * Cluster information
@@ -705,7 +709,7 @@ struct megasas_ctrl_info {
705 /* 709 /*
706 * Additional max data transfer sizes 710 * Additional max data transfer sizes
707 */ 711 */
708 u16 max_strips_per_io; 712 __le16 max_strips_per_io;
709 713
710 /* 714 /*
711 * Controller capabilities structures 715 * Controller capabilities structures
@@ -805,7 +809,7 @@ struct megasas_ctrl_info {
805 * deviceInterface.portAddr, and the rest shall be 809 * deviceInterface.portAddr, and the rest shall be
806 * populated in deviceInterfacePortAddr2. 810 * populated in deviceInterfacePortAddr2.
807 */ 811 */
808 u64 deviceInterfacePortAddr2[8]; /*6a0h */ 812 __le64 deviceInterfacePortAddr2[8]; /*6a0h */
809 u8 reserved3[128]; /*6e0h */ 813 u8 reserved3[128]; /*6e0h */
810 814
811 struct { /*760h */ 815 struct { /*760h */
@@ -842,26 +846,26 @@ struct megasas_ctrl_info {
842 u16 reserved[6]; 846 u16 reserved[6];
843 } pdsForRaidLevels; 847 } pdsForRaidLevels;
844 848
845 u16 maxPds; /*780h */ 849 __le16 maxPds; /*780h */
846 u16 maxDedHSPs; /*782h */ 850 __le16 maxDedHSPs; /*782h */
847 u16 maxGlobalHSPs; /*784h */ 851 __le16 maxGlobalHSP; /*784h */
848 u16 ddfSize; /*786h */ 852 __le16 ddfSize; /*786h */
849 u8 maxLdsPerArray; /*788h */ 853 u8 maxLdsPerArray; /*788h */
850 u8 partitionsInDDF; /*789h */ 854 u8 partitionsInDDF; /*789h */
851 u8 lockKeyBinding; /*78ah */ 855 u8 lockKeyBinding; /*78ah */
852 u8 maxPITsPerLd; /*78bh */ 856 u8 maxPITsPerLd; /*78bh */
853 u8 maxViewsPerLd; /*78ch */ 857 u8 maxViewsPerLd; /*78ch */
854 u8 maxTargetId; /*78dh */ 858 u8 maxTargetId; /*78dh */
855 u16 maxBvlVdSize; /*78eh */ 859 __le16 maxBvlVdSize; /*78eh */
856 860
857 u16 maxConfigurableSSCSize; /*790h */ 861 __le16 maxConfigurableSSCSize; /*790h */
858 u16 currentSSCsize; /*792h */ 862 __le16 currentSSCsize; /*792h */
859 863
860 char expanderFwVersion[12]; /*794h */ 864 char expanderFwVersion[12]; /*794h */
861 865
862 u16 PFKTrialTimeRemaining; /*7A0h */ 866 __le16 PFKTrialTimeRemaining; /*7A0h */
863 867
864 u16 cacheMemorySize; /*7A2h */ 868 __le16 cacheMemorySize; /*7A2h */
865 869
866 struct { /*7A4h */ 870 struct { /*7A4h */
867#if defined(__BIG_ENDIAN_BITFIELD) 871#if defined(__BIG_ENDIAN_BITFIELD)
@@ -931,7 +935,7 @@ struct megasas_ctrl_info {
931 u8 temperatureROC; /*7C9h */ 935 u8 temperatureROC; /*7C9h */
932 u8 temperatureCtrl; /*7CAh */ 936 u8 temperatureCtrl; /*7CAh */
933 u8 reserved4; /*7CBh */ 937 u8 reserved4; /*7CBh */
934 u16 maxConfigurablePds; /*7CCh */ 938 __le16 maxConfigurablePds; /*7CCh */
935 939
936 940
937 u8 reserved5[2]; /*0x7CDh */ 941 u8 reserved5[2]; /*0x7CDh */
@@ -1042,11 +1046,6 @@ struct megasas_ctrl_info {
1042 1046
1043#define VD_EXT_DEBUG 0 1047#define VD_EXT_DEBUG 0
1044 1048
1045enum MR_MFI_MPT_PTHR_FLAGS {
1046 MFI_MPT_DETACHED = 0,
1047 MFI_LIST_ADDED = 1,
1048 MFI_MPT_ATTACHED = 2,
1049};
1050 1049
1051enum MR_SCSI_CMD_TYPE { 1050enum MR_SCSI_CMD_TYPE {
1052 READ_WRITE_LDIO = 0, 1051 READ_WRITE_LDIO = 0,
@@ -1084,6 +1083,7 @@ enum MR_SCSI_CMD_TYPE {
1084#define MEGASAS_SKINNY_INT_CMDS 5 1083#define MEGASAS_SKINNY_INT_CMDS 5
1085#define MEGASAS_FUSION_INTERNAL_CMDS 5 1084#define MEGASAS_FUSION_INTERNAL_CMDS 5
1086#define MEGASAS_FUSION_IOCTL_CMDS 3 1085#define MEGASAS_FUSION_IOCTL_CMDS 3
1086#define MEGASAS_MFI_IOCTL_CMDS 27
1087 1087
1088#define MEGASAS_MAX_MSIX_QUEUES 128 1088#define MEGASAS_MAX_MSIX_QUEUES 128
1089/* 1089/*
@@ -1172,22 +1172,22 @@ struct megasas_register_set {
1172 1172
1173struct megasas_sge32 { 1173struct megasas_sge32 {
1174 1174
1175 u32 phys_addr; 1175 __le32 phys_addr;
1176 u32 length; 1176 __le32 length;
1177 1177
1178} __attribute__ ((packed)); 1178} __attribute__ ((packed));
1179 1179
1180struct megasas_sge64 { 1180struct megasas_sge64 {
1181 1181
1182 u64 phys_addr; 1182 __le64 phys_addr;
1183 u32 length; 1183 __le32 length;
1184 1184
1185} __attribute__ ((packed)); 1185} __attribute__ ((packed));
1186 1186
1187struct megasas_sge_skinny { 1187struct megasas_sge_skinny {
1188 u64 phys_addr; 1188 __le64 phys_addr;
1189 u32 length; 1189 __le32 length;
1190 u32 flag; 1190 __le32 flag;
1191} __packed; 1191} __packed;
1192 1192
1193union megasas_sgl { 1193union megasas_sgl {
@@ -1210,12 +1210,12 @@ struct megasas_header {
1210 u8 cdb_len; /*06h */ 1210 u8 cdb_len; /*06h */
1211 u8 sge_count; /*07h */ 1211 u8 sge_count; /*07h */
1212 1212
1213 u32 context; /*08h */ 1213 __le32 context; /*08h */
1214 u32 pad_0; /*0Ch */ 1214 __le32 pad_0; /*0Ch */
1215 1215
1216 u16 flags; /*10h */ 1216 __le16 flags; /*10h */
1217 u16 timeout; /*12h */ 1217 __le16 timeout; /*12h */
1218 u32 data_xferlen; /*14h */ 1218 __le32 data_xferlen; /*14h */
1219 1219
1220} __attribute__ ((packed)); 1220} __attribute__ ((packed));
1221 1221
@@ -1248,7 +1248,7 @@ typedef union _MFI_CAPABILITIES {
1248 u32 reserved:25; 1248 u32 reserved:25;
1249#endif 1249#endif
1250 } mfi_capabilities; 1250 } mfi_capabilities;
1251 u32 reg; 1251 __le32 reg;
1252} MFI_CAPABILITIES; 1252} MFI_CAPABILITIES;
1253 1253
1254struct megasas_init_frame { 1254struct megasas_init_frame {
@@ -1260,33 +1260,35 @@ struct megasas_init_frame {
1260 u8 reserved_1; /*03h */ 1260 u8 reserved_1; /*03h */
1261 MFI_CAPABILITIES driver_operations; /*04h*/ 1261 MFI_CAPABILITIES driver_operations; /*04h*/
1262 1262
1263 u32 context; /*08h */ 1263 __le32 context; /*08h */
1264 u32 pad_0; /*0Ch */ 1264 __le32 pad_0; /*0Ch */
1265
1266 u16 flags; /*10h */
1267 u16 reserved_3; /*12h */
1268 u32 data_xfer_len; /*14h */
1269 1265
1270 u32 queue_info_new_phys_addr_lo; /*18h */ 1266 __le16 flags; /*10h */
1271 u32 queue_info_new_phys_addr_hi; /*1Ch */ 1267 __le16 reserved_3; /*12h */
1272 u32 queue_info_old_phys_addr_lo; /*20h */ 1268 __le32 data_xfer_len; /*14h */
1273 u32 queue_info_old_phys_addr_hi; /*24h */
1274 1269
1275 u32 reserved_4[6]; /*28h */ 1270 __le32 queue_info_new_phys_addr_lo; /*18h */
1271 __le32 queue_info_new_phys_addr_hi; /*1Ch */
1272 __le32 queue_info_old_phys_addr_lo; /*20h */
1273 __le32 queue_info_old_phys_addr_hi; /*24h */
1274 __le32 reserved_4[2]; /*28h */
1275 __le32 system_info_lo; /*30h */
1276 __le32 system_info_hi; /*34h */
1277 __le32 reserved_5[2]; /*38h */
1276 1278
1277} __attribute__ ((packed)); 1279} __attribute__ ((packed));
1278 1280
1279struct megasas_init_queue_info { 1281struct megasas_init_queue_info {
1280 1282
1281 u32 init_flags; /*00h */ 1283 __le32 init_flags; /*00h */
1282 u32 reply_queue_entries; /*04h */ 1284 __le32 reply_queue_entries; /*04h */
1283 1285
1284 u32 reply_queue_start_phys_addr_lo; /*08h */ 1286 __le32 reply_queue_start_phys_addr_lo; /*08h */
1285 u32 reply_queue_start_phys_addr_hi; /*0Ch */ 1287 __le32 reply_queue_start_phys_addr_hi; /*0Ch */
1286 u32 producer_index_phys_addr_lo; /*10h */ 1288 __le32 producer_index_phys_addr_lo; /*10h */
1287 u32 producer_index_phys_addr_hi; /*14h */ 1289 __le32 producer_index_phys_addr_hi; /*14h */
1288 u32 consumer_index_phys_addr_lo; /*18h */ 1290 __le32 consumer_index_phys_addr_lo; /*18h */
1289 u32 consumer_index_phys_addr_hi; /*1Ch */ 1291 __le32 consumer_index_phys_addr_hi; /*1Ch */
1290 1292
1291} __attribute__ ((packed)); 1293} __attribute__ ((packed));
1292 1294
@@ -1302,18 +1304,18 @@ struct megasas_io_frame {
1302 u8 reserved_0; /*06h */ 1304 u8 reserved_0; /*06h */
1303 u8 sge_count; /*07h */ 1305 u8 sge_count; /*07h */
1304 1306
1305 u32 context; /*08h */ 1307 __le32 context; /*08h */
1306 u32 pad_0; /*0Ch */ 1308 __le32 pad_0; /*0Ch */
1307 1309
1308 u16 flags; /*10h */ 1310 __le16 flags; /*10h */
1309 u16 timeout; /*12h */ 1311 __le16 timeout; /*12h */
1310 u32 lba_count; /*14h */ 1312 __le32 lba_count; /*14h */
1311 1313
1312 u32 sense_buf_phys_addr_lo; /*18h */ 1314 __le32 sense_buf_phys_addr_lo; /*18h */
1313 u32 sense_buf_phys_addr_hi; /*1Ch */ 1315 __le32 sense_buf_phys_addr_hi; /*1Ch */
1314 1316
1315 u32 start_lba_lo; /*20h */ 1317 __le32 start_lba_lo; /*20h */
1316 u32 start_lba_hi; /*24h */ 1318 __le32 start_lba_hi; /*24h */
1317 1319
1318 union megasas_sgl sgl; /*28h */ 1320 union megasas_sgl sgl; /*28h */
1319 1321
@@ -1331,15 +1333,15 @@ struct megasas_pthru_frame {
1331 u8 cdb_len; /*06h */ 1333 u8 cdb_len; /*06h */
1332 u8 sge_count; /*07h */ 1334 u8 sge_count; /*07h */
1333 1335
1334 u32 context; /*08h */ 1336 __le32 context; /*08h */
1335 u32 pad_0; /*0Ch */ 1337 __le32 pad_0; /*0Ch */
1336 1338
1337 u16 flags; /*10h */ 1339 __le16 flags; /*10h */
1338 u16 timeout; /*12h */ 1340 __le16 timeout; /*12h */
1339 u32 data_xfer_len; /*14h */ 1341 __le32 data_xfer_len; /*14h */
1340 1342
1341 u32 sense_buf_phys_addr_lo; /*18h */ 1343 __le32 sense_buf_phys_addr_lo; /*18h */
1342 u32 sense_buf_phys_addr_hi; /*1Ch */ 1344 __le32 sense_buf_phys_addr_hi; /*1Ch */
1343 1345
1344 u8 cdb[16]; /*20h */ 1346 u8 cdb[16]; /*20h */
1345 union megasas_sgl sgl; /*30h */ 1347 union megasas_sgl sgl; /*30h */
@@ -1354,19 +1356,19 @@ struct megasas_dcmd_frame {
1354 u8 reserved_1[4]; /*03h */ 1356 u8 reserved_1[4]; /*03h */
1355 u8 sge_count; /*07h */ 1357 u8 sge_count; /*07h */
1356 1358
1357 u32 context; /*08h */ 1359 __le32 context; /*08h */
1358 u32 pad_0; /*0Ch */ 1360 __le32 pad_0; /*0Ch */
1359 1361
1360 u16 flags; /*10h */ 1362 __le16 flags; /*10h */
1361 u16 timeout; /*12h */ 1363 __le16 timeout; /*12h */
1362 1364
1363 u32 data_xfer_len; /*14h */ 1365 __le32 data_xfer_len; /*14h */
1364 u32 opcode; /*18h */ 1366 __le32 opcode; /*18h */
1365 1367
1366 union { /*1Ch */ 1368 union { /*1Ch */
1367 u8 b[12]; 1369 u8 b[12];
1368 u16 s[6]; 1370 __le16 s[6];
1369 u32 w[3]; 1371 __le32 w[3];
1370 } mbox; 1372 } mbox;
1371 1373
1372 union megasas_sgl sgl; /*28h */ 1374 union megasas_sgl sgl; /*28h */
@@ -1380,22 +1382,22 @@ struct megasas_abort_frame {
1380 u8 cmd_status; /*02h */ 1382 u8 cmd_status; /*02h */
1381 1383
1382 u8 reserved_1; /*03h */ 1384 u8 reserved_1; /*03h */
1383 u32 reserved_2; /*04h */ 1385 __le32 reserved_2; /*04h */
1384 1386
1385 u32 context; /*08h */ 1387 __le32 context; /*08h */
1386 u32 pad_0; /*0Ch */ 1388 __le32 pad_0; /*0Ch */
1387 1389
1388 u16 flags; /*10h */ 1390 __le16 flags; /*10h */
1389 u16 reserved_3; /*12h */ 1391 __le16 reserved_3; /*12h */
1390 u32 reserved_4; /*14h */ 1392 __le32 reserved_4; /*14h */
1391 1393
1392 u32 abort_context; /*18h */ 1394 __le32 abort_context; /*18h */
1393 u32 pad_1; /*1Ch */ 1395 __le32 pad_1; /*1Ch */
1394 1396
1395 u32 abort_mfi_phys_addr_lo; /*20h */ 1397 __le32 abort_mfi_phys_addr_lo; /*20h */
1396 u32 abort_mfi_phys_addr_hi; /*24h */ 1398 __le32 abort_mfi_phys_addr_hi; /*24h */
1397 1399
1398 u32 reserved_5[6]; /*28h */ 1400 __le32 reserved_5[6]; /*28h */
1399 1401
1400} __attribute__ ((packed)); 1402} __attribute__ ((packed));
1401 1403
@@ -1409,14 +1411,14 @@ struct megasas_smp_frame {
1409 u8 reserved_2[3]; /*04h */ 1411 u8 reserved_2[3]; /*04h */
1410 u8 sge_count; /*07h */ 1412 u8 sge_count; /*07h */
1411 1413
1412 u32 context; /*08h */ 1414 __le32 context; /*08h */
1413 u32 pad_0; /*0Ch */ 1415 __le32 pad_0; /*0Ch */
1414 1416
1415 u16 flags; /*10h */ 1417 __le16 flags; /*10h */
1416 u16 timeout; /*12h */ 1418 __le16 timeout; /*12h */
1417 1419
1418 u32 data_xfer_len; /*14h */ 1420 __le32 data_xfer_len; /*14h */
1419 u64 sas_addr; /*18h */ 1421 __le64 sas_addr; /*18h */
1420 1422
1421 union { 1423 union {
1422 struct megasas_sge32 sge32[2]; /* [0]: resp [1]: req */ 1424 struct megasas_sge32 sge32[2]; /* [0]: resp [1]: req */
@@ -1436,16 +1438,16 @@ struct megasas_stp_frame {
1436 u8 reserved_3[2]; /*05h */ 1438 u8 reserved_3[2]; /*05h */
1437 u8 sge_count; /*07h */ 1439 u8 sge_count; /*07h */
1438 1440
1439 u32 context; /*08h */ 1441 __le32 context; /*08h */
1440 u32 pad_0; /*0Ch */ 1442 __le32 pad_0; /*0Ch */
1441 1443
1442 u16 flags; /*10h */ 1444 __le16 flags; /*10h */
1443 u16 timeout; /*12h */ 1445 __le16 timeout; /*12h */
1444 1446
1445 u32 data_xfer_len; /*14h */ 1447 __le32 data_xfer_len; /*14h */
1446 1448
1447 u16 fis[10]; /*18h */ 1449 __le16 fis[10]; /*18h */
1448 u32 stp_flags; 1450 __le32 stp_flags;
1449 1451
1450 union { 1452 union {
1451 struct megasas_sge32 sge32[2]; /* [0]: resp [1]: data */ 1453 struct megasas_sge32 sge32[2]; /* [0]: resp [1]: data */
@@ -1489,18 +1491,18 @@ union megasas_evt_class_locale {
1489} __attribute__ ((packed)); 1491} __attribute__ ((packed));
1490 1492
1491struct megasas_evt_log_info { 1493struct megasas_evt_log_info {
1492 u32 newest_seq_num; 1494 __le32 newest_seq_num;
1493 u32 oldest_seq_num; 1495 __le32 oldest_seq_num;
1494 u32 clear_seq_num; 1496 __le32 clear_seq_num;
1495 u32 shutdown_seq_num; 1497 __le32 shutdown_seq_num;
1496 u32 boot_seq_num; 1498 __le32 boot_seq_num;
1497 1499
1498} __attribute__ ((packed)); 1500} __attribute__ ((packed));
1499 1501
1500struct megasas_progress { 1502struct megasas_progress {
1501 1503
1502 u16 progress; 1504 __le16 progress;
1503 u16 elapsed_seconds; 1505 __le16 elapsed_seconds;
1504 1506
1505} __attribute__ ((packed)); 1507} __attribute__ ((packed));
1506 1508
@@ -1521,9 +1523,9 @@ struct megasas_evtarg_pd {
1521 1523
1522struct megasas_evt_detail { 1524struct megasas_evt_detail {
1523 1525
1524 u32 seq_num; 1526 __le32 seq_num;
1525 u32 time_stamp; 1527 __le32 time_stamp;
1526 u32 code; 1528 __le32 code;
1527 union megasas_evt_class_locale cl; 1529 union megasas_evt_class_locale cl;
1528 u8 arg_type; 1530 u8 arg_type;
1529 u8 reserved1[15]; 1531 u8 reserved1[15];
@@ -1542,18 +1544,18 @@ struct megasas_evt_detail {
1542 1544
1543 struct { 1545 struct {
1544 struct megasas_evtarg_ld ld; 1546 struct megasas_evtarg_ld ld;
1545 u64 count; 1547 __le64 count;
1546 } __attribute__ ((packed)) ld_count; 1548 } __attribute__ ((packed)) ld_count;
1547 1549
1548 struct { 1550 struct {
1549 u64 lba; 1551 __le64 lba;
1550 struct megasas_evtarg_ld ld; 1552 struct megasas_evtarg_ld ld;
1551 } __attribute__ ((packed)) ld_lba; 1553 } __attribute__ ((packed)) ld_lba;
1552 1554
1553 struct { 1555 struct {
1554 struct megasas_evtarg_ld ld; 1556 struct megasas_evtarg_ld ld;
1555 u32 prevOwner; 1557 __le32 prevOwner;
1556 u32 newOwner; 1558 __le32 newOwner;
1557 } __attribute__ ((packed)) ld_owner; 1559 } __attribute__ ((packed)) ld_owner;
1558 1560
1559 struct { 1561 struct {
@@ -1610,7 +1612,7 @@ struct megasas_evt_detail {
1610 1612
1611 struct { 1613 struct {
1612 u16 vendorId; 1614 u16 vendorId;
1613 u16 deviceId; 1615 __le16 deviceId;
1614 u16 subVendorId; 1616 u16 subVendorId;
1615 u16 subDeviceId; 1617 u16 subDeviceId;
1616 } __attribute__ ((packed)) pci; 1618 } __attribute__ ((packed)) pci;
@@ -1630,9 +1632,9 @@ struct megasas_evt_detail {
1630 } __attribute__ ((packed)) ecc; 1632 } __attribute__ ((packed)) ecc;
1631 1633
1632 u8 b[96]; 1634 u8 b[96];
1633 u16 s[48]; 1635 __le16 s[48];
1634 u32 w[24]; 1636 __le32 w[24];
1635 u64 d[12]; 1637 __le64 d[12];
1636 } args; 1638 } args;
1637 1639
1638 char description[128]; 1640 char description[128];
@@ -1649,12 +1651,22 @@ struct megasas_irq_context {
1649 u32 MSIxIndex; 1651 u32 MSIxIndex;
1650}; 1652};
1651 1653
1654struct MR_DRV_SYSTEM_INFO {
1655 u8 infoVersion;
1656 u8 systemIdLength;
1657 u16 reserved0;
1658 u8 systemId[64];
1659 u8 reserved[1980];
1660};
1661
1652struct megasas_instance { 1662struct megasas_instance {
1653 1663
1654 u32 *producer; 1664 __le32 *producer;
1655 dma_addr_t producer_h; 1665 dma_addr_t producer_h;
1656 u32 *consumer; 1666 __le32 *consumer;
1657 dma_addr_t consumer_h; 1667 dma_addr_t consumer_h;
1668 struct MR_DRV_SYSTEM_INFO *system_info_buf;
1669 dma_addr_t system_info_h;
1658 struct MR_LD_VF_AFFILIATION *vf_affiliation; 1670 struct MR_LD_VF_AFFILIATION *vf_affiliation;
1659 dma_addr_t vf_affiliation_h; 1671 dma_addr_t vf_affiliation_h;
1660 struct MR_LD_VF_AFFILIATION_111 *vf_affiliation_111; 1672 struct MR_LD_VF_AFFILIATION_111 *vf_affiliation_111;
@@ -1662,7 +1674,7 @@ struct megasas_instance {
1662 struct MR_CTRL_HB_HOST_MEM *hb_host_mem; 1674 struct MR_CTRL_HB_HOST_MEM *hb_host_mem;
1663 dma_addr_t hb_host_mem_h; 1675 dma_addr_t hb_host_mem_h;
1664 1676
1665 u32 *reply_queue; 1677 __le32 *reply_queue;
1666 dma_addr_t reply_queue_h; 1678 dma_addr_t reply_queue_h;
1667 1679
1668 u32 *crash_dump_buf; 1680 u32 *crash_dump_buf;
@@ -1681,7 +1693,7 @@ struct megasas_instance {
1681 spinlock_t crashdump_lock; 1693 spinlock_t crashdump_lock;
1682 1694
1683 struct megasas_register_set __iomem *reg_set; 1695 struct megasas_register_set __iomem *reg_set;
1684 u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY]; 1696 u32 __iomem *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
1685 struct megasas_pd_list pd_list[MEGASAS_MAX_PD]; 1697 struct megasas_pd_list pd_list[MEGASAS_MAX_PD];
1686 struct megasas_pd_list local_pd_list[MEGASAS_MAX_PD]; 1698 struct megasas_pd_list local_pd_list[MEGASAS_MAX_PD];
1687 u8 ld_ids[MEGASAS_MAX_LD_IDS]; 1699 u8 ld_ids[MEGASAS_MAX_LD_IDS];
@@ -1769,6 +1781,7 @@ struct megasas_instance {
1769 u16 throttlequeuedepth; 1781 u16 throttlequeuedepth;
1770 u8 mask_interrupts; 1782 u8 mask_interrupts;
1771 u8 is_imr; 1783 u8 is_imr;
1784 bool dev_handle;
1772}; 1785};
1773struct MR_LD_VF_MAP { 1786struct MR_LD_VF_MAP {
1774 u32 size; 1787 u32 size;
@@ -1864,9 +1877,13 @@ struct megasas_instance_template {
1864#define MEGASAS_IS_LOGICAL(scp) \ 1877#define MEGASAS_IS_LOGICAL(scp) \
1865 (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1 1878 (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
1866 1879
1867#define MEGASAS_DEV_INDEX(inst, scp) \ 1880#define MEGASAS_DEV_INDEX(scp) \
1868 ((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \ 1881 (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
1869 scp->device->id 1882 scp->device->id)
1883
1884#define MEGASAS_PD_INDEX(scp) \
1885 ((scp->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + \
1886 scp->device->id)
1870 1887
1871struct megasas_cmd { 1888struct megasas_cmd {
1872 1889
@@ -1877,17 +1894,14 @@ struct megasas_cmd {
1877 1894
1878 u32 index; 1895 u32 index;
1879 u8 sync_cmd; 1896 u8 sync_cmd;
1880 u8 cmd_status; 1897 u8 cmd_status_drv;
1881 u8 abort_aen; 1898 u8 abort_aen;
1882 u8 retry_for_fw_reset; 1899 u8 retry_for_fw_reset;
1883 1900
1884 1901
1885 struct list_head list; 1902 struct list_head list;
1886 struct scsi_cmnd *scmd; 1903 struct scsi_cmnd *scmd;
1887 1904 u8 flags;
1888 void *mpt_pthr_cmd_blocked;
1889 atomic_t mfi_mpt_pthr;
1890 u8 is_wait_event;
1891 1905
1892 struct megasas_instance *instance; 1906 struct megasas_instance *instance;
1893 union { 1907 union {
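
MEGASAS_DEV_INDEX drops its unused instance argument and gains outer parentheses so it expands safely inside larger expressions, and the new MEGASAS_PD_INDEX covers physical drives, which are numbered across all channels rather than alternating between two. A runnable model with illustrative constants:

#include <stdio.h>

#define MAX_DEV_PER_CHANNEL 128	/* stands in for MEGASAS_MAX_DEV_PER_CHANNEL */

#define DEV_INDEX(channel, id) \
	((((channel) % 2) * MAX_DEV_PER_CHANNEL) + (id))	/* logical drives */

#define PD_INDEX(channel, id) \
	(((channel) * MAX_DEV_PER_CHANNEL) + (id))		/* physical drives */

int main(void)
{
	printf("LD channel 2, id 5 -> %d\n", DEV_INDEX(2, 5));	/* 5   */
	printf("PD channel 1, id 5 -> %d\n", PD_INDEX(1, 5));	/* 133 */
	return 0;
}
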
@@ -1963,10 +1977,10 @@ u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map);
1963struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map); 1977struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
1964u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map); 1978u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map);
1965u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map); 1979u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map);
1966u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map); 1980__le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map);
1967u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map); 1981u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
1968 1982
1969u16 get_updated_dev_handle(struct megasas_instance *instance, 1983__le16 get_updated_dev_handle(struct megasas_instance *instance,
1970 struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info); 1984 struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info);
1971void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map, 1985void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map,
1972 struct LD_LOAD_BALANCE_INFO *lbInfo); 1986 struct LD_LOAD_BALANCE_INFO *lbInfo);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 4c3fc0eb8b30..71b884dae27c 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -94,8 +94,8 @@ MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Defau
94 94
95MODULE_LICENSE("GPL"); 95MODULE_LICENSE("GPL");
96MODULE_VERSION(MEGASAS_VERSION); 96MODULE_VERSION(MEGASAS_VERSION);
97MODULE_AUTHOR("megaraidlinux@lsi.com"); 97MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
98MODULE_DESCRIPTION("LSI MegaRAID SAS Driver"); 98MODULE_DESCRIPTION("Avago MegaRAID SAS Driver");
99 99
100int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); 100int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
101static int megasas_get_pd_list(struct megasas_instance *instance); 101static int megasas_get_pd_list(struct megasas_instance *instance);
@@ -215,7 +215,6 @@ struct megasas_cmd *megasas_get_cmd(struct megasas_instance
215 cmd = list_entry((&instance->cmd_pool)->next, 215 cmd = list_entry((&instance->cmd_pool)->next,
216 struct megasas_cmd, list); 216 struct megasas_cmd, list);
217 list_del_init(&cmd->list); 217 list_del_init(&cmd->list);
218 atomic_set(&cmd->mfi_mpt_pthr, MFI_MPT_DETACHED);
219 } else { 218 } else {
220 printk(KERN_ERR "megasas: Command pool empty!\n"); 219 printk(KERN_ERR "megasas: Command pool empty!\n");
221 } 220 }
@@ -225,52 +224,41 @@ struct megasas_cmd *megasas_get_cmd(struct megasas_instance
225} 224}
226 225
227/** 226/**
228 * __megasas_return_cmd - Return a cmd to free command pool 227 * megasas_return_cmd - Return a cmd to free command pool
229 * @instance: Adapter soft state 228 * @instance: Adapter soft state
230 * @cmd: Command packet to be returned to free command pool 229 * @cmd: Command packet to be returned to free command pool
231 */ 230 */
232inline void 231inline void
233__megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 232megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
234{ 233{
235 /* 234 unsigned long flags;
236 * Don't go ahead and free the MFI frame, if corresponding 235 u32 blk_tags;
237 * MPT frame is not freed(valid for only fusion adapters). 236 struct megasas_cmd_fusion *cmd_fusion;
238 * In case of MFI adapters, anyways for any allocated MFI 237 struct fusion_context *fusion = instance->ctrl_context;
239 * frame will have cmd->mfi_mpt_mpthr set to MFI_MPT_DETACHED 238
239 /* This flag is used only for fusion adapter.
240 * Wait for Interrupt for Polled mode DCMD
240 */ 241 */
241 if (atomic_read(&cmd->mfi_mpt_pthr) != MFI_MPT_DETACHED) 242 if (cmd->flags & DRV_DCMD_POLLED_MODE)
242 return; 243 return;
243 244
245 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
246
247 if (fusion) {
248 blk_tags = instance->max_scsi_cmds + cmd->index;
249 cmd_fusion = fusion->cmd_list[blk_tags];
250 megasas_return_cmd_fusion(instance, cmd_fusion);
251 }
244 cmd->scmd = NULL; 252 cmd->scmd = NULL;
245 cmd->frame_count = 0; 253 cmd->frame_count = 0;
246 cmd->is_wait_event = 0; 254 cmd->flags = 0;
247 cmd->mpt_pthr_cmd_blocked = NULL; 255 if (!fusion && reset_devices)
248
249 if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
250 (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
251 (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
252 (reset_devices))
253 cmd->frame->hdr.cmd = MFI_CMD_INVALID; 256 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
254
255 atomic_set(&cmd->mfi_mpt_pthr, MFI_LIST_ADDED);
256 list_add(&cmd->list, (&instance->cmd_pool)->next); 257 list_add(&cmd->list, (&instance->cmd_pool)->next);
257}
258
259/**
260 * megasas_return_cmd - Return a cmd to free command pool
261 * @instance: Adapter soft state
262 * @cmd: Command packet to be returned to free command pool
263 */
264inline void
265megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
266{
267 unsigned long flags;
268 258
269 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
270 __megasas_return_cmd(instance, cmd);
271 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 259 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
272}
273 260
261}
274 262
275/** 263/**
276* The following functions are defined for xscale 264* The following functions are defined for xscale
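
The rework above collapses __megasas_return_cmd()/megasas_return_cmd() into a single locked function: polled-mode DCMDs are skipped outright, and on fusion adapters the paired MPT frame is released inside the same critical section before the MFI command rejoins the free pool. A pthread-based toy of that flow, using toy types rather than the driver's structures:

#include <pthread.h>
#include <stddef.h>

struct toy_cmd { struct toy_cmd *next; int flags; };
#define TOY_DCMD_POLLED 0x1	/* models DRV_DCMD_POLLED_MODE */

struct toy_instance {
	pthread_mutex_t pool_lock;
	struct toy_cmd *free_list;
	int have_fusion;	/* models instance->ctrl_context != NULL */
};

static void toy_return_fusion_cmd(struct toy_instance *inst) { (void)inst; }

static void toy_return_cmd(struct toy_instance *inst, struct toy_cmd *cmd)
{
	/* Polled-mode DCMDs on fusion adapters are returned elsewhere. */
	if (cmd->flags & TOY_DCMD_POLLED)
		return;

	pthread_mutex_lock(&inst->pool_lock);
	if (inst->have_fusion)
		toy_return_fusion_cmd(inst);	/* release paired MPT frame */
	cmd->flags = 0;
	cmd->next = inst->free_list;		/* back onto the free pool */
	inst->free_list = cmd;
	pthread_mutex_unlock(&inst->pool_lock);
}

int main(void)
{
	struct toy_instance inst = {
		.pool_lock = PTHREAD_MUTEX_INITIALIZER,
		.free_list = NULL,
		.have_fusion = 1,
	};
	struct toy_cmd cmd = { .next = NULL, .flags = 0 };

	toy_return_cmd(&inst, &cmd);	/* cmd is back on the free pool */
	return 0;
}
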
@@ -814,8 +802,8 @@ megasas_adp_reset_gen2(struct megasas_instance *instance,
814{ 802{
815 u32 retry = 0 ; 803 u32 retry = 0 ;
816 u32 HostDiag; 804 u32 HostDiag;
817 u32 *seq_offset = &reg_set->seq_offset; 805 u32 __iomem *seq_offset = &reg_set->seq_offset;
818 u32 *hostdiag_offset = &reg_set->host_diag; 806 u32 __iomem *hostdiag_offset = &reg_set->host_diag;
819 807
820 if (instance->instancet == &megasas_instance_template_skinny) { 808 if (instance->instancet == &megasas_instance_template_skinny) {
821 seq_offset = &reg_set->fusion_seq_offset; 809 seq_offset = &reg_set->fusion_seq_offset;
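
The gen2 reset fix above is purely an annotation change: seq_offset and hostdiag_offset point into memory-mapped registers, so they must carry __iomem and be accessed through readl()/writel() instead of plain loads and stores. A kernel-context sketch of the rule (it uses <linux/io.h>, so it builds in-tree only, not as a userspace program):

#include <linux/io.h>

static u32 toy_read_seq(u32 __iomem *seq_offset)
{
	return readl(seq_offset);	/* MMIO read through the accessor */
}

static void toy_write_seq(u32 __iomem *seq_offset, u32 val)
{
	writel(val, seq_offset);	/* never "*seq_offset = val" for MMIO */
}
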
@@ -910,7 +898,7 @@ extern struct megasas_instance_template megasas_instance_template_fusion;
910 * @instance: Adapter soft state 898 * @instance: Adapter soft state
911 * @cmd: Command packet to be issued 899 * @cmd: Command packet to be issued
912 * 900 *
913 * For polling, MFI requires the cmd_status to be set to 0xFF before posting. 901 * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
914 */ 902 */
915int 903int
916megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) 904megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
@@ -952,20 +940,20 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
952 struct megasas_cmd *cmd, int timeout) 940 struct megasas_cmd *cmd, int timeout)
953{ 941{
954 int ret = 0; 942 int ret = 0;
955 cmd->cmd_status = ENODATA; 943 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
956 944
957 cmd->is_wait_event = 1;
958 instance->instancet->issue_dcmd(instance, cmd); 945 instance->instancet->issue_dcmd(instance, cmd);
959 if (timeout) { 946 if (timeout) {
960 ret = wait_event_timeout(instance->int_cmd_wait_q, 947 ret = wait_event_timeout(instance->int_cmd_wait_q,
961 cmd->cmd_status != ENODATA, timeout * HZ); 948 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
962 if (!ret) 949 if (!ret)
963 return 1; 950 return 1;
964 } else 951 } else
965 wait_event(instance->int_cmd_wait_q, 952 wait_event(instance->int_cmd_wait_q,
966 cmd->cmd_status != ENODATA); 953 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
967 954
968 return 0; 955 return (cmd->cmd_status_drv == MFI_STAT_OK) ?
956 0 : 1;
969} 957}
970 958
971/** 959/**
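
megasas_issue_blocked_cmd() now seeds cmd_status_drv with the MFI_STAT_INVALID_STATUS sentinel, sleeps until a completion overwrites it, and reports success only for MFI_STAT_OK instead of unconditionally returning 0. A runnable model of the status mapping; the constants mirror the MFI ones but are restated here:

#include <stdio.h>

#define STAT_OK             0x00	/* models MFI_STAT_OK */
#define STAT_INVALID_STATUS 0xFF	/* sentinel set before issuing */

static int blocked_cmd_result(unsigned char cmd_status_drv, int timed_out)
{
	if (timed_out && cmd_status_drv == STAT_INVALID_STATUS)
		return 1;				/* firmware never answered */
	return (cmd_status_drv == STAT_OK) ? 0 : 1;	/* real completion status */
}

int main(void)
{
	printf("%d %d %d\n",
	       blocked_cmd_result(STAT_OK, 0),			/* 0: success */
	       blocked_cmd_result(0x02, 0),			/* 1: firmware error */
	       blocked_cmd_result(STAT_INVALID_STATUS, 1));	/* 1: timeout */
	return 0;
}
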
@@ -998,7 +986,7 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
998 * Prepare and issue the abort frame 986 * Prepare and issue the abort frame
999 */ 987 */
1000 abort_fr->cmd = MFI_CMD_ABORT; 988 abort_fr->cmd = MFI_CMD_ABORT;
1001 abort_fr->cmd_status = 0xFF; 989 abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1002 abort_fr->flags = cpu_to_le16(0); 990 abort_fr->flags = cpu_to_le16(0);
1003 abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index); 991 abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1004 abort_fr->abort_mfi_phys_addr_lo = 992 abort_fr->abort_mfi_phys_addr_lo =
@@ -1007,13 +995,13 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1007 cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr)); 995 cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1008 996
1009 cmd->sync_cmd = 1; 997 cmd->sync_cmd = 1;
1010 cmd->cmd_status = ENODATA; 998 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1011 999
1012 instance->instancet->issue_dcmd(instance, cmd); 1000 instance->instancet->issue_dcmd(instance, cmd);
1013 1001
1014 if (timeout) { 1002 if (timeout) {
1015 ret = wait_event_timeout(instance->abort_cmd_wait_q, 1003 ret = wait_event_timeout(instance->abort_cmd_wait_q,
1016 cmd->cmd_status != ENODATA, timeout * HZ); 1004 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1017 if (!ret) { 1005 if (!ret) {
1018 dev_err(&instance->pdev->dev, "Command timed out " 1006 dev_err(&instance->pdev->dev, "Command timed out "
1019 "from %s\n", __func__); 1007 "from %s\n", __func__);
@@ -1021,7 +1009,7 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1021 } 1009 }
1022 } else 1010 } else
1023 wait_event(instance->abort_cmd_wait_q, 1011 wait_event(instance->abort_cmd_wait_q,
1024 cmd->cmd_status != ENODATA); 1012 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1025 1013
1026 cmd->sync_cmd = 0; 1014 cmd->sync_cmd = 0;
1027 1015
@@ -1196,7 +1184,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1196 struct megasas_pthru_frame *pthru; 1184 struct megasas_pthru_frame *pthru;
1197 1185
1198 is_logical = MEGASAS_IS_LOGICAL(scp); 1186 is_logical = MEGASAS_IS_LOGICAL(scp);
1199 device_id = MEGASAS_DEV_INDEX(instance, scp); 1187 device_id = MEGASAS_DEV_INDEX(scp);
1200 pthru = (struct megasas_pthru_frame *)cmd->frame; 1188 pthru = (struct megasas_pthru_frame *)cmd->frame;
1201 1189
1202 if (scp->sc_data_direction == PCI_DMA_TODEVICE) 1190 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
@@ -1232,7 +1220,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1232 */ 1220 */
1233 if (scp->device->type == TYPE_TAPE) { 1221 if (scp->device->type == TYPE_TAPE) {
1234 if ((scp->request->timeout / HZ) > 0xFFFF) 1222 if ((scp->request->timeout / HZ) > 0xFFFF)
1235 pthru->timeout = 0xFFFF; 1223 pthru->timeout = cpu_to_le16(0xFFFF);
1236 else 1224 else
1237 pthru->timeout = cpu_to_le16(scp->request->timeout / HZ); 1225 pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1238 } 1226 }
@@ -1294,7 +1282,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1294 u16 flags = 0; 1282 u16 flags = 0;
1295 struct megasas_io_frame *ldio; 1283 struct megasas_io_frame *ldio;
1296 1284
1297 device_id = MEGASAS_DEV_INDEX(instance, scp); 1285 device_id = MEGASAS_DEV_INDEX(scp);
1298 ldio = (struct megasas_io_frame *)cmd->frame; 1286 ldio = (struct megasas_io_frame *)cmd->frame;
1299 1287
1300 if (scp->sc_data_direction == PCI_DMA_TODEVICE) 1288 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
@@ -1698,7 +1686,7 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
1698* @instance: Adapter soft state 1686* @instance: Adapter soft state
1699* 1687*
1700*/ 1688*/
1701void megasas_complete_outstanding_ioctls(struct megasas_instance *instance) 1689static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
1702{ 1690{
1703 int i; 1691 int i;
1704 struct megasas_cmd *cmd_mfi; 1692 struct megasas_cmd *cmd_mfi;
@@ -1922,22 +1910,24 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
1922 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 1910 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1923 1911
1924 dcmd->cmd = MFI_CMD_DCMD; 1912 dcmd->cmd = MFI_CMD_DCMD;
1925 dcmd->cmd_status = 0xFF; 1913 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
1926 dcmd->sge_count = 1; 1914 dcmd->sge_count = 1;
1927 dcmd->flags = MFI_FRAME_DIR_BOTH; 1915 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
1928 dcmd->timeout = 0; 1916 dcmd->timeout = 0;
1929 dcmd->pad_0 = 0; 1917 dcmd->pad_0 = 0;
1930 dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111); 1918 dcmd->data_xfer_len =
1931 dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111; 1919 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
1920 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
1932 1921
1933 if (initial) 1922 if (initial)
1934 dcmd->sgl.sge32[0].phys_addr = 1923 dcmd->sgl.sge32[0].phys_addr =
1935 instance->vf_affiliation_111_h; 1924 cpu_to_le32(instance->vf_affiliation_111_h);
1936 else 1925 else
1937 dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h; 1926 dcmd->sgl.sge32[0].phys_addr =
1927 cpu_to_le32(new_affiliation_111_h);
1938 1928
1939 dcmd->sgl.sge32[0].length = 1929 dcmd->sgl.sge32[0].length = cpu_to_le32(
1940 sizeof(struct MR_LD_VF_AFFILIATION_111); 1930 sizeof(struct MR_LD_VF_AFFILIATION_111));
1941 1931
1942 printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for " 1932 printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
1943 "scsi%d\n", instance->host->host_no); 1933 "scsi%d\n", instance->host->host_no);
@@ -1976,11 +1966,7 @@ out:
1976 new_affiliation_111_h); 1966 new_affiliation_111_h);
1977 } 1967 }
1978 1968
1979 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) 1969 megasas_return_cmd(instance, cmd);
1980 megasas_return_mfi_mpt_pthr(instance, cmd,
1981 cmd->mpt_pthr_cmd_blocked);
1982 else
1983 megasas_return_cmd(instance, cmd);
1984 1970
1985 return retval; 1971 return retval;
1986} 1972}
@@ -2037,22 +2023,24 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2037 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2023 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2038 2024
2039 dcmd->cmd = MFI_CMD_DCMD; 2025 dcmd->cmd = MFI_CMD_DCMD;
2040 dcmd->cmd_status = 0xFF; 2026 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2041 dcmd->sge_count = 1; 2027 dcmd->sge_count = 1;
2042 dcmd->flags = MFI_FRAME_DIR_BOTH; 2028 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2043 dcmd->timeout = 0; 2029 dcmd->timeout = 0;
2044 dcmd->pad_0 = 0; 2030 dcmd->pad_0 = 0;
2045 dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) * 2031 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2046 sizeof(struct MR_LD_VF_AFFILIATION); 2032 sizeof(struct MR_LD_VF_AFFILIATION));
2047 dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS; 2033 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2048 2034
2049 if (initial) 2035 if (initial)
2050 dcmd->sgl.sge32[0].phys_addr = instance->vf_affiliation_h; 2036 dcmd->sgl.sge32[0].phys_addr =
2037 cpu_to_le32(instance->vf_affiliation_h);
2051 else 2038 else
2052 dcmd->sgl.sge32[0].phys_addr = new_affiliation_h; 2039 dcmd->sgl.sge32[0].phys_addr =
2040 cpu_to_le32(new_affiliation_h);
2053 2041
2054 dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) * 2042 dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2055 sizeof(struct MR_LD_VF_AFFILIATION); 2043 sizeof(struct MR_LD_VF_AFFILIATION));
2056 2044
2057 printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for " 2045 printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
2058 "scsi%d\n", instance->host->host_no); 2046 "scsi%d\n", instance->host->host_no);
@@ -2147,11 +2135,7 @@ out:
2147 (MAX_LOGICAL_DRIVES + 1) * 2135 (MAX_LOGICAL_DRIVES + 1) *
2148 sizeof(struct MR_LD_VF_AFFILIATION), 2136 sizeof(struct MR_LD_VF_AFFILIATION),
2149 new_affiliation, new_affiliation_h); 2137 new_affiliation, new_affiliation_h);
2150 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) 2138 megasas_return_cmd(instance, cmd);
2151 megasas_return_mfi_mpt_pthr(instance, cmd,
2152 cmd->mpt_pthr_cmd_blocked);
2153 else
2154 megasas_return_cmd(instance, cmd);
2155 2139
2156 return retval; 2140 return retval;
2157} 2141}
@@ -2204,39 +2188,33 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2204 2188
2205 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2189 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2206 2190
2207 dcmd->mbox.s[0] = sizeof(struct MR_CTRL_HB_HOST_MEM); 2191 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2208 dcmd->cmd = MFI_CMD_DCMD; 2192 dcmd->cmd = MFI_CMD_DCMD;
2209 dcmd->cmd_status = 0xFF; 2193 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2210 dcmd->sge_count = 1; 2194 dcmd->sge_count = 1;
2211 dcmd->flags = MFI_FRAME_DIR_BOTH; 2195 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2212 dcmd->timeout = 0; 2196 dcmd->timeout = 0;
2213 dcmd->pad_0 = 0; 2197 dcmd->pad_0 = 0;
2214 dcmd->data_xfer_len = sizeof(struct MR_CTRL_HB_HOST_MEM); 2198 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2215 dcmd->opcode = MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC; 2199 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2216 dcmd->sgl.sge32[0].phys_addr = instance->hb_host_mem_h; 2200 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h);
2217 dcmd->sgl.sge32[0].length = sizeof(struct MR_CTRL_HB_HOST_MEM); 2201 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2218 2202
2219 printk(KERN_WARNING "megasas: SR-IOV: Starting heartbeat for scsi%d\n", 2203 printk(KERN_WARNING "megasas: SR-IOV: Starting heartbeat for scsi%d\n",
2220 instance->host->host_no); 2204 instance->host->host_no);
2221 2205
2222 if (!megasas_issue_polled(instance, cmd)) { 2206 if (instance->ctrl_context && !instance->mask_interrupts)
2223 retval = 0; 2207 retval = megasas_issue_blocked_cmd(instance, cmd,
2224 } else { 2208 MEGASAS_ROUTINE_WAIT_TIME_VF);
2225 printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST" 2209 else
2226 "_MEM_ALLOC DCMD timed out for scsi%d\n", 2210 retval = megasas_issue_polled(instance, cmd);
2227 instance->host->host_no);
2228 retval = 1;
2229 goto out;
2230 }
2231
2232 2211
2233 if (dcmd->cmd_status) { 2212 if (retval) {
2234 printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST" 2213 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2235 "_MEM_ALLOC DCMD failed with status 0x%x for scsi%d\n", 2214 "_MEM_ALLOC DCMD %s for scsi%d\n",
2236 dcmd->cmd_status, 2215 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2237 instance->host->host_no); 2216 "timed out" : "failed", instance->host->host_no);
2238 retval = 1; 2217 retval = 1;
2239 goto out;
2240 } 2218 }
2241 2219
2242out: 2220out:
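
The heartbeat hunk above also changes how the DCMD is issued: the blocked, interrupt-completed path is used only when a fusion context exists and interrupts are unmasked; everything else falls back to polling, and a single retval check replaces the two separate error branches. A runnable boolean model of that decision:

#include <stdbool.h>
#include <stdio.h>

static const char *issue_mode(bool has_fusion_ctx, bool interrupts_masked)
{
	return (has_fusion_ctx && !interrupts_masked) ? "blocked" : "polled";
}

int main(void)
{
	printf("%s\n", issue_mode(true, false));	/* blocked */
	printf("%s\n", issue_mode(true, true));		/* polled  */
	printf("%s\n", issue_mode(false, false));	/* polled  */
	return 0;
}
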
@@ -2332,7 +2310,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2332 "reset queue\n", 2310 "reset queue\n",
2333 reset_cmd); 2311 reset_cmd);
2334 2312
2335 reset_cmd->cmd_status = ENODATA; 2313 reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
2336 instance->instancet->fire_cmd(instance, 2314 instance->instancet->fire_cmd(instance,
2337 reset_cmd->frame_phys_addr, 2315 reset_cmd->frame_phys_addr,
2338 0, instance->reg_set); 2316 0, instance->reg_set);
@@ -2612,11 +2590,7 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2612 2590
2613 instance->aen_cmd = NULL; 2591 instance->aen_cmd = NULL;
2614 2592
2615 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) 2593 megasas_return_cmd(instance, cmd);
2616 megasas_return_mfi_mpt_pthr(instance, cmd,
2617 cmd->mpt_pthr_cmd_blocked);
2618 else
2619 megasas_return_cmd(instance, cmd);
2620 2594
2621 if ((instance->unload == 0) && 2595 if ((instance->unload == 0) &&
2622 ((instance->issuepend_done == 1))) { 2596 ((instance->issuepend_done == 1))) {
@@ -2786,7 +2760,7 @@ struct device_attribute *megaraid_host_attrs[] = {
2786static struct scsi_host_template megasas_template = { 2760static struct scsi_host_template megasas_template = {
2787 2761
2788 .module = THIS_MODULE, 2762 .module = THIS_MODULE,
2789 .name = "LSI SAS based MegaRAID driver", 2763 .name = "Avago SAS based MegaRAID driver",
2790 .proc_name = "megaraid_sas", 2764 .proc_name = "megaraid_sas",
2791 .slave_configure = megasas_slave_configure, 2765 .slave_configure = megasas_slave_configure,
2792 .slave_alloc = megasas_slave_alloc, 2766 .slave_alloc = megasas_slave_alloc,
@@ -2815,11 +2789,7 @@ static void
2815megasas_complete_int_cmd(struct megasas_instance *instance, 2789megasas_complete_int_cmd(struct megasas_instance *instance,
2816 struct megasas_cmd *cmd) 2790 struct megasas_cmd *cmd)
2817{ 2791{
2818 cmd->cmd_status = cmd->frame->io.cmd_status; 2792 cmd->cmd_status_drv = cmd->frame->io.cmd_status;
2819
2820 if (cmd->cmd_status == ENODATA) {
2821 cmd->cmd_status = 0;
2822 }
2823 wake_up(&instance->int_cmd_wait_q); 2793 wake_up(&instance->int_cmd_wait_q);
2824} 2794}
2825 2795
@@ -2838,7 +2808,7 @@ megasas_complete_abort(struct megasas_instance *instance,
2838{ 2808{
2839 if (cmd->sync_cmd) { 2809 if (cmd->sync_cmd) {
2840 cmd->sync_cmd = 0; 2810 cmd->sync_cmd = 0;
2841 cmd->cmd_status = 0; 2811 cmd->cmd_status_drv = 0;
2842 wake_up(&instance->abort_cmd_wait_q); 2812 wake_up(&instance->abort_cmd_wait_q);
2843 } 2813 }
2844 2814
@@ -2978,8 +2948,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
2978 "failed, status = 0x%x.\n", 2948 "failed, status = 0x%x.\n",
2979 cmd->frame->hdr.cmd_status); 2949 cmd->frame->hdr.cmd_status);
2980 else { 2950 else {
2981 megasas_return_mfi_mpt_pthr(instance, 2951 megasas_return_cmd(instance, cmd);
2982 cmd, cmd->mpt_pthr_cmd_blocked);
2983 spin_unlock_irqrestore( 2952 spin_unlock_irqrestore(
2984 instance->host->host_lock, 2953 instance->host->host_lock,
2985 flags); 2954 flags);
@@ -2987,8 +2956,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
2987 } 2956 }
2988 } else 2957 } else
2989 instance->map_id++; 2958 instance->map_id++;
2990 megasas_return_mfi_mpt_pthr(instance, cmd, 2959 megasas_return_cmd(instance, cmd);
2991 cmd->mpt_pthr_cmd_blocked);
2992 2960
2993 /* 2961 /*
2994 * Set fast path IO to ZERO. 2962 * Set fast path IO to ZERO.
@@ -3086,7 +3054,7 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3086 printk(KERN_NOTICE "megasas: %p synchronous cmd " 3054 printk(KERN_NOTICE "megasas: %p synchronous cmd "
3087 "on the internal reset queue, " 3055 "on the internal reset queue, "
3088 "issue it again.\n", cmd); 3056 "issue it again.\n", cmd);
3089 cmd->cmd_status = ENODATA; 3057 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
3090 instance->instancet->fire_cmd(instance, 3058 instance->instancet->fire_cmd(instance,
3091 cmd->frame_phys_addr , 3059 cmd->frame_phys_addr ,
3092 0, instance->reg_set); 3060 0, instance->reg_set);
@@ -3766,7 +3734,6 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
3766 cmd = instance->cmd_list[i]; 3734 cmd = instance->cmd_list[i];
3767 memset(cmd, 0, sizeof(struct megasas_cmd)); 3735 memset(cmd, 0, sizeof(struct megasas_cmd));
3768 cmd->index = i; 3736 cmd->index = i;
3769 atomic_set(&cmd->mfi_mpt_pthr, MFI_LIST_ADDED);
3770 cmd->scmd = NULL; 3737 cmd->scmd = NULL;
3771 cmd->instance = instance; 3738 cmd->instance = instance;
3772 3739
@@ -3827,7 +3794,7 @@ megasas_get_pd_list(struct megasas_instance *instance)
3827 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 3794 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
3828 dcmd->mbox.b[1] = 0; 3795 dcmd->mbox.b[1] = 0;
3829 dcmd->cmd = MFI_CMD_DCMD; 3796 dcmd->cmd = MFI_CMD_DCMD;
3830 dcmd->cmd_status = 0xFF; 3797 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
3831 dcmd->sge_count = 1; 3798 dcmd->sge_count = 1;
3832 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 3799 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
3833 dcmd->timeout = 0; 3800 dcmd->timeout = 0;
@@ -3874,11 +3841,7 @@ megasas_get_pd_list(struct megasas_instance *instance)
3874 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 3841 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
3875 ci, ci_h); 3842 ci, ci_h);
3876 3843
3877 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) 3844 megasas_return_cmd(instance, cmd);
3878 megasas_return_mfi_mpt_pthr(instance, cmd,
3879 cmd->mpt_pthr_cmd_blocked);
3880 else
3881 megasas_return_cmd(instance, cmd);
3882 3845
3883 return ret; 3846 return ret;
3884} 3847}
@@ -3927,7 +3890,7 @@ megasas_get_ld_list(struct megasas_instance *instance)
3927 if (instance->supportmax256vd) 3890 if (instance->supportmax256vd)
3928 dcmd->mbox.b[0] = 1; 3891 dcmd->mbox.b[0] = 1;
3929 dcmd->cmd = MFI_CMD_DCMD; 3892 dcmd->cmd = MFI_CMD_DCMD;
3930 dcmd->cmd_status = 0xFF; 3893 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
3931 dcmd->sge_count = 1; 3894 dcmd->sge_count = 1;
3932 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 3895 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
3933 dcmd->timeout = 0; 3896 dcmd->timeout = 0;
@@ -3965,11 +3928,7 @@ megasas_get_ld_list(struct megasas_instance *instance)
3965 ci, 3928 ci,
3966 ci_h); 3929 ci_h);
3967 3930
3968 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) 3931 megasas_return_cmd(instance, cmd);
3969 megasas_return_mfi_mpt_pthr(instance, cmd,
3970 cmd->mpt_pthr_cmd_blocked);
3971 else
3972 megasas_return_cmd(instance, cmd);
3973 return ret; 3932 return ret;
3974} 3933}
3975 3934
@@ -4020,7 +3979,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4020 dcmd->mbox.b[2] = 1; 3979 dcmd->mbox.b[2] = 1;
4021 3980
4022 dcmd->cmd = MFI_CMD_DCMD; 3981 dcmd->cmd = MFI_CMD_DCMD;
4023 dcmd->cmd_status = 0xFF; 3982 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4024 dcmd->sge_count = 1; 3983 dcmd->sge_count = 1;
4025 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 3984 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4026 dcmd->timeout = 0; 3985 dcmd->timeout = 0;
@@ -4050,11 +4009,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4050 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST), 4009 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
4051 ci, ci_h); 4010 ci, ci_h);
4052 4011
4053 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) 4012 megasas_return_cmd(instance, cmd);
4054 megasas_return_mfi_mpt_pthr(instance, cmd,
4055 cmd->mpt_pthr_cmd_blocked);
4056 else
4057 megasas_return_cmd(instance, cmd);
4058 4013
4059 return ret; 4014 return ret;
4060} 4015}
@@ -4091,12 +4046,11 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4091 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 4046 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
4092 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 4047 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4093 } 4048 }
4094 dev_info(&instance->pdev->dev, "Firmware supports %d VD %d PD\n", 4049
4095 instance->fw_supported_vd_count, 4050 dev_info(&instance->pdev->dev,
4096 instance->fw_supported_pd_count); 4051 "firmware type\t: %s\n",
4097 dev_info(&instance->pdev->dev, "Driver supports %d VD %d PD\n", 4052 instance->supportmax256vd ? "Extended VD(240 VD)firmware" :
4098 instance->drv_supported_vd_count, 4053 "Legacy(64 VD) firmware");
4099 instance->drv_supported_pd_count);
4100 4054
4101 old_map_sz = sizeof(struct MR_FW_RAID_MAP) + 4055 old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
4102 (sizeof(struct MR_LD_SPAN_MAP) * 4056 (sizeof(struct MR_LD_SPAN_MAP) *
@@ -4158,7 +4112,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
4158 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4112 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4159 4113
4160 dcmd->cmd = MFI_CMD_DCMD; 4114 dcmd->cmd = MFI_CMD_DCMD;
4161 dcmd->cmd_status = 0xFF; 4115 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4162 dcmd->sge_count = 1; 4116 dcmd->sge_count = 1;
4163 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 4117 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4164 dcmd->timeout = 0; 4118 dcmd->timeout = 0;
@@ -4181,16 +4135,17 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
4181 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); 4135 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
4182 le32_to_cpus((u32 *)&ctrl_info->adapterOperations3); 4136 le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
4183 megasas_update_ext_vd_details(instance); 4137 megasas_update_ext_vd_details(instance);
4138 instance->is_imr = (ctrl_info->memory_size ? 0 : 1);
4139 dev_info(&instance->pdev->dev,
4140 "controller type\t: %s(%dMB)\n",
4141 instance->is_imr ? "iMR" : "MR",
4142 le16_to_cpu(ctrl_info->memory_size));
4184 } 4143 }
4185 4144
4186 pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info), 4145 pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
4187 ci, ci_h); 4146 ci, ci_h);
4188 4147
4189 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) 4148 megasas_return_cmd(instance, cmd);
4190 megasas_return_mfi_mpt_pthr(instance, cmd,
4191 cmd->mpt_pthr_cmd_blocked);
4192 else
4193 megasas_return_cmd(instance, cmd);
4194 return ret; 4149 return ret;
4195} 4150}
4196 4151
@@ -4229,7 +4184,7 @@ int megasas_set_crash_dump_params(struct megasas_instance *instance,
4229 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4184 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4230 dcmd->mbox.b[0] = crash_buf_state; 4185 dcmd->mbox.b[0] = crash_buf_state;
4231 dcmd->cmd = MFI_CMD_DCMD; 4186 dcmd->cmd = MFI_CMD_DCMD;
4232 dcmd->cmd_status = 0xFF; 4187 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4233 dcmd->sge_count = 1; 4188 dcmd->sge_count = 1;
4234 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 4189 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
4235 dcmd->timeout = 0; 4190 dcmd->timeout = 0;
@@ -4245,11 +4200,7 @@ int megasas_set_crash_dump_params(struct megasas_instance *instance,
4245 else 4200 else
4246 ret = megasas_issue_polled(instance, cmd); 4201 ret = megasas_issue_polled(instance, cmd);
4247 4202
4248 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) 4203 megasas_return_cmd(instance, cmd);
4249 megasas_return_mfi_mpt_pthr(instance, cmd,
4250 cmd->mpt_pthr_cmd_blocked);
4251 else
4252 megasas_return_cmd(instance, cmd);
4253 return ret; 4204 return ret;
4254} 4205}
4255 4206
@@ -4262,7 +4213,7 @@ int megasas_set_crash_dump_params(struct megasas_instance *instance,
4262static int 4213static int
4263megasas_issue_init_mfi(struct megasas_instance *instance) 4214megasas_issue_init_mfi(struct megasas_instance *instance)
4264{ 4215{
4265 u32 context; 4216 __le32 context;
4266 4217
4267 struct megasas_cmd *cmd; 4218 struct megasas_cmd *cmd;
4268 4219
@@ -4300,7 +4251,7 @@ megasas_issue_init_mfi(struct megasas_instance *instance)
4300 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h); 4251 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
4301 4252
4302 init_frame->cmd = MFI_CMD_INIT; 4253 init_frame->cmd = MFI_CMD_INIT;
4303 init_frame->cmd_status = 0xFF; 4254 init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
4304 init_frame->queue_info_new_phys_addr_lo = 4255 init_frame->queue_info_new_phys_addr_lo =
4305 cpu_to_le32(lower_32_bits(initq_info_h)); 4256 cpu_to_le32(lower_32_bits(initq_info_h));
4306 init_frame->queue_info_new_phys_addr_hi = 4257 init_frame->queue_info_new_phys_addr_hi =
@@ -4354,6 +4305,21 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
4354 instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >> 4305 instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
4355 0x10; 4306 0x10;
4356 /* 4307 /*
4308 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
4309 * are reserved for IOCTL + driver's internal DCMDs.
4310 */
4311 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4312 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
4313 instance->max_scsi_cmds = (instance->max_fw_cmds -
4314 MEGASAS_SKINNY_INT_CMDS);
4315 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
4316 } else {
4317 instance->max_scsi_cmds = (instance->max_fw_cmds -
4318 MEGASAS_INT_CMDS);
4319 sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
4320 }
4321
4322 /*
4357 * Create a pool of commands 4323 * Create a pool of commands
4358 */ 4324 */
4359 if (megasas_alloc_cmds(instance)) 4325 if (megasas_alloc_cmds(instance))
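
The block added above holds back a fixed slice of the firmware command pool for IOCTLs and internal DCMDs on MFI adapters, and sizes the IOCTL semaphore to the reserved pool. A runnable model of the arithmetic; the max_fw_cmds value is illustrative and MEGASAS_INT_CMDS is restated here as 32 from the driver's defaults:

#include <stdio.h>

#define SKINNY_INT_CMDS  5	/* models MEGASAS_SKINNY_INT_CMDS */
#define INT_CMDS        32	/* models MEGASAS_INT_CMDS */
#define MFI_IOCTL_CMDS  27	/* the new MEGASAS_MFI_IOCTL_CMDS */

int main(void)
{
	int max_fw_cmds = 1008;		/* example firmware-reported limit */
	int skinny = 0;			/* 1 for the skinny PCI IDs */

	int max_scsi_cmds = max_fw_cmds - (skinny ? SKINNY_INT_CMDS : INT_CMDS);
	int ioctl_slots   = skinny ? SKINNY_INT_CMDS : MFI_IOCTL_CMDS;

	printf("scsi=%d ioctl_sem=%d\n", max_scsi_cmds, ioctl_slots);
	return 0;
}
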
@@ -4414,6 +4380,107 @@ fail_alloc_cmds:
4414 return 1; 4380 return 1;
4415} 4381}
4416 4382
4383/*
4384 * megasas_setup_irqs_ioapic - register legacy interrupts.
4385 * @instance: Adapter soft state
4386 *
4387 * Do not enable interrupts; only set up the ISRs.
4388 *
4389 * Return 0 on success.
4390 */
4391static int
4392megasas_setup_irqs_ioapic(struct megasas_instance *instance)
4393{
4394 struct pci_dev *pdev;
4395
4396 pdev = instance->pdev;
4397 instance->irq_context[0].instance = instance;
4398 instance->irq_context[0].MSIxIndex = 0;
4399 if (request_irq(pdev->irq, instance->instancet->service_isr,
4400 IRQF_SHARED, "megasas", &instance->irq_context[0])) {
4401 dev_err(&instance->pdev->dev,
4402 "Failed to register IRQ from %s %d\n",
4403 __func__, __LINE__);
4404 return -1;
4405 }
4406 return 0;
4407}
4408
4409/**
4410 * megasas_setup_irqs_msix - register MSI-x interrupts.
4411 * @instance: Adapter soft state
4412 * @is_probe: Driver probe check
4413 *
4414 * Do not enable interrupts; only set up the ISRs.
4415 *
4416 * Return 0 on success.
4417 */
4418static int
4419megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
4420{
4421 int i, j, cpu;
4422 struct pci_dev *pdev;
4423
4424 pdev = instance->pdev;
4425
4426 /* Try MSI-x */
4427 cpu = cpumask_first(cpu_online_mask);
4428 for (i = 0; i < instance->msix_vectors; i++) {
4429 instance->irq_context[i].instance = instance;
4430 instance->irq_context[i].MSIxIndex = i;
4431 if (request_irq(instance->msixentry[i].vector,
4432 instance->instancet->service_isr, 0, "megasas",
4433 &instance->irq_context[i])) {
4434 dev_err(&instance->pdev->dev,
4435 "Failed to register IRQ for vector %d.\n", i);
4436 for (j = 0; j < i; j++) {
4437 if (smp_affinity_enable)
4438 irq_set_affinity_hint(
4439 instance->msixentry[j].vector, NULL);
4440 free_irq(instance->msixentry[j].vector,
4441 &instance->irq_context[j]);
4442 }
4443 /* Retry irq register for IO_APIC*/
4444 instance->msix_vectors = 0;
4445 if (is_probe)
4446 return megasas_setup_irqs_ioapic(instance);
4447 else
4448 return -1;
4449 }
4450 if (smp_affinity_enable) {
4451 if (irq_set_affinity_hint(instance->msixentry[i].vector,
4452 get_cpu_mask(cpu)))
4453 dev_err(&instance->pdev->dev,
4454 "Failed to set affinity hint"
4455 " for cpu %d\n", cpu);
4456 cpu = cpumask_next(cpu, cpu_online_mask);
4457 }
4458 }
4459 return 0;
4460}
4461
4462/**
4463 * megasas_destroy_irqs - unregister interrupts.
4464 * @instance: Adapter soft state
4465 * Return: void
4466 */
4467static void
4468megasas_destroy_irqs(struct megasas_instance *instance)
4469{
4470 int i;
4471
4472 if (instance->msix_vectors)
4473 for (i = 0; i < instance->msix_vectors; i++) {
4474 if (smp_affinity_enable)
4475 irq_set_affinity_hint(
4476 instance->msixentry[i].vector, NULL);
4477 free_irq(instance->msixentry[i].vector,
4478 &instance->irq_context[i]);
4479 }
4480 else
4481 free_irq(instance->pdev->irq, &instance->irq_context[0]);
4482}
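The MSI-x path above also spreads the vectors across CPUs via affinity hints. As a standalone sketch (nvec and vec[] are hypothetical locals), the round-robin walk looks like this, with an explicit wrap-around added for robustness:

	unsigned int cpu = cpumask_first(cpu_online_mask);
	int i;

	for (i = 0; i < nvec; i++) {
		/* Pin each vector's hint to the next online CPU. */
		irq_set_affinity_hint(vec[i], get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(cpu_online_mask);
	}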
4483
4417/** 4484/**
4418 * megasas_init_fw - Initializes the FW 4485 * megasas_init_fw - Initializes the FW
4419 * @instance: Adapter soft state 4486 * @instance: Adapter soft state
@@ -4499,7 +4566,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
4499 * It is used for all MPT based Adapters. 4566 * It is used for all MPT based Adapters.
4500 */ 4567 */
4501 instance->reply_post_host_index_addr[0] = 4568 instance->reply_post_host_index_addr[0] =
4502 (u32 *)((u8 *)instance->reg_set + 4569 (u32 __iomem *)((u8 __iomem *)instance->reg_set +
4503 MPI2_REPLY_POST_HOST_INDEX_OFFSET); 4570 MPI2_REPLY_POST_HOST_INDEX_OFFSET);
4504 4571
4505 /* Check if MSI-X is supported while in ready state */ 4572 /* Check if MSI-X is supported while in ready state */
@@ -4531,7 +4598,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
4531 */ 4598 */
4532 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) { 4599 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
4533 instance->reply_post_host_index_addr[loop] = 4600 instance->reply_post_host_index_addr[loop] =
4534 (u32 *)((u8 *)instance->reg_set + 4601 (u32 __iomem *)
4602 ((u8 __iomem *)instance->reg_set +
4535 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET 4603 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
4536 + (loop * 0x10)); 4604 + (loop * 0x10));
4537 } 4605 }
@@ -4551,14 +4619,19 @@ static int megasas_init_fw(struct megasas_instance *instance)
4551 instance->msix_vectors = i; 4619 instance->msix_vectors = i;
4552 else 4620 else
4553 instance->msix_vectors = 0; 4621 instance->msix_vectors = 0;
4554
4555 dev_info(&instance->pdev->dev, "[scsi%d]: FW supports"
4556 "<%d> MSIX vector,Online CPUs: <%d>,"
4557 "Current MSIX <%d>\n", instance->host->host_no,
4558 fw_msix_count, (unsigned int)num_online_cpus(),
4559 instance->msix_vectors);
4560 } 4622 }
4561 4623
4624 dev_info(&instance->pdev->dev,
4625 "firmware supports msix\t: (%d)", fw_msix_count);
4626 dev_info(&instance->pdev->dev,
4627 "current msix/online cpus\t: (%d/%d)\n",
4628 instance->msix_vectors, (unsigned int)num_online_cpus());
4629
4630 if (instance->msix_vectors ?
4631 megasas_setup_irqs_msix(instance, 1) :
4632 megasas_setup_irqs_ioapic(instance))
4633 goto fail_setup_irqs;
4634
4562 instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info), 4635 instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
4563 GFP_KERNEL); 4636 GFP_KERNEL);
4564 if (instance->ctrl_info == NULL) 4637 if (instance->ctrl_info == NULL)
@@ -4574,6 +4647,11 @@ static int megasas_init_fw(struct megasas_instance *instance)
4574 if (instance->instancet->init_adapter(instance)) 4647 if (instance->instancet->init_adapter(instance))
4575 goto fail_init_adapter; 4648 goto fail_init_adapter;
4576 4649
4650 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
4651 (unsigned long)instance);
4652
4653 instance->instancet->enable_intr(instance);
4654
4577 printk(KERN_ERR "megasas: INIT adapter done\n"); 4655 printk(KERN_ERR "megasas: INIT adapter done\n");
4578 4656
4579 /** for passthrough 4657 /** for passthrough
@@ -4584,7 +4662,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
4584 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); 4662 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
4585 if (megasas_get_pd_list(instance) < 0) { 4663 if (megasas_get_pd_list(instance) < 0) {
4586 printk(KERN_ERR "megasas: failed to get PD list\n"); 4664 printk(KERN_ERR "megasas: failed to get PD list\n");
4587 goto fail_init_adapter; 4665 goto fail_get_pd_list;
4588 } 4666 }
4589 4667
4590 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 4668 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
@@ -4610,17 +4688,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
4610 4688
4611 tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2); 4689 tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
4612 4690
4613 /*Check whether controller is iMR or MR */
4614 if (ctrl_info->memory_size) {
4615 instance->is_imr = 0;
4616 dev_info(&instance->pdev->dev, "Controller type: MR,"
4617 "Memory size is: %dMB\n",
4618 le16_to_cpu(ctrl_info->memory_size));
4619 } else {
4620 instance->is_imr = 1;
4621 dev_info(&instance->pdev->dev,
4622 "Controller type: iMR\n");
4623 }
4624 instance->disableOnlineCtrlReset = 4691 instance->disableOnlineCtrlReset =
4625 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; 4692 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
4626 instance->mpio = ctrl_info->adapterOperations2.mpio; 4693 instance->mpio = ctrl_info->adapterOperations2.mpio;
@@ -4628,9 +4695,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
4628 ctrl_info->adapterOperations2.supportUnevenSpans; 4695 ctrl_info->adapterOperations2.supportUnevenSpans;
4629 if (instance->UnevenSpanSupport) { 4696 if (instance->UnevenSpanSupport) {
4630 struct fusion_context *fusion = instance->ctrl_context; 4697 struct fusion_context *fusion = instance->ctrl_context;
4631
4632 dev_info(&instance->pdev->dev, "FW supports: "
4633 "UnevenSpanSupport=%x\n", instance->UnevenSpanSupport);
4634 if (MR_ValidateMapInfo(instance)) 4698 if (MR_ValidateMapInfo(instance))
4635 fusion->fast_path_io = 1; 4699 fusion->fast_path_io = 1;
4636 else 4700 else
@@ -4657,13 +4721,11 @@ static int megasas_init_fw(struct megasas_instance *instance)
4657 instance->crash_dump_drv_support = 4721 instance->crash_dump_drv_support =
4658 (instance->crash_dump_fw_support && 4722 (instance->crash_dump_fw_support &&
4659 instance->crash_dump_buf); 4723 instance->crash_dump_buf);
4660 if (instance->crash_dump_drv_support) { 4724 if (instance->crash_dump_drv_support)
4661 dev_info(&instance->pdev->dev, "Firmware Crash dump "
4662 "feature is supported\n");
4663 megasas_set_crash_dump_params(instance, 4725 megasas_set_crash_dump_params(instance,
4664 MR_CRASH_BUF_TURN_OFF); 4726 MR_CRASH_BUF_TURN_OFF);
4665 4727
4666 } else { 4728 else {
4667 if (instance->crash_dump_buf) 4729 if (instance->crash_dump_buf)
4668 pci_free_consistent(instance->pdev, 4730 pci_free_consistent(instance->pdev,
4669 CRASH_DMA_BUF_SIZE, 4731 CRASH_DMA_BUF_SIZE,
@@ -4674,37 +4736,28 @@ static int megasas_init_fw(struct megasas_instance *instance)
4674 4736
4675 instance->secure_jbod_support = 4737 instance->secure_jbod_support =
4676 ctrl_info->adapterOperations3.supportSecurityonJBOD; 4738 ctrl_info->adapterOperations3.supportSecurityonJBOD;
4677 if (instance->secure_jbod_support) 4739
4678 dev_info(&instance->pdev->dev, "Firmware supports Secure JBOD\n"); 4740 dev_info(&instance->pdev->dev,
4741 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
4742 le16_to_cpu(ctrl_info->pci.vendor_id),
4743 le16_to_cpu(ctrl_info->pci.device_id),
4744 le16_to_cpu(ctrl_info->pci.sub_vendor_id),
4745 le16_to_cpu(ctrl_info->pci.sub_device_id));
4746 dev_info(&instance->pdev->dev, "unevenspan support : %s\n",
4747 instance->UnevenSpanSupport ? "yes" : "no");
4748 dev_info(&instance->pdev->dev, "disable ocr : %s\n",
4749 instance->disableOnlineCtrlReset ? "yes" : "no");
4750 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n",
4751 instance->crash_dump_drv_support ? "yes" : "no");
4752 dev_info(&instance->pdev->dev, "secure jbod : %s\n",
4753 instance->secure_jbod_support ? "yes" : "no");
4754
4755
4679 instance->max_sectors_per_req = instance->max_num_sge * 4756 instance->max_sectors_per_req = instance->max_num_sge *
4680 PAGE_SIZE / 512; 4757 PAGE_SIZE / 512;
4681 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) 4758 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
4682 instance->max_sectors_per_req = tmp_sectors; 4759 instance->max_sectors_per_req = tmp_sectors;
4683 4760
4684 /*
4685 * 1. For fusion adapters, 3 commands for IOCTL and 5 commands
4686 * for driver's internal DCMDs.
4687 * 2. For MFI skinny adapters, 5 commands for IOCTL + driver's
4688 * internal DCMDs.
4689 * 3. For rest of MFI adapters, 27 commands reserved for IOCTLs
4690 * and 5 commands for drivers's internal DCMD.
4691 */
4692 if (instance->ctrl_context) {
4693 instance->max_scsi_cmds = instance->max_fw_cmds -
4694 (MEGASAS_FUSION_INTERNAL_CMDS +
4695 MEGASAS_FUSION_IOCTL_CMDS);
4696 sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
4697 } else if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4698 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
4699 instance->max_scsi_cmds = instance->max_fw_cmds -
4700 MEGASAS_SKINNY_INT_CMDS;
4701 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
4702 } else {
4703 instance->max_scsi_cmds = instance->max_fw_cmds -
4704 MEGASAS_INT_CMDS;
4705 sema_init(&instance->ioctl_sem, (MEGASAS_INT_CMDS - 5));
4706 }
4707
4708 /* Check for valid throttlequeuedepth module parameter */ 4761 /* Check for valid throttlequeuedepth module parameter */
4709 if (throttlequeuedepth && 4762 if (throttlequeuedepth &&
4710 throttlequeuedepth <= instance->max_scsi_cmds) 4763 throttlequeuedepth <= instance->max_scsi_cmds)
@@ -4713,12 +4766,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
4713 instance->throttlequeuedepth = 4766 instance->throttlequeuedepth =
4714 MEGASAS_THROTTLE_QUEUE_DEPTH; 4767 MEGASAS_THROTTLE_QUEUE_DEPTH;
4715 4768
4716 /*
4717 * Setup tasklet for cmd completion
4718 */
4719
4720 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
4721 (unsigned long)instance);
4722 4769
4723 /* Launch SR-IOV heartbeat timer */ 4770 /* Launch SR-IOV heartbeat timer */
4724 if (instance->requestorId) { 4771 if (instance->requestorId) {
@@ -4733,7 +4780,14 @@ static int megasas_init_fw(struct megasas_instance *instance)
4733 4780
4734 return 0; 4781 return 0;
4735 4782
4783fail_get_pd_list:
4784 instance->instancet->disable_intr(instance);
4736fail_init_adapter: 4785fail_init_adapter:
4786 megasas_destroy_irqs(instance);
4787fail_setup_irqs:
4788 if (instance->msix_vectors)
4789 pci_disable_msix(instance->pdev);
4790 instance->msix_vectors = 0;
4737fail_ready_state: 4791fail_ready_state:
4738 kfree(instance->ctrl_info); 4792 kfree(instance->ctrl_info);
4739 instance->ctrl_info = NULL; 4793 instance->ctrl_info = NULL;
@@ -4822,21 +4876,17 @@ megasas_get_seq_num(struct megasas_instance *instance,
4822 /* 4876 /*
4823 * Copy the data back into callers buffer 4877 * Copy the data back into callers buffer
4824 */ 4878 */
4825 eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num); 4879 eli->newest_seq_num = el_info->newest_seq_num;
4826 eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num); 4880 eli->oldest_seq_num = el_info->oldest_seq_num;
4827 eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num); 4881 eli->clear_seq_num = el_info->clear_seq_num;
4828 eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num); 4882 eli->shutdown_seq_num = el_info->shutdown_seq_num;
4829 eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num); 4883 eli->boot_seq_num = el_info->boot_seq_num;
4830 } 4884 }
4831 4885
4832 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), 4886 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
4833 el_info, el_info_h); 4887 el_info, el_info_h);
4834 4888
4835 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) 4889 megasas_return_cmd(instance, cmd);
4836 megasas_return_mfi_mpt_pthr(instance, cmd,
4837 cmd->mpt_pthr_cmd_blocked);
4838 else
4839 megasas_return_cmd(instance, cmd);
4840 4890
4841 return 0; 4891 return 0;
4842} 4892}
@@ -4877,8 +4927,8 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
4877 4927
4878 if (instance->aen_cmd) { 4928 if (instance->aen_cmd) {
4879 4929
4880 prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1]; 4930 prev_aen.word =
4881 prev_aen.members.locale = le16_to_cpu(prev_aen.members.locale); 4931 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
4882 4932
4883 /* 4933 /*
4884 * A class whose enum value is smaller is inclusive of all 4934 * A class whose enum value is smaller is inclusive of all
@@ -4990,7 +5040,7 @@ static int megasas_start_aen(struct megasas_instance *instance)
4990 class_locale.members.class = MR_EVT_CLASS_DEBUG; 5040 class_locale.members.class = MR_EVT_CLASS_DEBUG;
4991 5041
4992 return megasas_register_aen(instance, 5042 return megasas_register_aen(instance,
4993 eli.newest_seq_num + 1, 5043 le32_to_cpu(eli.newest_seq_num) + 1,
4994 class_locale.word); 5044 class_locale.word);
4995} 5045}
4996 5046
@@ -5001,6 +5051,7 @@ static int megasas_start_aen(struct megasas_instance *instance)
5001static int megasas_io_attach(struct megasas_instance *instance) 5051static int megasas_io_attach(struct megasas_instance *instance)
5002{ 5052{
5003 struct Scsi_Host *host = instance->host; 5053 struct Scsi_Host *host = instance->host;
5054 u32 error;
5004 5055
5005 /* 5056 /*
5006 * Export parameters required by SCSI mid-layer 5057 * Export parameters required by SCSI mid-layer
@@ -5050,12 +5101,21 @@ static int megasas_io_attach(struct megasas_instance *instance)
5050 host->hostt->eh_device_reset_handler = NULL; 5101 host->hostt->eh_device_reset_handler = NULL;
5051 host->hostt->eh_bus_reset_handler = NULL; 5102 host->hostt->eh_bus_reset_handler = NULL;
5052 } 5103 }
5104 error = scsi_init_shared_tag_map(host, host->can_queue);
5105 if (error) {
5106 dev_err(&instance->pdev->dev,
5107 "Failed to shared tag from %s %d\n",
5108 __func__, __LINE__);
5109 return -ENODEV;
5110 }
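With a shared tag map in place, scmd->request->tag is unique across the host, which is what lets the fusion path index its MPT frame pool directly instead of walking a locked free list (see megasas_get_cmd_fusion() further down). Illustrative caller-side sketch:

	u32 blk_tag = scmd->request->tag;
	struct megasas_cmd_fusion *cmd =
		megasas_get_cmd_fusion(instance, blk_tag);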
5053 5111
5054 /* 5112 /*
5055 * Notify the mid-layer about the new controller 5113 * Notify the mid-layer about the new controller
5056 */ 5114 */
5057 if (scsi_add_host(host, &instance->pdev->dev)) { 5115 if (scsi_add_host(host, &instance->pdev->dev)) {
5058 printk(KERN_DEBUG "megasas: scsi_add_host failed\n"); 5116 dev_err(&instance->pdev->dev,
5117 "Failed to add host from %s %d\n",
5118 __func__, __LINE__);
5059 return -ENODEV; 5119 return -ENODEV;
5060 } 5120 }
5061 5121
@@ -5106,7 +5166,7 @@ fail_set_dma_mask:
5106static int megasas_probe_one(struct pci_dev *pdev, 5166static int megasas_probe_one(struct pci_dev *pdev,
5107 const struct pci_device_id *id) 5167 const struct pci_device_id *id)
5108{ 5168{
5109 int rval, pos, i, j, cpu; 5169 int rval, pos;
5110 struct Scsi_Host *host; 5170 struct Scsi_Host *host;
5111 struct megasas_instance *instance; 5171 struct megasas_instance *instance;
5112 u16 control = 0; 5172 u16 control = 0;
@@ -5129,16 +5189,6 @@ static int megasas_probe_one(struct pci_dev *pdev,
5129 } 5189 }
5130 5190
5131 /* 5191 /*
5132 * Announce PCI information
5133 */
5134 printk(KERN_INFO "megasas: %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
5135 pdev->vendor, pdev->device, pdev->subsystem_vendor,
5136 pdev->subsystem_device);
5137
5138 printk("bus %d:slot %d:func %d\n",
5139 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
5140
5141 /*
5142 * PCI prepping: enable device set bus mastering and dma mask 5192 * PCI prepping: enable device set bus mastering and dma mask
5143 */ 5193 */
5144 rval = pci_enable_device_mem(pdev); 5194 rval = pci_enable_device_mem(pdev);
@@ -5183,8 +5233,6 @@ static int megasas_probe_one(struct pci_dev *pdev,
5183 fusion = instance->ctrl_context; 5233 fusion = instance->ctrl_context;
5184 memset(fusion, 0, 5234 memset(fusion, 0,
5185 ((1 << PAGE_SHIFT) << instance->ctrl_context_pages)); 5235 ((1 << PAGE_SHIFT) << instance->ctrl_context_pages));
5186 INIT_LIST_HEAD(&fusion->cmd_pool);
5187 spin_lock_init(&fusion->mpt_pool_lock);
5188 } 5236 }
5189 break; 5237 break;
5190 default: /* For all other supported controllers */ 5238 default: /* For all other supported controllers */
@@ -5207,6 +5255,13 @@ static int megasas_probe_one(struct pci_dev *pdev,
5207 break; 5255 break;
5208 } 5256 }
5209 5257
5258 instance->system_info_buf = pci_zalloc_consistent(pdev,
5259 sizeof(struct MR_DRV_SYSTEM_INFO),
5260 &instance->system_info_h);
5261
5262 if (!instance->system_info_buf)
5263 dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n");
5264
5210 /* Crash dump feature related initialisation*/ 5265 /* Crash dump feature related initialisation*/
5211 instance->drv_buf_index = 0; 5266 instance->drv_buf_index = 0;
5212 instance->drv_buf_alloc = 0; 5267 instance->drv_buf_alloc = 0;
@@ -5315,55 +5370,6 @@ static int megasas_probe_one(struct pci_dev *pdev,
5315 } 5370 }
5316 } 5371 }
5317 5372
5318retry_irq_register:
5319 /*
5320 * Register IRQ
5321 */
5322 if (instance->msix_vectors) {
5323 cpu = cpumask_first(cpu_online_mask);
5324 for (i = 0; i < instance->msix_vectors; i++) {
5325 instance->irq_context[i].instance = instance;
5326 instance->irq_context[i].MSIxIndex = i;
5327 if (request_irq(instance->msixentry[i].vector,
5328 instance->instancet->service_isr, 0,
5329 "megasas",
5330 &instance->irq_context[i])) {
5331 printk(KERN_DEBUG "megasas: Failed to "
5332 "register IRQ for vector %d.\n", i);
5333 for (j = 0; j < i; j++) {
5334 if (smp_affinity_enable)
5335 irq_set_affinity_hint(
5336 instance->msixentry[j].vector, NULL);
5337 free_irq(
5338 instance->msixentry[j].vector,
5339 &instance->irq_context[j]);
5340 }
5341 /* Retry irq register for IO_APIC */
5342 instance->msix_vectors = 0;
5343 goto retry_irq_register;
5344 }
5345 if (smp_affinity_enable) {
5346 if (irq_set_affinity_hint(instance->msixentry[i].vector,
5347 get_cpu_mask(cpu)))
5348 dev_err(&instance->pdev->dev,
5349 "Error setting affinity hint "
5350 "for cpu %d\n", cpu);
5351 cpu = cpumask_next(cpu, cpu_online_mask);
5352 }
5353 }
5354 } else {
5355 instance->irq_context[0].instance = instance;
5356 instance->irq_context[0].MSIxIndex = 0;
5357 if (request_irq(pdev->irq, instance->instancet->service_isr,
5358 IRQF_SHARED, "megasas",
5359 &instance->irq_context[0])) {
5360 printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
5361 goto fail_irq;
5362 }
5363 }
5364
5365 instance->instancet->enable_intr(instance);
5366
5367 /* 5373 /*
5368 * Store instance in PCI softstate 5374 * Store instance in PCI softstate
5369 */ 5375 */
@@ -5410,17 +5416,8 @@ retry_irq_register:
5410 megasas_mgmt_info.max_index--; 5416 megasas_mgmt_info.max_index--;
5411 5417
5412 instance->instancet->disable_intr(instance); 5418 instance->instancet->disable_intr(instance);
5413 if (instance->msix_vectors) 5419 megasas_destroy_irqs(instance);
5414 for (i = 0; i < instance->msix_vectors; i++) { 5420
5415 if (smp_affinity_enable)
5416 irq_set_affinity_hint(
5417 instance->msixentry[i].vector, NULL);
5418 free_irq(instance->msixentry[i].vector,
5419 &instance->irq_context[i]);
5420 }
5421 else
5422 free_irq(instance->pdev->irq, &instance->irq_context[0]);
5423fail_irq:
5424 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || 5421 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
5425 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) || 5422 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
5426 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 5423 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
@@ -5428,9 +5425,9 @@ fail_irq:
5428 megasas_release_fusion(instance); 5425 megasas_release_fusion(instance);
5429 else 5426 else
5430 megasas_release_mfi(instance); 5427 megasas_release_mfi(instance);
5431 fail_init_mfi:
5432 if (instance->msix_vectors) 5428 if (instance->msix_vectors)
5433 pci_disable_msix(instance->pdev); 5429 pci_disable_msix(instance->pdev);
5430fail_init_mfi:
5434 fail_alloc_dma_buf: 5431 fail_alloc_dma_buf:
5435 if (instance->evt_detail) 5432 if (instance->evt_detail)
5436 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), 5433 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
@@ -5487,11 +5484,7 @@ static void megasas_flush_cache(struct megasas_instance *instance)
5487 dev_err(&instance->pdev->dev, "Command timed out" 5484 dev_err(&instance->pdev->dev, "Command timed out"
5488 " from %s\n", __func__); 5485 " from %s\n", __func__);
5489 5486
5490 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) 5487 megasas_return_cmd(instance, cmd);
5491 megasas_return_mfi_mpt_pthr(instance, cmd,
5492 cmd->mpt_pthr_cmd_blocked);
5493 else
5494 megasas_return_cmd(instance, cmd);
5495 5488
5496 return; 5489 return;
5497} 5490}
@@ -5538,11 +5531,7 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
5538 dev_err(&instance->pdev->dev, "Command timed out" 5531 dev_err(&instance->pdev->dev, "Command timed out"
5539 " from %s\n", __func__); 5532 " from %s\n", __func__);
5540 5533
5541 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) 5534 megasas_return_cmd(instance, cmd);
5542 megasas_return_mfi_mpt_pthr(instance, cmd,
5543 cmd->mpt_pthr_cmd_blocked);
5544 else
5545 megasas_return_cmd(instance, cmd);
5546 5535
5547 return; 5536 return;
5548} 5537}
@@ -5558,7 +5547,6 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
5558{ 5547{
5559 struct Scsi_Host *host; 5548 struct Scsi_Host *host;
5560 struct megasas_instance *instance; 5549 struct megasas_instance *instance;
5561 int i;
5562 5550
5563 instance = pci_get_drvdata(pdev); 5551 instance = pci_get_drvdata(pdev);
5564 host = instance->host; 5552 host = instance->host;
@@ -5583,16 +5571,8 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
5583 pci_set_drvdata(instance->pdev, instance); 5571 pci_set_drvdata(instance->pdev, instance);
5584 instance->instancet->disable_intr(instance); 5572 instance->instancet->disable_intr(instance);
5585 5573
5586 if (instance->msix_vectors) 5574 megasas_destroy_irqs(instance);
5587 for (i = 0; i < instance->msix_vectors; i++) { 5575
5588 if (smp_affinity_enable)
5589 irq_set_affinity_hint(
5590 instance->msixentry[i].vector, NULL);
5591 free_irq(instance->msixentry[i].vector,
5592 &instance->irq_context[i]);
5593 }
5594 else
5595 free_irq(instance->pdev->irq, &instance->irq_context[0]);
5596 if (instance->msix_vectors) 5576 if (instance->msix_vectors)
5597 pci_disable_msix(instance->pdev); 5577 pci_disable_msix(instance->pdev);
5598 5578
@@ -5611,7 +5591,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
5611static int 5591static int
5612megasas_resume(struct pci_dev *pdev) 5592megasas_resume(struct pci_dev *pdev)
5613{ 5593{
5614 int rval, i, j, cpu; 5594 int rval;
5615 struct Scsi_Host *host; 5595 struct Scsi_Host *host;
5616 struct megasas_instance *instance; 5596 struct megasas_instance *instance;
5617 5597
@@ -5681,50 +5661,10 @@ megasas_resume(struct pci_dev *pdev)
5681 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 5661 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
5682 (unsigned long)instance); 5662 (unsigned long)instance);
5683 5663
5684 /* 5664 if (instance->msix_vectors ?
5685 * Register IRQ 5665 megasas_setup_irqs_msix(instance, 0) :
5686 */ 5666 megasas_setup_irqs_ioapic(instance))
5687 if (instance->msix_vectors) { 5667 goto fail_init_mfi;
5688 cpu = cpumask_first(cpu_online_mask);
5689 for (i = 0 ; i < instance->msix_vectors; i++) {
5690 instance->irq_context[i].instance = instance;
5691 instance->irq_context[i].MSIxIndex = i;
5692 if (request_irq(instance->msixentry[i].vector,
5693 instance->instancet->service_isr, 0,
5694 "megasas",
5695 &instance->irq_context[i])) {
5696 printk(KERN_DEBUG "megasas: Failed to "
5697 "register IRQ for vector %d.\n", i);
5698 for (j = 0; j < i; j++) {
5699 if (smp_affinity_enable)
5700 irq_set_affinity_hint(
5701 instance->msixentry[j].vector, NULL);
5702 free_irq(
5703 instance->msixentry[j].vector,
5704 &instance->irq_context[j]);
5705 }
5706 goto fail_irq;
5707 }
5708
5709 if (smp_affinity_enable) {
5710 if (irq_set_affinity_hint(instance->msixentry[i].vector,
5711 get_cpu_mask(cpu)))
5712 dev_err(&instance->pdev->dev, "Error "
5713 "setting affinity hint for cpu "
5714 "%d\n", cpu);
5715 cpu = cpumask_next(cpu, cpu_online_mask);
5716 }
5717 }
5718 } else {
5719 instance->irq_context[0].instance = instance;
5720 instance->irq_context[0].MSIxIndex = 0;
5721 if (request_irq(pdev->irq, instance->instancet->service_isr,
5722 IRQF_SHARED, "megasas",
5723 &instance->irq_context[0])) {
5724 printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
5725 goto fail_irq;
5726 }
5727 }
5728 5668
5729 /* Re-launch SR-IOV heartbeat timer */ 5669 /* Re-launch SR-IOV heartbeat timer */
5730 if (instance->requestorId) { 5670 if (instance->requestorId) {
@@ -5733,8 +5673,10 @@ megasas_resume(struct pci_dev *pdev)
5733 &instance->sriov_heartbeat_timer, 5673 &instance->sriov_heartbeat_timer,
5734 megasas_sriov_heartbeat_handler, 5674 megasas_sriov_heartbeat_handler,
5735 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 5675 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
5736 else 5676 else {
5737 instance->skip_heartbeat_timer_del = 1; 5677 instance->skip_heartbeat_timer_del = 1;
5678 goto fail_init_mfi;
5679 }
5738 } 5680 }
5739 5681
5740 instance->instancet->enable_intr(instance); 5682 instance->instancet->enable_intr(instance);
@@ -5748,7 +5690,6 @@ megasas_resume(struct pci_dev *pdev)
5748 5690
5749 return 0; 5691 return 0;
5750 5692
5751fail_irq:
5752fail_init_mfi: 5693fail_init_mfi:
5753 if (instance->evt_detail) 5694 if (instance->evt_detail)
5754 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), 5695 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
@@ -5829,16 +5770,8 @@ static void megasas_detach_one(struct pci_dev *pdev)
5829 5770
5830 instance->instancet->disable_intr(instance); 5771 instance->instancet->disable_intr(instance);
5831 5772
5832 if (instance->msix_vectors) 5773 megasas_destroy_irqs(instance);
5833 for (i = 0; i < instance->msix_vectors; i++) { 5774
5834 if (smp_affinity_enable)
5835 irq_set_affinity_hint(
5836 instance->msixentry[i].vector, NULL);
5837 free_irq(instance->msixentry[i].vector,
5838 &instance->irq_context[i]);
5839 }
5840 else
5841 free_irq(instance->pdev->irq, &instance->irq_context[0]);
5842 if (instance->msix_vectors) 5775 if (instance->msix_vectors)
5843 pci_disable_msix(instance->pdev); 5776 pci_disable_msix(instance->pdev);
5844 5777
@@ -5899,6 +5832,10 @@ static void megasas_detach_one(struct pci_dev *pdev)
5899 pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE, 5832 pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
5900 instance->crash_dump_buf, instance->crash_dump_h); 5833 instance->crash_dump_buf, instance->crash_dump_h);
5901 5834
5835 if (instance->system_info_buf)
5836 pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
5837 instance->system_info_buf, instance->system_info_h);
5838
5902 scsi_host_put(host); 5839 scsi_host_put(host);
5903 5840
5904 pci_disable_device(pdev); 5841 pci_disable_device(pdev);
@@ -5912,23 +5849,14 @@ static void megasas_detach_one(struct pci_dev *pdev)
5912 */ 5849 */
5913static void megasas_shutdown(struct pci_dev *pdev) 5850static void megasas_shutdown(struct pci_dev *pdev)
5914{ 5851{
5915 int i;
5916 struct megasas_instance *instance = pci_get_drvdata(pdev); 5852 struct megasas_instance *instance = pci_get_drvdata(pdev);
5917 5853
5918 instance->unload = 1; 5854 instance->unload = 1;
5919 megasas_flush_cache(instance); 5855 megasas_flush_cache(instance);
5920 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 5856 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
5921 instance->instancet->disable_intr(instance); 5857 instance->instancet->disable_intr(instance);
5922 if (instance->msix_vectors) 5858 megasas_destroy_irqs(instance);
5923 for (i = 0; i < instance->msix_vectors; i++) { 5859
5924 if (smp_affinity_enable)
5925 irq_set_affinity_hint(
5926 instance->msixentry[i].vector, NULL);
5927 free_irq(instance->msixentry[i].vector,
5928 &instance->irq_context[i]);
5929 }
5930 else
5931 free_irq(instance->pdev->irq, &instance->irq_context[0]);
5932 if (instance->msix_vectors) 5860 if (instance->msix_vectors)
5933 pci_disable_msix(instance->pdev); 5861 pci_disable_msix(instance->pdev);
5934} 5862}
@@ -6211,11 +6139,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6211 kbuff_arr[i] = NULL; 6139 kbuff_arr[i] = NULL;
6212 } 6140 }
6213 6141
6214 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) 6142 megasas_return_cmd(instance, cmd);
6215 megasas_return_mfi_mpt_pthr(instance, cmd,
6216 cmd->mpt_pthr_cmd_blocked);
6217 else
6218 megasas_return_cmd(instance, cmd);
6219 return error; 6143 return error;
6220} 6144}
6221 6145
@@ -6502,6 +6426,15 @@ static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf)
6502static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL); 6426static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);
6503 6427
6504static ssize_t 6428static ssize_t
6429megasas_sysfs_show_release_date(struct device_driver *dd, char *buf)
6430{
6431 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
6432 MEGASAS_RELDATE);
6433}
6434
6435static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date, NULL);
6436
6437static ssize_t
6505megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf) 6438megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
6506{ 6439{
6507 return sprintf(buf, "%u\n", support_poll_for_event); 6440 return sprintf(buf, "%u\n", support_poll_for_event);
@@ -6841,6 +6774,11 @@ static int __init megasas_init(void)
6841 goto err_dcf_attr_ver; 6774 goto err_dcf_attr_ver;
6842 6775
6843 rval = driver_create_file(&megasas_pci_driver.driver, 6776 rval = driver_create_file(&megasas_pci_driver.driver,
6777 &driver_attr_release_date);
6778 if (rval)
6779 goto err_dcf_rel_date;
6780
6781 rval = driver_create_file(&megasas_pci_driver.driver,
6844 &driver_attr_support_poll_for_event); 6782 &driver_attr_support_poll_for_event);
6845 if (rval) 6783 if (rval)
6846 goto err_dcf_support_poll_for_event; 6784 goto err_dcf_support_poll_for_event;
@@ -6863,6 +6801,9 @@ err_dcf_dbg_lvl:
6863 driver_remove_file(&megasas_pci_driver.driver, 6801 driver_remove_file(&megasas_pci_driver.driver,
6864 &driver_attr_support_poll_for_event); 6802 &driver_attr_support_poll_for_event);
6865err_dcf_support_poll_for_event: 6803err_dcf_support_poll_for_event:
6804 driver_remove_file(&megasas_pci_driver.driver,
6805 &driver_attr_release_date);
6806err_dcf_rel_date:
6866 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 6807 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
6867err_dcf_attr_ver: 6808err_dcf_attr_ver:
6868 pci_unregister_driver(&megasas_pci_driver); 6809 pci_unregister_driver(&megasas_pci_driver);
@@ -6882,6 +6823,8 @@ static void __exit megasas_exit(void)
6882 &driver_attr_support_poll_for_event); 6823 &driver_attr_support_poll_for_event);
6883 driver_remove_file(&megasas_pci_driver.driver, 6824 driver_remove_file(&megasas_pci_driver.driver,
6884 &driver_attr_support_device_change); 6825 &driver_attr_support_device_change);
6826 driver_remove_file(&megasas_pci_driver.driver,
6827 &driver_attr_release_date);
6885 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 6828 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
6886 6829
6887 pci_unregister_driver(&megasas_pci_driver); 6830 pci_unregister_driver(&megasas_pci_driver);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index e8b7a69428b6..be57b18675a4 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -150,7 +150,7 @@ u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map)
150 return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef); 150 return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
151} 151}
152 152
153u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map) 153__le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
154{ 154{
155 return map->raidMap.devHndlInfo[pd].curDevHdl; 155 return map->raidMap.devHndlInfo[pd].curDevHdl;
156} 156}
@@ -743,7 +743,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
743 u8 retval = TRUE; 743 u8 retval = TRUE;
744 u8 do_invader = 0; 744 u8 do_invader = 0;
745 u64 *pdBlock = &io_info->pdBlock; 745 u64 *pdBlock = &io_info->pdBlock;
746 u16 *pDevHandle = &io_info->devHandle; 746 __le16 *pDevHandle = &io_info->devHandle;
747 u32 logArm, rowMod, armQ, arm; 747 u32 logArm, rowMod, armQ, arm;
748 748
749 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER || 749 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
@@ -777,7 +777,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
777 if (pd != MR_PD_INVALID) 777 if (pd != MR_PD_INVALID)
778 *pDevHandle = MR_PdDevHandleGet(pd, map); 778 *pDevHandle = MR_PdDevHandleGet(pd, map);
779 else { 779 else {
780 *pDevHandle = MR_PD_INVALID; 780 *pDevHandle = cpu_to_le16(MR_PD_INVALID);
781 if ((raid->level >= 5) && 781 if ((raid->level >= 5) &&
782 (!do_invader || (do_invader && 782 (!do_invader || (do_invader &&
783 (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) 783 (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
@@ -825,7 +825,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
825 u8 retval = TRUE; 825 u8 retval = TRUE;
826 u8 do_invader = 0; 826 u8 do_invader = 0;
827 u64 *pdBlock = &io_info->pdBlock; 827 u64 *pdBlock = &io_info->pdBlock;
828 u16 *pDevHandle = &io_info->devHandle; 828 __le16 *pDevHandle = &io_info->devHandle;
829 829
830 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER || 830 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
831 instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) 831 instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
@@ -872,7 +872,8 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
872 /* Get dev handle from Pd. */ 872 /* Get dev handle from Pd. */
873 *pDevHandle = MR_PdDevHandleGet(pd, map); 873 *pDevHandle = MR_PdDevHandleGet(pd, map);
874 else { 874 else {
875 *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */ 875 /* set dev handle as invalid. */
876 *pDevHandle = cpu_to_le16(MR_PD_INVALID);
876 if ((raid->level >= 5) && 877 if ((raid->level >= 5) &&
877 (!do_invader || (do_invader && 878 (!do_invader || (do_invader &&
878 (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) 879 (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
@@ -1117,7 +1118,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
1117 ref_in_start_stripe, io_info, 1118 ref_in_start_stripe, io_info,
1118 pRAID_Context, map); 1119 pRAID_Context, map);
1119 /* If IO on an invalid Pd, then FP is not possible.*/ 1120 /* If IO on an invalid Pd, then FP is not possible.*/
1120 if (io_info->devHandle == MR_PD_INVALID) 1121 if (io_info->devHandle == cpu_to_le16(MR_PD_INVALID))
1121 io_info->fpOkForIo = FALSE; 1122 io_info->fpOkForIo = FALSE;
1122 return retval; 1123 return retval;
1123 } else if (isRead) { 1124 } else if (isRead) {
@@ -1349,11 +1350,11 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
1349 return io_info->pd_after_lb; 1350 return io_info->pd_after_lb;
1350} 1351}
1351 1352
1352u16 get_updated_dev_handle(struct megasas_instance *instance, 1353__le16 get_updated_dev_handle(struct megasas_instance *instance,
1353 struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info) 1354 struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
1354{ 1355{
1355 u8 arm_pd; 1356 u8 arm_pd;
1356 u16 devHandle; 1357 __le16 devHandle;
1357 struct fusion_context *fusion; 1358 struct fusion_context *fusion;
1358 struct MR_DRV_RAID_MAP_ALL *drv_map; 1359 struct MR_DRV_RAID_MAP_ALL *drv_map;
1359 1360
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index dba4de04de3c..46a0f8f4f677 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -53,10 +53,12 @@
53#include <scsi/scsi_device.h> 53#include <scsi/scsi_device.h>
54#include <scsi/scsi_host.h> 54#include <scsi/scsi_host.h>
55#include <scsi/scsi_dbg.h> 55#include <scsi/scsi_dbg.h>
56#include <linux/dmi.h>
56 57
57#include "megaraid_sas_fusion.h" 58#include "megaraid_sas_fusion.h"
58#include "megaraid_sas.h" 59#include "megaraid_sas.h"
59 60
61
60extern void megasas_free_cmds(struct megasas_instance *instance); 62extern void megasas_free_cmds(struct megasas_instance *instance);
61extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance 63extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance
62 *instance); 64 *instance);
@@ -156,28 +158,15 @@ megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs)
156 * megasas_get_cmd_fusion - Get a command from the free pool 158 * megasas_get_cmd_fusion - Get a command from the free pool
157 * @instance: Adapter soft state 159 * @instance: Adapter soft state
158 * 160 *
159 * Returns a free command from the pool 161 * Returns the MPT frame indexed by blk_tag
160 */ 162 */
161struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance 163inline struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
162 *instance) 164 *instance, u32 blk_tag)
163{ 165{
164 unsigned long flags; 166 struct fusion_context *fusion;
165 struct fusion_context *fusion =
166 (struct fusion_context *)instance->ctrl_context;
167 struct megasas_cmd_fusion *cmd = NULL;
168
169 spin_lock_irqsave(&fusion->mpt_pool_lock, flags);
170
171 if (!list_empty(&fusion->cmd_pool)) {
172 cmd = list_entry((&fusion->cmd_pool)->next,
173 struct megasas_cmd_fusion, list);
174 list_del_init(&cmd->list);
175 } else {
176 printk(KERN_ERR "megasas: Command pool (fusion) empty!\n");
177 }
178 167
179 spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags); 168 fusion = instance->ctrl_context;
180 return cmd; 169 return fusion->cmd_list[blk_tag];
181} 170}
182 171
183/** 172/**
@@ -188,47 +177,35 @@ struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
188inline void megasas_return_cmd_fusion(struct megasas_instance *instance, 177inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
189 struct megasas_cmd_fusion *cmd) 178 struct megasas_cmd_fusion *cmd)
190{ 179{
191 unsigned long flags;
192 struct fusion_context *fusion =
193 (struct fusion_context *)instance->ctrl_context;
194
195 spin_lock_irqsave(&fusion->mpt_pool_lock, flags);
196
197 cmd->scmd = NULL; 180 cmd->scmd = NULL;
198 cmd->sync_cmd_idx = (u32)ULONG_MAX;
199 memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); 181 memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
200 list_add(&cmd->list, (&fusion->cmd_pool)->next);
201
202 spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags);
203} 182}
204 183
205/** 184/**
206 * megasas_return_mfi_mpt_pthr - Return a mfi and mpt to free command pool 185 * megasas_fire_cmd_fusion - Sends command to the FW
207 * @instance: Adapter soft state
208 * @cmd_mfi: MFI Command packet to be returned to free command pool
209 * @cmd_mpt: MPT Command packet to be returned to free command pool
210 */ 186 */
211inline void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance, 187static void
212 struct megasas_cmd *cmd_mfi, 188megasas_fire_cmd_fusion(struct megasas_instance *instance,
213 struct megasas_cmd_fusion *cmd_fusion) 189 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
214{ 190{
191#if defined(writeq) && defined(CONFIG_64BIT)
192 u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
193 le32_to_cpu(req_desc->u.low));
194
195 writeq(req_data, &instance->reg_set->inbound_low_queue_port);
196#else
215 unsigned long flags; 197 unsigned long flags;
216 198
217 /* 199 spin_lock_irqsave(&instance->hba_lock, flags);
218 * TO DO: optimize this code and use only one lock instead of two 200 writel(le32_to_cpu(req_desc->u.low),
219 * locks being used currently- mpt_pool_lock is acquired 201 &instance->reg_set->inbound_low_queue_port);
220 * inside mfi_pool_lock 202 writel(le32_to_cpu(req_desc->u.high),
221 */ 203 &instance->reg_set->inbound_high_queue_port);
222 spin_lock_irqsave(&instance->mfi_pool_lock, flags); 204 spin_unlock_irqrestore(&instance->hba_lock, flags);
223 megasas_return_cmd_fusion(instance, cmd_fusion); 205#endif
224 if (atomic_read(&cmd_mfi->mfi_mpt_pthr) != MFI_MPT_ATTACHED)
225 dev_err(&instance->pdev->dev, "Possible bug from %s %d\n",
226 __func__, __LINE__);
227 atomic_set(&cmd_mfi->mfi_mpt_pthr, MFI_MPT_DETACHED);
228 __megasas_return_cmd(instance, cmd_mfi);
229 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
230} 206}
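The request descriptor is 64 bits wide, so on 64-bit builds a single writeq() posts it to the inbound queue port atomically; 32-bit builds must hold hba_lock so that the low/high halves written by two CPUs cannot interleave. Condensed sketch of the pattern (req_data, regs and flags as in the function above):

	#if defined(writeq) && defined(CONFIG_64BIT)
		writeq(req_data, &regs->inbound_low_queue_port);
	#else
		spin_lock_irqsave(&instance->hba_lock, flags);
		writel(lower_32_bits(req_data), &regs->inbound_low_queue_port);
		writel(upper_32_bits(req_data), &regs->inbound_high_queue_port);
		spin_unlock_irqrestore(&instance->hba_lock, flags);
	#endif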
231 207
208
232/** 209/**
233 * megasas_teardown_frame_pool_fusion - Destroy the cmd frame DMA pool 210 * megasas_teardown_frame_pool_fusion - Destroy the cmd frame DMA pool
234 * @instance: Adapter soft state 211 * @instance: Adapter soft state
@@ -326,7 +303,6 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
326 kfree(fusion->cmd_list); 303 kfree(fusion->cmd_list);
327 fusion->cmd_list = NULL; 304 fusion->cmd_list = NULL;
328 305
329 INIT_LIST_HEAD(&fusion->cmd_pool);
330} 306}
331 307
332/** 308/**
@@ -464,7 +440,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
464 440
465 reply_desc = fusion->reply_frames_desc; 441 reply_desc = fusion->reply_frames_desc;
466 for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++) 442 for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
467 reply_desc->Words = ULLONG_MAX; 443 reply_desc->Words = cpu_to_le64(ULLONG_MAX);
468 444
469 io_frames_sz = fusion->io_frames_alloc_sz; 445 io_frames_sz = fusion->io_frames_alloc_sz;
470 446
@@ -535,7 +511,9 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
535 memset(cmd, 0, sizeof(struct megasas_cmd_fusion)); 511 memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
536 cmd->index = i + 1; 512 cmd->index = i + 1;
537 cmd->scmd = NULL; 513 cmd->scmd = NULL;
538 cmd->sync_cmd_idx = (u32)ULONG_MAX; /* Set to Invalid */ 514 cmd->sync_cmd_idx = (i >= instance->max_scsi_cmds) ?
515 (i - instance->max_scsi_cmds) :
516 (u32)ULONG_MAX; /* Set to Invalid */
539 cmd->instance = instance; 517 cmd->instance = instance;
540 cmd->io_request = 518 cmd->io_request =
541 (struct MPI2_RAID_SCSI_IO_REQUEST *) 519 (struct MPI2_RAID_SCSI_IO_REQUEST *)
@@ -543,8 +521,6 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
543 memset(cmd->io_request, 0, 521 memset(cmd->io_request, 0,
544 sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); 522 sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
545 cmd->io_request_phys_addr = io_req_base_phys + offset; 523 cmd->io_request_phys_addr = io_req_base_phys + offset;
546
547 list_add_tail(&cmd->list, &fusion->cmd_pool);
548 } 524 }
549 525
550 /* 526 /*
@@ -605,14 +581,11 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
605 msleep(20); 581 msleep(20);
606 } 582 }
607 583
608 if (frame_hdr->cmd_status == 0xff) { 584 if (frame_hdr->cmd_status == 0xff)
609 if (fusion)
610 megasas_return_mfi_mpt_pthr(instance, cmd,
611 cmd->mpt_pthr_cmd_blocked);
612 return -ETIME; 585 return -ETIME;
613 }
614 586
615 return 0; 587 return (frame_hdr->cmd_status == MFI_STAT_OK) ?
588 0 : 1;
616} 589}
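wait_and_poll() now distinguishes three outcomes instead of two. A caller-side sketch of the new contract (the error label is hypothetical):

	/* -ETIME: firmware never overwrote the 0xFF sentinel (timeout)
	 *  0    : command completed with MFI_STAT_OK
	 *  1    : command completed with a non-OK status */
	ret = wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
	if (ret)
		goto fail_fw_init;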
617 590
618/** 591/**
@@ -633,6 +606,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
633 union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc; 606 union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
634 int i; 607 int i;
635 struct megasas_header *frame_hdr; 608 struct megasas_header *frame_hdr;
609 const char *sys_info;
636 610
637 fusion = instance->ctrl_context; 611 fusion = instance->ctrl_context;
638 612
@@ -673,7 +647,9 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
673 647
674 frame_hdr = &cmd->frame->hdr; 648 frame_hdr = &cmd->frame->hdr;
675 frame_hdr->cmd_status = 0xFF; 649 frame_hdr->cmd_status = 0xFF;
676 frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); 650 frame_hdr->flags = cpu_to_le16(
651 le16_to_cpu(frame_hdr->flags) |
652 MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
677 653
678 init_frame->cmd = MFI_CMD_INIT; 654 init_frame->cmd = MFI_CMD_INIT;
679 init_frame->cmd_status = 0xFF; 655 init_frame->cmd_status = 0xFF;
@@ -695,6 +671,16 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
695 /* Convert capability to LE32 */ 671 /* Convert capability to LE32 */
696 cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); 672 cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
697 673
674 sys_info = dmi_get_system_info(DMI_PRODUCT_UUID);
675 if (instance->system_info_buf && sys_info) {
676 memcpy(instance->system_info_buf->systemId, sys_info,
677 strlen(sys_info) > 64 ? 64 : strlen(sys_info));
678 instance->system_info_buf->systemIdLength =
679 strlen(sys_info) > 64 ? 64 : strlen(sys_info);
680 init_frame->system_info_lo = instance->system_info_h;
681 init_frame->system_info_hi = 0;
682 }
683
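The block above caps the DMI product UUID at 64 bytes before handing it to firmware. An equivalent, slightly tighter form using min_t() (illustrative rewrite, same behavior, assuming systemId is a 64-byte field):

	size_t len = min_t(size_t, strlen(sys_info), 64);

	memcpy(instance->system_info_buf->systemId, sys_info, len);
	instance->system_info_buf->systemIdLength = len;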
698 init_frame->queue_info_new_phys_addr_hi = 684 init_frame->queue_info_new_phys_addr_hi =
699 cpu_to_le32(upper_32_bits(ioc_init_handle)); 685 cpu_to_le32(upper_32_bits(ioc_init_handle));
700 init_frame->queue_info_new_phys_addr_lo = 686 init_frame->queue_info_new_phys_addr_lo =
@@ -719,8 +705,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
719 break; 705 break;
720 } 706 }
721 707
722 instance->instancet->fire_cmd(instance, req_desc.u.low, 708 megasas_fire_cmd_fusion(instance, &req_desc);
723 req_desc.u.high, instance->reg_set);
724 709
725 wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS); 710 wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
726 711
@@ -820,11 +805,7 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
820 else 805 else
821 ret = megasas_issue_polled(instance, cmd); 806 ret = megasas_issue_polled(instance, cmd);
822 807
823 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) 808 megasas_return_cmd(instance, cmd);
824 megasas_return_mfi_mpt_pthr(instance, cmd,
825 cmd->mpt_pthr_cmd_blocked);
826 else
827 megasas_return_cmd(instance, cmd);
828 809
829 return ret; 810 return ret;
830} 811}
@@ -1061,6 +1042,15 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
1061 fusion->last_reply_idx[i] = 0; 1042 fusion->last_reply_idx[i] = 0;
1062 1043
1063 /* 1044 /*
1045 * For fusion adapters, 3 commands for IOCTL and 5 commands
1046 * for driver's internal DCMDs.
1047 */
1048 instance->max_scsi_cmds = instance->max_fw_cmds -
1049 (MEGASAS_FUSION_INTERNAL_CMDS +
1050 MEGASAS_FUSION_IOCTL_CMDS);
1051 sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
1052
1053 /*
1064 * Allocate memory for descriptors 1054 * Allocate memory for descriptors
1065 * Create a pool of commands 1055 * Create a pool of commands
1066 */ 1056 */
@@ -1131,34 +1121,6 @@ fail_alloc_mfi_cmds:
1131} 1121}
1132 1122
1133/** 1123/**
1134 * megasas_fire_cmd_fusion - Sends command to the FW
1135 * @frame_phys_addr : Physical address of cmd
1136 * @frame_count : Number of frames for the command
1137 * @regs : MFI register set
1138 */
1139void
1140megasas_fire_cmd_fusion(struct megasas_instance *instance,
1141 dma_addr_t req_desc_lo,
1142 u32 req_desc_hi,
1143 struct megasas_register_set __iomem *regs)
1144{
1145#if defined(writeq) && defined(CONFIG_64BIT)
1146 u64 req_data = (((u64)le32_to_cpu(req_desc_hi) << 32) |
1147 le32_to_cpu(req_desc_lo));
1148
1149 writeq(req_data, &(regs)->inbound_low_queue_port);
1150#else
1151 unsigned long flags;
1152
1153 spin_lock_irqsave(&instance->hba_lock, flags);
1154
1155 writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port);
1156 writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port);
1157 spin_unlock_irqrestore(&instance->hba_lock, flags);
1158#endif
1159}
1160
1161/**
1162 * map_cmd_status - Maps FW cmd status to OS cmd status 1124 * map_cmd_status - Maps FW cmd status to OS cmd status
1163 * @cmd : Pointer to cmd 1125 * @cmd : Pointer to cmd
1164 * @status : status of cmd returned by FW 1126 * @status : status of cmd returned by FW
@@ -1497,7 +1459,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1497 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 1459 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1498 u8 *raidLUN; 1460 u8 *raidLUN;
1499 1461
1500 device_id = MEGASAS_DEV_INDEX(instance, scp); 1462 device_id = MEGASAS_DEV_INDEX(scp);
1501 1463
1502 fusion = instance->ctrl_context; 1464 fusion = instance->ctrl_context;
1503 1465
@@ -1621,6 +1583,14 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1621 cmd->pd_r1_lb = io_info.pd_after_lb; 1583 cmd->pd_r1_lb = io_info.pd_after_lb;
1622 } else 1584 } else
1623 scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; 1585 scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
1586
1587 if ((raidLUN[0] == 1) &&
1588 (local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 2)) {
1589 instance->dev_handle = !(instance->dev_handle);
1590 io_info.devHandle =
1591 local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].devHandle[instance->dev_handle];
1592 }
1593
1624 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; 1594 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
1625 io_request->DevHandle = io_info.devHandle; 1595 io_request->DevHandle = io_info.devHandle;
1626 /* populate the LUN field */ 1596 /* populate the LUN field */
@@ -1650,121 +1620,68 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1650} 1620}
1651 1621
1652/** 1622/**
1653 * megasas_build_dcdb_fusion - Prepares IOs to devices 1623 * megasas_build_ld_nonrw_fusion - prepares non-RW IOs for a virtual disk
1654 * @instance: Adapter soft state 1624 * @instance: Adapter soft state
1655 * @scp: SCSI command 1625 * @scmd: SCSI command
1656 * @cmd: Command to be prepared 1626 * @cmd: Command to be prepared
1657 * 1627 *
1658 * Prepares the io_request frame for non-io cmds 1628 * Prepares the io_request frame for non-RW IO cmds for a VD.
1659 */ 1629 */
1660static void 1630static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
1661megasas_build_dcdb_fusion(struct megasas_instance *instance, 1631 struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd)
1662 struct scsi_cmnd *scmd,
1663 struct megasas_cmd_fusion *cmd)
1664{ 1632{
1665 u32 device_id; 1633 u32 device_id;
1666 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 1634 struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1667 u16 pd_index = 0; 1635 u16 pd_index = 0;
1668 u16 os_timeout_value;
1669 u16 timeout_limit;
1670 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 1636 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1671 struct fusion_context *fusion = instance->ctrl_context; 1637 struct fusion_context *fusion = instance->ctrl_context;
1672 u8 span, physArm; 1638 u8 span, physArm;
1673 u16 devHandle; 1639 __le16 devHandle;
1674 u32 ld, arRef, pd; 1640 u32 ld, arRef, pd;
1675 struct MR_LD_RAID *raid; 1641 struct MR_LD_RAID *raid;
1676 struct RAID_CONTEXT *pRAID_Context; 1642 struct RAID_CONTEXT *pRAID_Context;
1643 u8 fp_possible = 1;
1677 1644
1678 io_request = cmd->io_request; 1645 io_request = cmd->io_request;
1679 device_id = MEGASAS_DEV_INDEX(instance, scmd); 1646 device_id = MEGASAS_DEV_INDEX(scmd);
-    pd_index = (scmd->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL)
-        +scmd->device->id;
+    pd_index = MEGASAS_PD_INDEX(scmd);
     local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
-
     io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
+    /* get RAID_Context pointer */
+    pRAID_Context = &io_request->RaidContext;
+    /* Check with FW team */
+    pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
+    pRAID_Context->regLockRowLBA = 0;
+    pRAID_Context->regLockLength = 0;
 
-    if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
-        instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
-        if (fusion->fast_path_io)
-            io_request->DevHandle =
-            local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
-        io_request->RaidContext.RAIDFlags =
-            MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
-            << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
-        cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
-        cmd->request_desc->SCSIIO.MSIxIndex =
-            instance->msix_vectors ?
-                raw_smp_processor_id() %
-                    instance->msix_vectors :
-                0;
-        os_timeout_value = scmd->request->timeout / HZ;
-
-        if (instance->secure_jbod_support &&
-            (megasas_cmd_type(scmd) == NON_READ_WRITE_SYSPDIO)) {
-            /* system pd firmware path */
-            io_request->Function =
-                MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
-            cmd->request_desc->SCSIIO.RequestFlags =
-                (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
-                MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
-            io_request->RaidContext.timeoutValue =
-                cpu_to_le16(os_timeout_value);
-        } else {
-            /* system pd Fast Path */
-            io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
-            io_request->RaidContext.regLockFlags = 0;
-            io_request->RaidContext.regLockRowLBA = 0;
-            io_request->RaidContext.regLockLength = 0;
-            timeout_limit = (scmd->device->type == TYPE_DISK) ?
-                    255 : 0xFFFF;
-            io_request->RaidContext.timeoutValue =
-                cpu_to_le16((os_timeout_value > timeout_limit) ?
-                timeout_limit : os_timeout_value);
-            if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
-                (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
-                io_request->IoFlags |=
-                cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
-
-            cmd->request_desc->SCSIIO.RequestFlags =
-                (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
-                MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
-        }
-    } else {
-        if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS)
-            goto NonFastPath;
-
-        /*
-         * For older firmware, Driver should not access ldTgtIdToLd
-         * beyond index 127 and for Extended VD firmware, ldTgtIdToLd
-         * should not go beyond 255.
-         */
-
-        if ((!fusion->fast_path_io) ||
-            (device_id >= instance->fw_supported_vd_count))
-            goto NonFastPath;
+    if (fusion->fast_path_io && (
+        device_id < instance->fw_supported_vd_count)) {
 
         ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
-
         if (ld >= instance->fw_supported_vd_count)
-            goto NonFastPath;
+            fp_possible = 0;
 
         raid = MR_LdRaidGet(ld, local_map_ptr);
-
-        /* check if this LD is FP capable */
         if (!(raid->capability.fpNonRWCapable))
-            /* not FP capable, send as non-FP */
-            goto NonFastPath;
+            fp_possible = 0;
+    } else
+        fp_possible = 0;
 
-        /* get RAID_Context pointer */
-        pRAID_Context = &io_request->RaidContext;
+    if (!fp_possible) {
+        io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
+        io_request->DevHandle = cpu_to_le16(device_id);
+        io_request->LUN[1] = scmd->device->lun;
+        pRAID_Context->timeoutValue =
+            cpu_to_le16 (scmd->request->timeout / HZ);
+        cmd->request_desc->SCSIIO.RequestFlags =
+            (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+            MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+    } else {
 
         /* set RAID context values */
-        pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
-        pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd);
-        pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
-        pRAID_Context->regLockRowLBA = 0;
-        pRAID_Context->regLockLength = 0;
-        pRAID_Context->configSeqNum = raid->seqNum;
+        pRAID_Context->configSeqNum = raid->seqNum;
+        pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
+        pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd);
 
         /* get the DevHandle for the PD (since this is
            fpNonRWCapable, this is a single disk RAID0) */
@@ -1776,7 +1693,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
     /* build request descriptor */
     cmd->request_desc->SCSIIO.RequestFlags =
         (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
         MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
     cmd->request_desc->SCSIIO.DevHandle = devHandle;
 
     /* populate the LUN field */
@@ -1785,18 +1702,87 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
     /* build the raidScsiIO structure */
     io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
     io_request->DevHandle = devHandle;
+    }
+}
 
-    return;
+/**
+ * megasas_build_syspd_fusion - prepares rw/non-rw ios for syspd
+ * @instance: Adapter soft state
+ * @scp: SCSI command
+ * @cmd: Command to be prepared
+ * @fp_possible: parameter to detect fast path or firmware path io.
+ *
+ * Prepares the io_request frame for rw/non-rw io cmds for syspds
+ */
+static void
+megasas_build_syspd_fusion(struct megasas_instance *instance,
+    struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, u8 fp_possible)
+{
+    u32 device_id;
+    struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
+    u16 pd_index = 0;
+    u16 os_timeout_value;
+    u16 timeout_limit;
+    struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
+    struct RAID_CONTEXT *pRAID_Context;
+    struct fusion_context *fusion = instance->ctrl_context;
+
+    device_id = MEGASAS_DEV_INDEX(scmd);
+    pd_index = MEGASAS_PD_INDEX(scmd);
+    os_timeout_value = scmd->request->timeout / HZ;
 
-NonFastPath:
-    io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
-    io_request->DevHandle = cpu_to_le16(device_id);
-    cmd->request_desc->SCSIIO.RequestFlags =
-        (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
-        MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+    io_request = cmd->io_request;
+    /* get RAID_Context pointer */
+    pRAID_Context = &io_request->RaidContext;
+    io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
+    io_request->LUN[1] = scmd->device->lun;
+    pRAID_Context->RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
+        << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
+
+    pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
+    pRAID_Context->configSeqNum = 0;
+    local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
+    io_request->DevHandle =
+        local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+
+    cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
+    cmd->request_desc->SCSIIO.MSIxIndex =
+        instance->msix_vectors ?
+        (raw_smp_processor_id() % instance->msix_vectors) : 0;
+
+
+    if (!fp_possible) {
+        /* system pd firmware path */
+        io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
+        cmd->request_desc->SCSIIO.RequestFlags =
+            (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+            MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+        pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value);
+    } else {
+        /* system pd Fast Path */
+        io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+        pRAID_Context->regLockFlags = 0;
+        pRAID_Context->regLockRowLBA = 0;
+        pRAID_Context->regLockLength = 0;
+        timeout_limit = (scmd->device->type == TYPE_DISK) ?
+                255 : 0xFFFF;
+        pRAID_Context->timeoutValue =
+            cpu_to_le16((os_timeout_value > timeout_limit) ?
+            timeout_limit : os_timeout_value);
+        if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+            (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+            cmd->request_desc->SCSIIO.RequestFlags |=
+                (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
+                MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+            pRAID_Context->Type = MPI2_TYPE_CUDA;
+            pRAID_Context->nseg = 0x1;
+            io_request->IoFlags |=
+                cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
+        }
+        cmd->request_desc->SCSIIO.RequestFlags =
+            (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+            MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
     }
-    io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
-    int_to_scsilun(scmd->device->lun, (struct scsi_lun *)io_request->LUN);
 }
 
 /**
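The syspd builder above picks a reply queue by hashing the submitting CPU across the MSI-X vector count, falling back to vector 0 when MSI-X is not enabled. A minimal userspace sketch of that selection, with sched_getcpu() standing in for the kernel's raw_smp_processor_id() (the helper name and the printed sizes are illustrative, not the driver's):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    /* Spread submissions over the available MSI-X vectors by CPU id, so
     * completions tend to come back near the CPU that issued the I/O. */
    static unsigned int pick_msix_index(unsigned int msix_vectors)
    {
        int cpu = sched_getcpu();   /* raw_smp_processor_id() in the kernel */

        if (cpu < 0)
            cpu = 0;                /* sched_getcpu() can fail; the kernel call cannot */
        return msix_vectors ? (unsigned int)cpu % msix_vectors : 0;
    }

    int main(void)
    {
        printf("queue (8 vectors): %u\n", pick_msix_index(8));
        printf("legacy fallback:   %u\n", pick_msix_index(0));
        return 0;
    }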
@@ -1813,11 +1799,10 @@ megasas_build_io_fusion(struct megasas_instance *instance,
            struct scsi_cmnd *scp,
            struct megasas_cmd_fusion *cmd)
 {
-    u32 device_id, sge_count;
+    u32 sge_count;
+    u8 cmd_type;
     struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
 
-    device_id = MEGASAS_DEV_INDEX(instance, scp);
-
     /* Zero out some fields so they don't get reused */
     memset(io_request->LUN, 0x0, 8);
     io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
@@ -1837,10 +1822,24 @@ megasas_build_io_fusion(struct megasas_instance *instance,
      */
     io_request->IoFlags = cpu_to_le16(scp->cmd_len);
 
-    if (megasas_cmd_type(scp) == READ_WRITE_LDIO)
+    switch (cmd_type = megasas_cmd_type(scp)) {
+    case READ_WRITE_LDIO:
         megasas_build_ldio_fusion(instance, scp, cmd);
-    else
-        megasas_build_dcdb_fusion(instance, scp, cmd);
+        break;
+    case NON_READ_WRITE_LDIO:
+        megasas_build_ld_nonrw_fusion(instance, scp, cmd);
+        break;
+    case READ_WRITE_SYSPDIO:
+    case NON_READ_WRITE_SYSPDIO:
+        if (instance->secure_jbod_support &&
+            (cmd_type == NON_READ_WRITE_SYSPDIO))
+            megasas_build_syspd_fusion(instance, scp, cmd, 0);
+        else
+            megasas_build_syspd_fusion(instance, scp, cmd, 1);
+        break;
+    default:
+        break;
+    }
 
     /*
      * Construct SGL
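megasas_build_io_fusion() now routes on one of four command classes instead of a single LD/non-LD test. The classifier below is a simplified stand-in for megasas_cmd_type(): the real function also inspects the device channel to tell logical drives from system PDs, and the opcode list here is abbreviated:

    #include <stdio.h>

    enum cmd_type { READ_WRITE_LDIO, NON_READ_WRITE_LDIO,
            READ_WRITE_SYSPDIO, NON_READ_WRITE_SYSPDIO };

    /* Classify by SCSI opcode and target kind (simplified). */
    static enum cmd_type classify(unsigned char opcode, int is_syspd)
    {
        int rw = opcode == 0x08 || opcode == 0x0a ||  /* READ_6/WRITE_6 */
                 opcode == 0x28 || opcode == 0x2a ||  /* READ_10/WRITE_10 */
                 opcode == 0x88 || opcode == 0x8a;    /* READ_16/WRITE_16 */

        if (is_syspd)
            return rw ? READ_WRITE_SYSPDIO : NON_READ_WRITE_SYSPDIO;
        return rw ? READ_WRITE_LDIO : NON_READ_WRITE_LDIO;
    }

    int main(void)
    {
        /* A TEST UNIT READY (0x00) to a JBOD disk classifies as
         * NON_READ_WRITE_SYSPDIO, which a secure-jbod setup sends down
         * the firmware path (fp_possible = 0) per the switch above. */
        printf("%d\n", classify(0x00, 1) == NON_READ_WRITE_SYSPDIO);
        printf("%d\n", classify(0x2a, 0) == READ_WRITE_LDIO);
        return 0;
    }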
@@ -1915,9 +1914,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
 
     fusion = instance->ctrl_context;
 
-    cmd = megasas_get_cmd_fusion(instance);
-    if (!cmd)
-        return SCSI_MLQUEUE_HOST_BUSY;
+    cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
 
     index = cmd->index;
 
@@ -1948,9 +1945,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
      */
     atomic_inc(&instance->fw_outstanding);
 
-    instance->instancet->fire_cmd(instance,
-            req_desc->u.low, req_desc->u.high,
-            instance->reg_set);
+    megasas_fire_cmd_fusion(instance, req_desc);
 
     return 0;
 }
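Because megasas_get_cmd_fusion() is now keyed by scmd->request->tag, a frame is guaranteed for every request the block layer dispatches, which is why the SCSI_MLQUEUE_HOST_BUSY bail-out could be deleted. A toy model of tag-indexed frame pools (depth and struct are invented for the example):

    #include <assert.h>
    #include <stdio.h>

    #define DEPTH 32    /* queue depth advertised to the block layer */

    struct cmd { unsigned int index; };

    static struct cmd cmd_pool[DEPTH];

    /* Direct indexing: no free list, no lock, no allocation-failure path,
     * because the block layer hands out unique tags in [0, DEPTH). */
    static struct cmd *get_cmd_by_tag(unsigned int tag)
    {
        assert(tag < DEPTH);
        cmd_pool[tag].index = tag;
        return &cmd_pool[tag];
    }

    int main(void)
    {
        printf("tag 5 -> slot %u\n", get_cmd_by_tag(5)->index);
        return 0;
    }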
@@ -1975,6 +1970,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
     union desc_value d_val;
     struct LD_LOAD_BALANCE_INFO *lbinfo;
     int threshold_reply_count = 0;
+    struct scsi_cmnd *scmd_local = NULL;
 
     fusion = instance->ctrl_context;
 
@@ -1998,7 +1994,8 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
 
     num_completed = 0;
 
-    while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) {
+    while (d_val.u.low != cpu_to_le32(UINT_MAX) &&
+           d_val.u.high != cpu_to_le32(UINT_MAX)) {
         smid = le16_to_cpu(reply_desc->SMID);
 
         cmd_fusion = fusion->cmd_list[smid - 1];
@@ -2010,14 +2007,14 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
         if (cmd_fusion->scmd)
             cmd_fusion->scmd->SCp.ptr = NULL;
 
+        scmd_local = cmd_fusion->scmd;
         status = scsi_io_req->RaidContext.status;
         extStatus = scsi_io_req->RaidContext.exStatus;
 
         switch (scsi_io_req->Function) {
         case MPI2_FUNCTION_SCSI_IO_REQUEST:  /*Fast Path IO.*/
             /* Update load balancing info */
-            device_id = MEGASAS_DEV_INDEX(instance,
-                    cmd_fusion->scmd);
+            device_id = MEGASAS_DEV_INDEX(scmd_local);
             lbinfo = &fusion->load_balance_info[device_id];
             if (cmd_fusion->scmd->SCp.Status &
                 MEGASAS_LOAD_BALANCE_FLAG) {
@@ -2035,29 +2032,25 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
         case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
             /* Map the FW Cmd Status */
             map_cmd_status(cmd_fusion, status, extStatus);
-            scsi_dma_unmap(cmd_fusion->scmd);
-            cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);
             scsi_io_req->RaidContext.status = 0;
             scsi_io_req->RaidContext.exStatus = 0;
             megasas_return_cmd_fusion(instance, cmd_fusion);
+            scsi_dma_unmap(scmd_local);
+            scmd_local->scsi_done(scmd_local);
             atomic_dec(&instance->fw_outstanding);
 
             break;
         case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
             cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
 
-            if (!cmd_mfi->mpt_pthr_cmd_blocked) {
-                if (megasas_dbg_lvl == 5)
-                    dev_info(&instance->pdev->dev,
-                        "freeing mfi/mpt pass-through "
-                        "from %s %d\n",
-                        __func__, __LINE__);
-                megasas_return_mfi_mpt_pthr(instance, cmd_mfi,
-                        cmd_fusion);
-            }
-
-            megasas_complete_cmd(instance, cmd_mfi, DID_OK);
-            cmd_fusion->flags = 0;
+            /* Poll mode. Dummy free.
+             * In case of Interrupt mode, caller has reverse check.
+             */
+            if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) {
+                cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE;
+                megasas_return_cmd(instance, cmd_mfi);
+            } else
+                megasas_complete_cmd(instance, cmd_mfi, DID_OK);
             break;
         }
 
@@ -2066,7 +2059,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
             fusion->reply_q_depth)
             fusion->last_reply_idx[MSIxIndex] = 0;
 
-        desc->Words = ULLONG_MAX;
+        desc->Words = cpu_to_le64(ULLONG_MAX);
         num_completed++;
         threshold_reply_count++;
 
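Both completion-path hunks wrap the all-ones "empty descriptor" sentinel in cpu_to_le32()/cpu_to_le64(). A standalone sketch of the idiom using the glibc htole32() family (same semantics as the kernel macros, different spelling):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Reply descriptors live in DMA memory in little-endian order. */
        uint32_t stored = htole32(0xFFFFFFFFu);

        /* All-ones is byte-swap invariant, so the old bare compare happened
         * to work; converting the constant makes the comparison type-correct
         * for __le32 fields (sparse-clean) and stays right for sentinels
         * that are not symmetric under swapping: */
        printf("empty? %d\n", stored == htole32(0xFFFFFFFFu));

        uint32_t magic = htole32(0x12345678u);
        printf("match? %d\n", magic == htole32(0x12345678u));
        return 0;
    }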
@@ -2217,27 +2210,14 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
     struct megasas_cmd_fusion *cmd;
     struct fusion_context *fusion;
     struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
-    u32 opcode;
 
-    cmd = megasas_get_cmd_fusion(instance);
-    if (!cmd)
-        return 1;
+    fusion = instance->ctrl_context;
+
+    cmd = megasas_get_cmd_fusion(instance,
+            instance->max_scsi_cmds + mfi_cmd->index);
 
     /* Save the smid. To be used for returning the cmd */
     mfi_cmd->context.smid = cmd->index;
-    cmd->sync_cmd_idx = mfi_cmd->index;
-
-    /* Set this only for Blocked commands */
-    opcode = le32_to_cpu(mfi_cmd->frame->dcmd.opcode);
-    if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
-        && (mfi_cmd->frame->dcmd.mbox.b[1] == 1))
-        mfi_cmd->is_wait_event = 1;
-
-    if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
-        mfi_cmd->is_wait_event = 1;
-
-    if (mfi_cmd->is_wait_event)
-        mfi_cmd->mpt_pthr_cmd_blocked = cmd;
 
     /*
      * For cmds where the flag is set, store the flag and check
@@ -2246,9 +2226,8 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
      */
 
     if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
-        cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+        mfi_cmd->flags |= DRV_DCMD_POLLED_MODE;
 
-    fusion = instance->ctrl_context;
     io_req = cmd->io_request;
 
     if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
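Internal MFI pass-through commands now take MPT frames from a reserved tail of the pool, past the block-tagged region, indexed by the MFI command's own index. A sketch of the slot arithmetic (pool sizes invented for the example):

    #include <stdio.h>

    #define MAX_SCSI_CMDS 1008  /* slots 0..1007: block-layer tags */
    #define MAX_FW_CMDS   1024  /* slots 1008..1023: internal MFI cmds */

    static unsigned int mfi_slot(unsigned int mfi_index)
    {
        return MAX_SCSI_CMDS + mfi_index;
    }

    int main(void)
    {
        /* megasas_refire_mgmt_cmd() exploits this split: it walks only
         * [MAX_SCSI_CMDS, MAX_FW_CMDS) to find management commands. */
        printf("mfi cmd 3 -> MPT slot %u\n", mfi_slot(3));
        return 0;
    }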
@@ -2327,9 +2306,7 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance,
         printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n");
         return;
     }
-    atomic_set(&cmd->mfi_mpt_pthr, MFI_MPT_ATTACHED);
-    instance->instancet->fire_cmd(instance, req_desc->u.low,
-                    req_desc->u.high, instance->reg_set);
+    megasas_fire_cmd_fusion(instance, req_desc);
 }
 
 /**
@@ -2508,7 +2485,42 @@ void megasas_reset_reply_desc(struct megasas_instance *instance)
         fusion->last_reply_idx[i] = 0;
     reply_desc = fusion->reply_frames_desc;
     for (i = 0 ; i < fusion->reply_q_depth * count; i++, reply_desc++)
-        reply_desc->Words = ULLONG_MAX;
+        reply_desc->Words = cpu_to_le64(ULLONG_MAX);
+}
+
+/*
+ * megasas_refire_mgmt_cmd : Re-fire management commands
+ * @instance: Controller's soft instance
+*/
+void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
+{
+    int j;
+    struct megasas_cmd_fusion *cmd_fusion;
+    struct fusion_context *fusion;
+    struct megasas_cmd *cmd_mfi;
+    union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+    u16 smid;
+
+    fusion = instance->ctrl_context;
+
+    /* Re-fire management commands.
+     * Do not traverse complet MPT frame pool. Start from max_scsi_cmds.
+     */
+    for (j = instance->max_scsi_cmds ; j < instance->max_fw_cmds; j++) {
+        cmd_fusion = fusion->cmd_list[j];
+        cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
+        smid = le16_to_cpu(cmd_mfi->context.smid);
+
+        if (!smid)
+            continue;
+        req_desc = megasas_get_request_descriptor
+                (instance, smid - 1);
+        if (req_desc && (cmd_mfi->frame->dcmd.opcode !=
+                cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)))
+            megasas_fire_cmd_fusion(instance, req_desc);
+        else
+            megasas_return_cmd(instance, cmd_mfi);
+    }
 }
 
 /* Check for a second path that is currently UP */
@@ -2538,14 +2550,13 @@ out:
 /* Core fusion reset function */
 int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
 {
-    int retval = SUCCESS, i, j, retry = 0, convert = 0;
+    int retval = SUCCESS, i, retry = 0, convert = 0;
     struct megasas_instance *instance;
     struct megasas_cmd_fusion *cmd_fusion;
     struct fusion_context *fusion;
-    struct megasas_cmd *cmd_mfi;
-    union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
     u32 host_diag, abs_state, status_reg, reset_adapter;
     u32 io_timeout_in_crash_mode = 0;
+    struct scsi_cmnd *scmd_local = NULL;
 
     instance = (struct megasas_instance *)shost->hostdata;
     fusion = instance->ctrl_context;
@@ -2613,15 +2624,16 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
         iotimeout = 0;
 
     /* Now return commands back to the OS */
-    for (i = 0 ; i < instance->max_fw_cmds; i++) {
+    for (i = 0 ; i < instance->max_scsi_cmds; i++) {
         cmd_fusion = fusion->cmd_list[i];
+        scmd_local = cmd_fusion->scmd;
         if (cmd_fusion->scmd) {
-            scsi_dma_unmap(cmd_fusion->scmd);
-            cmd_fusion->scmd->result =
-                megasas_check_mpio_paths(instance,
-                        cmd_fusion->scmd);
-            cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);
+            scmd_local->result =
+                megasas_check_mpio_paths(instance,
+                        scmd_local);
             megasas_return_cmd_fusion(instance, cmd_fusion);
+            scsi_dma_unmap(scmd_local);
+            scmd_local->scsi_done(scmd_local);
             atomic_dec(&instance->fw_outstanding);
         }
     }
@@ -2790,44 +2802,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
                 continue;
             }
 
-            /* Re-fire management commands */
-            for (j = 0 ; j < instance->max_fw_cmds; j++) {
-                cmd_fusion = fusion->cmd_list[j];
-                if (cmd_fusion->sync_cmd_idx !=
-                    (u32)ULONG_MAX) {
-                    cmd_mfi =
-                    instance->
-                    cmd_list[cmd_fusion->sync_cmd_idx];
-                    if (cmd_mfi->frame->dcmd.opcode ==
-                        cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) {
-                        megasas_return_mfi_mpt_pthr(instance, cmd_mfi, cmd_fusion);
-                    } else {
-                        req_desc =
-                        megasas_get_request_descriptor(
-                            instance,
-                            cmd_mfi->context.smid
-                            -1);
-                        if (!req_desc) {
-                            printk(KERN_WARNING
-                                   "req_desc NULL"
-                                   " for scsi%d\n",
-                                instance->host->host_no);
-                            /* Return leaked MPT
-                               frame */
-                            megasas_return_cmd_fusion(instance, cmd_fusion);
-                        } else {
-                            instance->instancet->
-                            fire_cmd(instance,
-                                 req_desc->
-                                 u.low,
-                                 req_desc->
-                                 u.high,
-                                 instance->
-                                 reg_set);
-                        }
-                    }
-                }
-            }
+            megasas_refire_mgmt_cmd(instance);
 
             if (megasas_get_ctrl_info(instance)) {
                 dev_info(&instance->pdev->dev,
@@ -2978,7 +2953,6 @@ void megasas_fusion_ocr_wq(struct work_struct *work)
 }
 
 struct megasas_instance_template megasas_instance_template_fusion = {
-    .fire_cmd = megasas_fire_cmd_fusion,
     .enable_intr = megasas_enable_intr_fusion,
     .disable_intr = megasas_disable_intr_fusion,
     .clear_intr = megasas_clear_intr_fusion,
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 56e6db2d5874..ced6dc0cf8e8 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -104,18 +104,18 @@ struct RAID_CONTEXT {
     u8 nseg:4;
 #endif
     u8 resvd0;
-    u16 timeoutValue;
+    __le16 timeoutValue;
     u8 regLockFlags;
     u8 resvd1;
-    u16 VirtualDiskTgtId;
-    u64 regLockRowLBA;
-    u32 regLockLength;
-    u16 nextLMId;
+    __le16 VirtualDiskTgtId;
+    __le64 regLockRowLBA;
+    __le32 regLockLength;
+    __le16 nextLMId;
     u8 exStatus;
     u8 status;
     u8 RAIDFlags;
     u8 numSGE;
-    u16 configSeqNum;
+    __le16 configSeqNum;
     u8 spanArm;
     u8 resvd2[3];
 };
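With the fields annotated __le16/__le32/__le64, sparse can flag any store that skips the byte-order conversion. A userspace model of the fill-side discipline, with htole16() standing in for cpu_to_le16() and a two-field struct invented for the example (not the real RAID_CONTEXT layout):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct ctx_model {
        uint16_t timeout_value;        /* little-endian on the wire */
        uint16_t virtual_disk_tgt_id;  /* little-endian on the wire */
    };

    int main(void)
    {
        struct ctx_model ctx;
        uint16_t device_id = 0x0102;   /* host byte order */

        memset(&ctx, 0, sizeof(ctx));
        /* Convert exactly once, at the point of assignment. */
        ctx.virtual_disk_tgt_id = htole16(device_id);

        /* Reading back requires the inverse conversion. */
        printf("tgt id: 0x%04x\n", le16toh(ctx.virtual_disk_tgt_id));
        return 0;
    }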
@@ -182,61 +182,61 @@ enum REGION_TYPE {
 #define MPI2_WRSEQ_6TH_KEY_VALUE (0xD)
 
 struct MPI25_IEEE_SGE_CHAIN64 {
-    u64 Address;
-    u32 Length;
-    u16 Reserved1;
+    __le64 Address;
+    __le32 Length;
+    __le16 Reserved1;
     u8 NextChainOffset;
     u8 Flags;
 };
 
 struct MPI2_SGE_SIMPLE_UNION {
-    u32 FlagsLength;
+    __le32 FlagsLength;
     union {
-        u32 Address32;
-        u64 Address64;
+        __le32 Address32;
+        __le64 Address64;
     } u;
 };
 
 struct MPI2_SCSI_IO_CDB_EEDP32 {
     u8 CDB[20];                    /* 0x00 */
-    u32 PrimaryReferenceTag;       /* 0x14 */
-    u16 PrimaryApplicationTag;     /* 0x18 */
-    u16 PrimaryApplicationTagMask; /* 0x1A */
-    u32 TransferLength;            /* 0x1C */
+    __be32 PrimaryReferenceTag;       /* 0x14 */
+    __be16 PrimaryApplicationTag;     /* 0x18 */
+    __be16 PrimaryApplicationTagMask; /* 0x1A */
+    __le32 TransferLength;            /* 0x1C */
 };
 
 struct MPI2_SGE_CHAIN_UNION {
-    u16 Length;
+    __le16 Length;
     u8 NextChainOffset;
     u8 Flags;
     union {
-        u32 Address32;
-        u64 Address64;
+        __le32 Address32;
+        __le64 Address64;
     } u;
 };
 
 struct MPI2_IEEE_SGE_SIMPLE32 {
-    u32 Address;
-    u32 FlagsLength;
+    __le32 Address;
+    __le32 FlagsLength;
 };
 
 struct MPI2_IEEE_SGE_CHAIN32 {
-    u32 Address;
-    u32 FlagsLength;
+    __le32 Address;
+    __le32 FlagsLength;
 };
 
 struct MPI2_IEEE_SGE_SIMPLE64 {
-    u64 Address;
-    u32 Length;
-    u16 Reserved1;
+    __le64 Address;
+    __le32 Length;
+    __le16 Reserved1;
     u8 Reserved2;
     u8 Flags;
 };
 
 struct MPI2_IEEE_SGE_CHAIN64 {
-    u64 Address;
-    u32 Length;
-    u16 Reserved1;
+    __le64 Address;
+    __le32 Length;
+    __le16 Reserved1;
     u8 Reserved2;
     u8 Flags;
 };
@@ -269,34 +269,34 @@ union MPI2_SCSI_IO_CDB_UNION {
  * Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST
  */
 struct MPI2_RAID_SCSI_IO_REQUEST {
-    u16 DevHandle;          /* 0x00 */
+    __le16 DevHandle;       /* 0x00 */
     u8 ChainOffset;         /* 0x02 */
     u8 Function;            /* 0x03 */
-    u16 Reserved1;          /* 0x04 */
+    __le16 Reserved1;       /* 0x04 */
     u8 Reserved2;           /* 0x06 */
     u8 MsgFlags;            /* 0x07 */
     u8 VP_ID;               /* 0x08 */
     u8 VF_ID;               /* 0x09 */
-    u16 Reserved3;              /* 0x0A */
-    u32 SenseBufferLowAddress;  /* 0x0C */
-    u16 SGLFlags;               /* 0x10 */
+    __le16 Reserved3;             /* 0x0A */
+    __le32 SenseBufferLowAddress; /* 0x0C */
+    __le16 SGLFlags;              /* 0x10 */
     u8 SenseBufferLength;   /* 0x12 */
     u8 Reserved4;           /* 0x13 */
     u8 SGLOffset0;          /* 0x14 */
     u8 SGLOffset1;          /* 0x15 */
     u8 SGLOffset2;          /* 0x16 */
     u8 SGLOffset3;          /* 0x17 */
-    u32 SkipCount;                  /* 0x18 */
-    u32 DataLength;                 /* 0x1C */
-    u32 BidirectionalDataLength;    /* 0x20 */
-    u16 IoFlags;                    /* 0x24 */
-    u16 EEDPFlags;                  /* 0x26 */
-    u32 EEDPBlockSize;              /* 0x28 */
-    u32 SecondaryReferenceTag;      /* 0x2C */
-    u16 SecondaryApplicationTag;    /* 0x30 */
-    u16 ApplicationTagTranslationMask; /* 0x32 */
+    __le32 SkipCount;                  /* 0x18 */
+    __le32 DataLength;                 /* 0x1C */
+    __le32 BidirectionalDataLength;    /* 0x20 */
+    __le16 IoFlags;                    /* 0x24 */
+    __le16 EEDPFlags;                  /* 0x26 */
+    __le32 EEDPBlockSize;              /* 0x28 */
+    __le32 SecondaryReferenceTag;      /* 0x2C */
+    __le16 SecondaryApplicationTag;    /* 0x30 */
+    __le16 ApplicationTagTranslationMask; /* 0x32 */
     u8 LUN[8];              /* 0x34 */
-    u32 Control;            /* 0x3C */
+    __le32 Control;         /* 0x3C */
     union MPI2_SCSI_IO_CDB_UNION CDB;   /* 0x40 */
     struct RAID_CONTEXT RaidContext;    /* 0x60 */
     union MPI2_SGE_IO_UNION SGL;        /* 0x80 */
@@ -315,45 +315,45 @@ struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
 struct MPI2_DEFAULT_REQUEST_DESCRIPTOR {
     u8 RequestFlags;    /* 0x00 */
     u8 MSIxIndex;       /* 0x01 */
-    u16 SMID;                       /* 0x02 */
-    u16 LMID;                       /* 0x04 */
-    u16 DescriptorTypeDependent;    /* 0x06 */
+    __le16 SMID;                    /* 0x02 */
+    __le16 LMID;                    /* 0x04 */
+    __le16 DescriptorTypeDependent; /* 0x06 */
 };
 
 /* High Priority Request Descriptor */
 struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR {
     u8 RequestFlags;    /* 0x00 */
     u8 MSIxIndex;       /* 0x01 */
-    u16 SMID;           /* 0x02 */
-    u16 LMID;           /* 0x04 */
-    u16 Reserved1;      /* 0x06 */
+    __le16 SMID;        /* 0x02 */
+    __le16 LMID;        /* 0x04 */
+    __le16 Reserved1;   /* 0x06 */
 };
 
 /* SCSI IO Request Descriptor */
 struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR {
     u8 RequestFlags;    /* 0x00 */
     u8 MSIxIndex;       /* 0x01 */
-    u16 SMID;           /* 0x02 */
-    u16 LMID;           /* 0x04 */
-    u16 DevHandle;      /* 0x06 */
+    __le16 SMID;        /* 0x02 */
+    __le16 LMID;        /* 0x04 */
+    __le16 DevHandle;   /* 0x06 */
 };
 
 /* SCSI Target Request Descriptor */
 struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR {
     u8 RequestFlags;    /* 0x00 */
     u8 MSIxIndex;       /* 0x01 */
-    u16 SMID;           /* 0x02 */
-    u16 LMID;           /* 0x04 */
-    u16 IoIndex;        /* 0x06 */
+    __le16 SMID;        /* 0x02 */
+    __le16 LMID;        /* 0x04 */
+    __le16 IoIndex;     /* 0x06 */
 };
 
 /* RAID Accelerator Request Descriptor */
 struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR {
     u8 RequestFlags;    /* 0x00 */
     u8 MSIxIndex;       /* 0x01 */
-    u16 SMID;           /* 0x02 */
-    u16 LMID;           /* 0x04 */
-    u16 Reserved;       /* 0x06 */
+    __le16 SMID;        /* 0x02 */
+    __le16 LMID;        /* 0x04 */
+    __le16 Reserved;    /* 0x06 */
 };
 
 /* union of Request Descriptors */
@@ -366,10 +366,10 @@ union MEGASAS_REQUEST_DESCRIPTOR_UNION {
     struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR MFAIo;
     union {
         struct {
-            u32 low;
-            u32 high;
+            __le32 low;
+            __le32 high;
         } u;
-        u64 Words;
+        __le64 Words;
     };
 };
 
@@ -377,35 +377,35 @@ union MEGASAS_REQUEST_DESCRIPTOR_UNION {
 struct MPI2_DEFAULT_REPLY_DESCRIPTOR {
     u8 ReplyFlags;  /* 0x00 */
     u8 MSIxIndex;   /* 0x01 */
-    u16 DescriptorTypeDependent1;   /* 0x02 */
-    u32 DescriptorTypeDependent2;   /* 0x04 */
+    __le16 DescriptorTypeDependent1; /* 0x02 */
+    __le32 DescriptorTypeDependent2; /* 0x04 */
 };
 
 /* Address Reply Descriptor */
 struct MPI2_ADDRESS_REPLY_DESCRIPTOR {
     u8 ReplyFlags;  /* 0x00 */
     u8 MSIxIndex;   /* 0x01 */
-    u16 SMID;               /* 0x02 */
-    u32 ReplyFrameAddress;  /* 0x04 */
+    __le16 SMID;                /* 0x02 */
+    __le32 ReplyFrameAddress;   /* 0x04 */
 };
 
 /* SCSI IO Success Reply Descriptor */
 struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR {
     u8 ReplyFlags;  /* 0x00 */
     u8 MSIxIndex;   /* 0x01 */
-    u16 SMID;       /* 0x02 */
-    u16 TaskTag;    /* 0x04 */
-    u16 Reserved1;  /* 0x06 */
+    __le16 SMID;        /* 0x02 */
+    __le16 TaskTag;     /* 0x04 */
+    __le16 Reserved1;   /* 0x06 */
 };
 
 /* TargetAssist Success Reply Descriptor */
 struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR {
     u8 ReplyFlags;  /* 0x00 */
     u8 MSIxIndex;   /* 0x01 */
-    u16 SMID;           /* 0x02 */
+    __le16 SMID;        /* 0x02 */
     u8 SequenceNumber;  /* 0x04 */
     u8 Reserved1;       /* 0x05 */
-    u16 IoIndex;        /* 0x06 */
+    __le16 IoIndex;     /* 0x06 */
 };
 
 /* Target Command Buffer Reply Descriptor */
@@ -414,16 +414,16 @@ struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR {
     u8 MSIxIndex;   /* 0x01 */
     u8 VP_ID;       /* 0x02 */
     u8 Flags;       /* 0x03 */
-    u16 InitiatorDevHandle; /* 0x04 */
-    u16 IoIndex;            /* 0x06 */
+    __le16 InitiatorDevHandle;  /* 0x04 */
+    __le16 IoIndex;             /* 0x06 */
 };
 
 /* RAID Accelerator Success Reply Descriptor */
 struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR {
     u8 ReplyFlags;  /* 0x00 */
     u8 MSIxIndex;   /* 0x01 */
-    u16 SMID;       /* 0x02 */
-    u32 Reserved;   /* 0x04 */
+    __le16 SMID;        /* 0x02 */
+    __le32 Reserved;    /* 0x04 */
 };
 
 /* union of Reply Descriptors */
@@ -435,7 +435,7 @@ union MPI2_REPLY_DESCRIPTORS_UNION {
     struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
     struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR
         RAIDAcceleratorSuccess;
-    u64 Words;
+    __le64 Words;
 };
 
 /* IOCInit Request message */
@@ -444,28 +444,28 @@ struct MPI2_IOC_INIT_REQUEST {
     u8 Reserved1;   /* 0x01 */
     u8 ChainOffset; /* 0x02 */
     u8 Function;    /* 0x03 */
-    u16 Reserved2;  /* 0x04 */
+    __le16 Reserved2;   /* 0x04 */
     u8 Reserved3;   /* 0x06 */
     u8 MsgFlags;    /* 0x07 */
     u8 VP_ID;       /* 0x08 */
     u8 VF_ID;       /* 0x09 */
-    u16 Reserved4;      /* 0x0A */
-    u16 MsgVersion;     /* 0x0C */
-    u16 HeaderVersion;  /* 0x0E */
+    __le16 Reserved4;       /* 0x0A */
+    __le16 MsgVersion;      /* 0x0C */
+    __le16 HeaderVersion;   /* 0x0E */
     u32 Reserved5;      /* 0x10 */
-    u16 Reserved6;      /* 0x14 */
+    __le16 Reserved6;   /* 0x14 */
     u8 Reserved7;           /* 0x16 */
     u8 HostMSIxVectors;     /* 0x17 */
-    u16 Reserved8;                      /* 0x18 */
-    u16 SystemRequestFrameSize;         /* 0x1A */
-    u16 ReplyDescriptorPostQueueDepth;  /* 0x1C */
-    u16 ReplyFreeQueueDepth;            /* 0x1E */
-    u32 SenseBufferAddressHigh;         /* 0x20 */
-    u32 SystemReplyAddressHigh;         /* 0x24 */
-    u64 SystemRequestFrameBaseAddress;  /* 0x28 */
-    u64 ReplyDescriptorPostQueueAddress;/* 0x30 */
-    u64 ReplyFreeQueueAddress;          /* 0x38 */
-    u64 TimeStamp;                      /* 0x40 */
+    __le16 Reserved8;                       /* 0x18 */
+    __le16 SystemRequestFrameSize;          /* 0x1A */
+    __le16 ReplyDescriptorPostQueueDepth;   /* 0x1C */
+    __le16 ReplyFreeQueueDepth;             /* 0x1E */
+    __le32 SenseBufferAddressHigh;          /* 0x20 */
+    __le32 SystemReplyAddressHigh;          /* 0x24 */
+    __le64 SystemRequestFrameBaseAddress;   /* 0x28 */
+    __le64 ReplyDescriptorPostQueueAddress; /* 0x30 */
+    __le64 ReplyFreeQueueAddress;           /* 0x38 */
+    __le64 TimeStamp;                       /* 0x40 */
 };
 
 /* mrpriv defines */
@@ -491,41 +491,41 @@ struct MPI2_IOC_INIT_REQUEST {
 #define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200
 
 struct MR_DEV_HANDLE_INFO {
-    u16 curDevHdl;
+    __le16 curDevHdl;
     u8 validHandles;
     u8 reserved;
-    u16 devHandle[2];
+    __le16 devHandle[2];
 };
 
 struct MR_ARRAY_INFO {
-    u16 pd[MAX_RAIDMAP_ROW_SIZE];
+    __le16 pd[MAX_RAIDMAP_ROW_SIZE];
 };
 
 struct MR_QUAD_ELEMENT {
-    u64 logStart;
-    u64 logEnd;
-    u64 offsetInSpan;
-    u32 diff;
-    u32 reserved1;
+    __le64 logStart;
+    __le64 logEnd;
+    __le64 offsetInSpan;
+    __le32 diff;
+    __le32 reserved1;
 };
 
 struct MR_SPAN_INFO {
-    u32 noElements;
-    u32 reserved1;
+    __le32 noElements;
+    __le32 reserved1;
     struct MR_QUAD_ELEMENT quad[MAX_RAIDMAP_SPAN_DEPTH];
 };
 
 struct MR_LD_SPAN {
-    u64 startBlk;
-    u64 numBlks;
-    u16 arrayRef;
+    __le64 startBlk;
+    __le64 numBlks;
+    __le16 arrayRef;
     u8 spanRowSize;
     u8 spanRowDataSize;
     u8 reserved[4];
 };
 
 struct MR_SPAN_BLOCK_INFO {
-    u64 num_rows;
+    __le64 num_rows;
     struct MR_LD_SPAN span;
     struct MR_SPAN_INFO block_span_info;
 };
@@ -558,8 +558,8 @@ struct MR_LD_RAID {
         u32 reserved4:7;
 #endif
     } capability;
-    u32 reserved6;
-    u64 size;
+    __le32 reserved6;
+    __le64 size;
     u8 spanDepth;
     u8 level;
     u8 stripeShift;
@@ -568,12 +568,12 @@ struct MR_LD_RAID {
     u8 writeMode;
     u8 PRL;
     u8 SRL;
-    u16 targetId;
+    __le16 targetId;
     u8 ldState;
     u8 regTypeReqOnWrite;
     u8 modFactor;
     u8 regTypeReqOnRead;
-    u16 seqNum;
+    __le16 seqNum;
 
     struct {
         u32 ldSyncRequired:1;
@@ -592,20 +592,20 @@ struct MR_LD_SPAN_MAP {
 };
 
 struct MR_FW_RAID_MAP {
-    u32 totalSize;
+    __le32 totalSize;
     union {
     struct {
-        u32 maxLd;
-        u32 maxSpanDepth;
-        u32 maxRowSize;
-        u32 maxPdCount;
-        u32 maxArrays;
+        __le32 maxLd;
+        __le32 maxSpanDepth;
+        __le32 maxRowSize;
+        __le32 maxPdCount;
+        __le32 maxArrays;
     } validationInfo;
-    u32 version[5];
+    __le32 version[5];
     };
 
-    u32 ldCount;
-    u32 Reserved1;
+    __le32 ldCount;
+    __le32 Reserved1;
     u8 ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES+
             MAX_RAIDMAP_VIEWS];
     u8 fpPdIoTimeoutSec;
@@ -620,7 +620,7 @@ struct IO_REQUEST_INFO {
     u32 numBlocks;
     u16 ldTgtId;
     u8 isRead;
-    u16 devHandle;
+    __le16 devHandle;
     u64 pdBlock;
     u8 fpOkForIo;
     u8 IoforUnevenSpan;
@@ -634,7 +634,7 @@ struct IO_REQUEST_INFO {
 struct MR_LD_TARGET_SYNC {
     u8 targetId;
     u8 reserved;
-    u16 seqNum;
+    __le16 seqNum;
 };
 
 #define IEEE_SGE_FLAGS_ADDR_MASK (0x03)
@@ -679,7 +679,6 @@ struct megasas_cmd_fusion {
      */
     u32 sync_cmd_idx;
     u32 index;
-    u8 flags;
     u8 pd_r1_lb;
 };
 
@@ -720,27 +719,27 @@ struct MR_DRV_RAID_MAP {
      * This feild will be manupulated by driver for ext raid map,
      * else pick the value from firmware raid map.
      */
-    u32 totalSize;
+    __le32 totalSize;
 
     union {
     struct {
-        u32 maxLd;
-        u32 maxSpanDepth;
-        u32 maxRowSize;
-        u32 maxPdCount;
-        u32 maxArrays;
+        __le32 maxLd;
+        __le32 maxSpanDepth;
+        __le32 maxRowSize;
+        __le32 maxPdCount;
+        __le32 maxArrays;
     } validationInfo;
-    u32 version[5];
+    __le32 version[5];
     };
 
     /* timeout value used by driver in FP IOs*/
     u8 fpPdIoTimeoutSec;
     u8 reserved2[7];
 
-    u16 ldCount;
-    u16 arCount;
-    u16 spanCount;
-    u16 reserve3;
+    __le16 ldCount;
+    __le16 arCount;
+    __le16 spanCount;
+    __le16 reserve3;
 
     struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
     u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
@@ -779,10 +778,10 @@ struct MR_FW_RAID_MAP_EXT {
     u8 fpPdIoTimeoutSec;
     u8 reserved2[7];
 
-    u16 ldCount;
-    u16 arCount;
-    u16 spanCount;
-    u16 reserve3;
+    __le16 ldCount;
+    __le16 arCount;
+    __le16 spanCount;
+    __le16 reserve3;
 
     struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
     u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
@@ -792,10 +791,6 @@ struct MR_FW_RAID_MAP_EXT {
 
 struct fusion_context {
     struct megasas_cmd_fusion **cmd_list;
-    struct list_head cmd_pool;
-
-    spinlock_t mpt_pool_lock;
-
     dma_addr_t req_frames_desc_phys;
     u8 *req_frames_desc;
 
@@ -839,10 +834,10 @@ struct fusion_context {
 };
 
 union desc_value {
-    u64 word;
+    __le64 word;
     struct {
-        u32 low;
-        u32 high;
+        __le32 low;
+        __le32 high;
     } u;
 };
 
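union desc_value (and MEGASAS_REQUEST_DESCRIPTOR_UNION above it) lets the driver treat one 64-bit little-endian descriptor as {low, high} halves, e.g. for the paired 32-bit doorbell writes the removed fire_cmd(instance, req_desc->u.low, req_desc->u.high, ...) interface took. A userspace model (the printed split assumes a little-endian host):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    union desc_value_model {
        uint64_t word;      /* __le64 Words in the driver */
        struct {
            uint32_t low;
            uint32_t high;
        } u;
    };

    int main(void)
    {
        union desc_value_model d;

        d.word = htole64(0x1122334455667788ull);
        /* On a little-endian host: low = 0x55667788, high = 0x11223344. */
        printf("low=0x%08x high=0x%08x\n",
               (unsigned)d.u.low, (unsigned)d.u.high);
        return 0;
    }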
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 53030b0e8015..d40d734aa53a 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -56,7 +56,6 @@ static struct scsi_host_template mvs_sht = {
     .change_queue_depth = sas_change_queue_depth,
     .bios_param = sas_bios_param,
     .can_queue = 1,
-    .cmd_per_lun = 1,
     .this_id = -1,
     .sg_tablesize = SG_ALL,
     .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index c6077cefbeca..53c84771f0e8 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -274,7 +274,6 @@ static struct scsi_host_template nsp32_template = {
     .can_queue = 1,
     .sg_tablesize = NSP32_SG_SIZE,
     .max_sectors = 128,
-    .cmd_per_lun = 1,
     .this_id = NSP32_HOST_SCSIID,
     .use_clustering = DISABLE_CLUSTERING,
     .eh_abort_handler = nsp32_eh_abort,
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 1b6c8833a304..5fb6eefc6541 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -86,7 +86,6 @@ static struct scsi_host_template nsp_driver_template = {
     .can_queue = 1,
     .this_id = NSP_INITIATOR_ID,
     .sg_tablesize = SG_ALL,
-    .cmd_per_lun = 1,
     .use_clustering = DISABLE_CLUSTERING,
 };
 
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index bcaf89fe0c9e..c670dc704c74 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -72,7 +72,6 @@ static struct scsi_host_template qlogicfas_driver_template = {
     .can_queue = 1,
     .this_id = -1,
     .sg_tablesize = SG_ALL,
-    .cmd_per_lun = 1,
     .use_clustering = DISABLE_CLUSTERING,
 };
 
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index 155f9573021f..20011c8afbb5 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -680,7 +680,6 @@ static struct scsi_host_template sym53c500_driver_template = {
     .can_queue = 1,
     .this_id = 7,
     .sg_tablesize = 32,
-    .cmd_per_lun = 1,
     .use_clustering = ENABLE_CLUSTERING,
     .shost_attrs = SYM53C500_shost_attrs
 };
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 65555916d3b8..a132f2664d2f 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -78,7 +78,6 @@ static struct scsi_host_template pm8001_sht = {
     .change_queue_depth = sas_change_queue_depth,
     .bios_param = sas_bios_param,
     .can_queue = 1,
-    .cmd_per_lun = 1,
     .this_id = -1,
     .sg_tablesize = SG_ALL,
     .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 1db8b26063b4..ee00e27ba396 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -974,7 +974,6 @@ static struct scsi_host_template ppa_template = {
     .bios_param = ppa_biosparam,
     .this_id = -1,
     .sg_tablesize = SG_ALL,
-    .cmd_per_lun = 1,
     .use_clustering = ENABLE_CLUSTERING,
     .can_queue = 1,
     .slave_alloc = ppa_adjust_queue,
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index 5298def33733..4924424d20fe 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -347,7 +347,6 @@ static struct scsi_host_template ps3rom_host_template = {
     .can_queue = 1,
     .this_id = 7,
     .sg_tablesize = SG_ALL,
-    .cmd_per_lun = 1,
     .emulated = 1, /* only sg driver uses this */
     .max_sectors = PS3ROM_MAX_SECTORS,
     .use_clustering = ENABLE_CLUSTERING,
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index c68a66e8cfc1..5d0ec42a9317 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4217,7 +4217,6 @@ static struct scsi_host_template qla1280_driver_template = {
     .can_queue = 0xfffff,
     .this_id = -1,
     .sg_tablesize = SG_ALL,
-    .cmd_per_lun = 1,
     .use_clustering = ENABLE_CLUSTERING,
 };
 
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 285cb204f300..664013115c9d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -708,7 +708,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
         if (rval != QLA_SUCCESS) {
             ql_log(ql_log_warn, vha, 0x00d4,
                 "Unable to initialize ISP84XX.\n");
             qla84xx_put_chip(vha);
         }
     }
 
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index a1ab25fca874..36fbd4c7af8f 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2797,10 +2797,10 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
     handle = req->current_outstanding_cmd;
     for (index = 1; index < req->num_outstanding_cmds; index++) {
         handle++;
         if (handle == req->num_outstanding_cmds)
             handle = 1;
         if (!req->outstanding_cmds[handle])
             break;
     }
 
     if (index == req->num_outstanding_cmds) {
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 6dc14cd782b2..5559d5e75bbf 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1580,7 +1580,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
             ql_log(ql_log_warn, fcport->vha, 0x503c,
                 "Async-%s error - hdl=%x response(%x).\n",
                 type, sp->handle, sts->data[3]);
             iocb->u.tmf.data = QLA_FUNCTION_FAILED;
         }
     }
 
@@ -1979,7 +1979,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
         rval = EXT_STATUS_ERR;
         break;
     }
     bsg_job->reply->reply_payload_rcv_len = 0;
 
 done:
     /* Return the vendor specific reply to API */
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 7d2b18f2675c..1620b0ec977b 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1843,7 +1843,7 @@ qla82xx_set_product_offset(struct qla_hw_data *ha)
 
     ptab_desc = qla82xx_get_table_desc(unirom,
             QLA82XX_URI_DIR_SECT_PRODUCT_TBL);
     if (!ptab_desc)
         return -1;
 
     entries = cpu_to_le32(ptab_desc->num_entries);
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index ed4d6b6b53e3..000c57e4d033 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -397,11 +397,11 @@ qla8044_idc_lock(struct qla_hw_data *ha)
                  * has the lock, wait for 2secs
                  * and retry
                  */
                 ql_dbg(ql_dbg_p3p, vha, 0xb08a,
                     "%s: IDC lock Recovery by %d "
                     "failed, Retrying timeout\n", __func__,
                     ha->portnum);
                 timeout = 0;
             }
         }
         msleep(QLA8044_DRV_LOCK_MSLEEP);
@@ -3141,8 +3141,7 @@ qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha,
         goto error;
 
     addr7 = addr2 - (4 * stride1);
-    data = qla8044_ipmdio_rd_reg(vha, addr1, addr3,
-        mask, addr7);
+    data = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr7);
     if (data == -1)
         goto error;
 
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 7462dd70b150..a28815b8276f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -4418,7 +4418,10 @@ retry_lock2:
 void
 qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
 {
-    uint16_t options = (requester_id << 15) | BIT_7, retry;
+#if 0
+    uint16_t options = (requester_id << 15) | BIT_7;
+#endif
+    uint16_t retry;
     uint32_t data;
     struct qla_hw_data *ha = base_vha->hw;
 
@@ -4454,6 +4457,7 @@ retry_unlock:
 
     return;
 
+#if 0
     /* XXX: IDC-unlock implementation using access-control mbx */
     retry = 0;
 retry_unlock2:
@@ -4469,6 +4473,7 @@ retry_unlock2:
     }
 
     return;
+#endif
 }
 
 int
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index fe8a8d157e22..4a484d60be0d 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -3712,6 +3712,14 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
 
 static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
 {
+#if 1
+    /*
+     * FIXME: Reject non zero SRR relative offset until we can test
+     * this code properly.
+     */
+    pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
+    return -1;
+#else
     struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
     size_t first_offset = 0, rem_offset = offset, tmp = 0;
     int i, sg_srr_cnt, bufflen = 0;
@@ -3721,13 +3729,6 @@ static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
3721 "cmd->sg_cnt: %u, direction: %d\n", 3729 "cmd->sg_cnt: %u, direction: %d\n",
3722 cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); 3730 cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
3723 3731
3724 /*
3725 * FIXME: Reject non zero SRR relative offset until we can test
3726 * this code properly.
3727 */
3728 pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
3729 return -1;
3730
3731 if (!cmd->sg || !cmd->sg_cnt) { 3732 if (!cmd->sg || !cmd->sg_cnt) {
3732 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055, 3733 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
3733 "Missing cmd->sg or zero cmd->sg_cnt in" 3734 "Missing cmd->sg or zero cmd->sg_cnt in"
@@ -3810,6 +3811,7 @@ static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
         BUG();
 
     return 0;
+#endif
 }
 
 static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
diff --git a/drivers/scsi/qlogicfas.c b/drivers/scsi/qlogicfas.c
index a22bb1b40ce2..61cac87fb86f 100644
--- a/drivers/scsi/qlogicfas.c
+++ b/drivers/scsi/qlogicfas.c
@@ -193,7 +193,6 @@ static struct scsi_host_template qlogicfas_driver_template = {
     .can_queue = 1,
     .this_id = -1,
     .sg_tablesize = SG_ALL,
-    .cmd_per_lun = 1,
     .use_clustering = DISABLE_CLUSTERING,
 };
 
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index fe122700cad8..676385ff28ef 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1287,7 +1287,6 @@ static struct scsi_host_template qpti_template = {
     .can_queue = QLOGICPTI_REQ_QUEUE_LEN,
     .this_id = 7,
     .sg_tablesize = QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN),
-    .cmd_per_lun = 1,
     .use_clustering = ENABLE_CLUSTERING,
 };
 
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 3833bf59fb66..207d6a7a1bd0 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -98,52 +98,6 @@ EXPORT_SYMBOL(scsi_sd_probe_domain);
98ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain); 98ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
99EXPORT_SYMBOL(scsi_sd_pm_domain); 99EXPORT_SYMBOL(scsi_sd_pm_domain);
100 100
101/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
102 * You may not alter any existing entry (although adding new ones is
103 * encouraged once assigned by ANSI/INCITS T10
104 */
105static const char *const scsi_device_types[] = {
106 "Direct-Access ",
107 "Sequential-Access",
108 "Printer ",
109 "Processor ",
110 "WORM ",
111 "CD-ROM ",
112 "Scanner ",
113 "Optical Device ",
114 "Medium Changer ",
115 "Communications ",
116 "ASC IT8 ",
117 "ASC IT8 ",
118 "RAID ",
119 "Enclosure ",
120 "Direct-Access-RBC",
121 "Optical card ",
122 "Bridge controller",
123 "Object storage ",
124 "Automation/Drive ",
125 "Security Manager ",
126 "Direct-Access-ZBC",
127};
128
129/**
130 * scsi_device_type - Return 17 char string indicating device type.
131 * @type: type number to look up
132 */
133
134const char * scsi_device_type(unsigned type)
135{
136 if (type == 0x1e)
137 return "Well-known LUN ";
138 if (type == 0x1f)
139 return "No Device ";
140 if (type >= ARRAY_SIZE(scsi_device_types))
141 return "Unknown ";
142 return scsi_device_types[type];
143}
144
145EXPORT_SYMBOL(scsi_device_type);
146
147struct scsi_host_cmd_pool {
148 struct kmem_cache *cmd_slab;
149 struct kmem_cache *sense_slab;
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
new file mode 100644
index 000000000000..2ff092252b76
--- /dev/null
+++ b/drivers/scsi/scsi_common.c
@@ -0,0 +1,178 @@
1/*
2 * SCSI functions used by both the initiator and the target code.
3 */
4
5#include <linux/bug.h>
6#include <linux/kernel.h>
7#include <linux/string.h>
8#include <scsi/scsi_common.h>
9
10/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
11 * You may not alter any existing entry (although adding new ones is
12 * encouraged once assigned by ANSI/INCITS T10
13 */
14static const char *const scsi_device_types[] = {
15 "Direct-Access ",
16 "Sequential-Access",
17 "Printer ",
18 "Processor ",
19 "WORM ",
20 "CD-ROM ",
21 "Scanner ",
22 "Optical Device ",
23 "Medium Changer ",
24 "Communications ",
25 "ASC IT8 ",
26 "ASC IT8 ",
27 "RAID ",
28 "Enclosure ",
29 "Direct-Access-RBC",
30 "Optical card ",
31 "Bridge controller",
32 "Object storage ",
33 "Automation/Drive ",
34 "Security Manager ",
35 "Direct-Access-ZBC",
36};
37
38/**
39 * scsi_device_type - Return 17 char string indicating device type.
40 * @type: type number to look up
41 */
42const char *scsi_device_type(unsigned type)
43{
44 if (type == 0x1e)
45 return "Well-known LUN ";
46 if (type == 0x1f)
47 return "No Device ";
48 if (type >= ARRAY_SIZE(scsi_device_types))
49 return "Unknown ";
50 return scsi_device_types[type];
51}
52EXPORT_SYMBOL(scsi_device_type);
53
54/**
55 * scsilun_to_int - convert a scsi_lun to an int
56 * @scsilun: struct scsi_lun to be converted.
57 *
58 * Description:
59 * Convert @scsilun from a struct scsi_lun to a host byte-ordered 64-bit
60 * integer, and return the result. The caller must check for
61 * truncation before using this function.
62 *
63 * Notes:
64 * For a description of the LUN format, post SCSI-3 see the SCSI
65 * Architecture Model, for SCSI-3 see the SCSI Controller Commands.
66 *
67 * Given a struct scsi_lun of: d2 04 0b 03 00 00 00 00, this function
68 * returns the integer: 0x0b03d204
69 *
70 * This encoding will return a standard integer LUN for LUNs smaller
71 * than 256, which typically use a single level LUN structure with
72 * addressing method 0.
73 */
74u64 scsilun_to_int(struct scsi_lun *scsilun)
75{
76 int i;
77 u64 lun;
78
79 lun = 0;
80 for (i = 0; i < sizeof(lun); i += 2)
81 lun = lun | (((u64)scsilun->scsi_lun[i] << ((i + 1) * 8)) |
82 ((u64)scsilun->scsi_lun[i + 1] << (i * 8)));
83 return lun;
84}
85EXPORT_SYMBOL(scsilun_to_int);
86
87/**
88 * int_to_scsilun - converts an int back into a scsi_lun
89 * @lun: integer to be reverted
90 * @scsilun: struct scsi_lun to be set.
91 *
92 * Description:
93 * Reverses the functionality of scsilun_to_int(), which packed
94 * an 8-byte lun value into an int. This routine unpacks the int
95 * back into the lun value.
96 *
97 * Notes:
98 * Given an integer : 0x0b03d204, this function returns a
99 * struct scsi_lun of: d2 04 0b 03 00 00 00 00
100 *
101 */
102void int_to_scsilun(u64 lun, struct scsi_lun *scsilun)
103{
104 int i;
105
106 memset(scsilun->scsi_lun, 0, sizeof(scsilun->scsi_lun));
107
108 for (i = 0; i < sizeof(lun); i += 2) {
109 scsilun->scsi_lun[i] = (lun >> 8) & 0xFF;
110 scsilun->scsi_lun[i+1] = lun & 0xFF;
111 lun = lun >> 16;
112 }
113}
114EXPORT_SYMBOL(int_to_scsilun);
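
An editorial aside, not part of this patch: the pair of helpers above is easiest
to verify with the kerneldoc example worked through in user space. The sketch
below reproduces only the pure byte-swizzling logic, substituting a local
demo_lun for struct scsi_lun.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    struct demo_lun { uint8_t b[8]; };  /* stand-in for struct scsi_lun */

    /* Same swizzle as scsilun_to_int(): bytes are consumed in pairs, each
     * pair shifted into the next higher 16 bits of the result. */
    static uint64_t demo_lun_to_int(const struct demo_lun *l)
    {
        uint64_t lun = 0;
        int i;

        for (i = 0; i < 8; i += 2)
            lun |= ((uint64_t)l->b[i] << ((i + 1) * 8)) |
                   ((uint64_t)l->b[i + 1] << (i * 8));
        return lun;
    }

    /* Inverse, mirroring int_to_scsilun(). */
    static void demo_int_to_lun(uint64_t lun, struct demo_lun *l)
    {
        int i;

        memset(l->b, 0, sizeof(l->b));
        for (i = 0; i < 8; i += 2) {
            l->b[i] = (lun >> 8) & 0xFF;
            l->b[i + 1] = lun & 0xFF;
            lun >>= 16;
        }
    }

    int main(void)
    {
        struct demo_lun in = { { 0xd2, 0x04, 0x0b, 0x03, 0, 0, 0, 0 } }, out;
        uint64_t lun = demo_lun_to_int(&in);

        demo_int_to_lun(lun, &out);
        /* Prints 0xb03d204, matching the kerneldoc example above. */
        printf("packed LUN = 0x%llx\n", (unsigned long long)lun);
        if (!memcmp(in.b, out.b, 8))
            printf("round trip ok\n");
        return 0;
    }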
115
116/**
117 * scsi_normalize_sense - normalize main elements from either fixed or
118 * descriptor sense data format into a common format.
119 *
120 * @sense_buffer: byte array containing sense data returned by device
121 * @sb_len: number of valid bytes in sense_buffer
122 * @sshdr: pointer to instance of structure that common
123 * elements are written to.
124 *
125 * Notes:
126 * The "main elements" from sense data are: response_code, sense_key,
127 * asc, ascq and additional_length (only for descriptor format).
128 *
129 * Typically this function can be called after a device has
130 * responded to a SCSI command with the CHECK_CONDITION status.
131 *
132 * Return value:
133 * true if valid sense data information found, else false;
134 */
135bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
136 struct scsi_sense_hdr *sshdr)
137{
138 if (!sense_buffer || !sb_len)
139 return false;
140
141 memset(sshdr, 0, sizeof(struct scsi_sense_hdr));
142
143 sshdr->response_code = (sense_buffer[0] & 0x7f);
144
145 if (!scsi_sense_valid(sshdr))
146 return false;
147
148 if (sshdr->response_code >= 0x72) {
149 /*
150 * descriptor format
151 */
152 if (sb_len > 1)
153 sshdr->sense_key = (sense_buffer[1] & 0xf);
154 if (sb_len > 2)
155 sshdr->asc = sense_buffer[2];
156 if (sb_len > 3)
157 sshdr->ascq = sense_buffer[3];
158 if (sb_len > 7)
159 sshdr->additional_length = sense_buffer[7];
160 } else {
161 /*
162 * fixed format
163 */
164 if (sb_len > 2)
165 sshdr->sense_key = (sense_buffer[2] & 0xf);
166 if (sb_len > 7) {
167 sb_len = (sb_len < (sense_buffer[7] + 8)) ?
168 sb_len : (sense_buffer[7] + 8);
169 if (sb_len > 12)
170 sshdr->asc = sense_buffer[12];
171 if (sb_len > 13)
172 sshdr->ascq = sense_buffer[13];
173 }
174 }
175
176 return true;
177}
178EXPORT_SYMBOL(scsi_normalize_sense);
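
The two branches of scsi_normalize_sense() are easiest to see with a byte-level
example: in fixed format (response code 0x70/0x71) the sense key lives in byte 2
and asc/ascq in bytes 12/13, while in descriptor format (0x72/0x73) the key
moves to byte 1 and asc/ascq to bytes 2/3. A user-space sketch, not part of
this patch, with a local struct standing in for struct scsi_sense_hdr; length
clamping and additional_length handling are omitted for brevity:

    #include <stdio.h>
    #include <stdint.h>

    struct demo_sshdr { uint8_t response_code, sense_key, asc, ascq; };

    /* Minimal version of the two format branches above. */
    static void demo_normalize(const uint8_t *sb, struct demo_sshdr *h)
    {
        h->response_code = sb[0] & 0x7f;
        if (h->response_code >= 0x72) {         /* descriptor format */
            h->sense_key = sb[1] & 0xf;
            h->asc = sb[2];
            h->ascq = sb[3];
        } else {                                /* fixed format */
            h->sense_key = sb[2] & 0xf;
            h->asc = sb[12];
            h->ascq = sb[13];
        }
    }

    int main(void)
    {
        /* Fixed-format MEDIUM NOT PRESENT: key 0x2 (NOT READY),
         * asc/ascq 0x3a/0x00. */
        uint8_t fixed[18] = { 0x70, 0, 0x02, [7] = 10, [12] = 0x3a };
        struct demo_sshdr h;

        demo_normalize(fixed, &h);
        printf("key=%#x asc=%#x ascq=%#x\n", h.sense_key, h.asc, h.ascq);
        return 0;
    }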
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index c95a4e943fc6..106884a5444e 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2399,70 +2399,6 @@ out_put_autopm_host:
2399}
2400EXPORT_SYMBOL(scsi_ioctl_reset);
2401
2402/**
2403 * scsi_normalize_sense - normalize main elements from either fixed or
2404 * descriptor sense data format into a common format.
2405 *
2406 * @sense_buffer: byte array containing sense data returned by device
2407 * @sb_len: number of valid bytes in sense_buffer
2408 * @sshdr: pointer to instance of structure that common
2409 * elements are written to.
2410 *
2411 * Notes:
2412 * The "main elements" from sense data are: response_code, sense_key,
2413 * asc, ascq and additional_length (only for descriptor format).
2414 *
2415 * Typically this function can be called after a device has
2416 * responded to a SCSI command with the CHECK_CONDITION status.
2417 *
2418 * Return value:
2419 * true if valid sense data information found, else false;
2420 */
2421bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
2422 struct scsi_sense_hdr *sshdr)
2423{
2424 if (!sense_buffer || !sb_len)
2425 return false;
2426
2427 memset(sshdr, 0, sizeof(struct scsi_sense_hdr));
2428
2429 sshdr->response_code = (sense_buffer[0] & 0x7f);
2430
2431 if (!scsi_sense_valid(sshdr))
2432 return false;
2433
2434 if (sshdr->response_code >= 0x72) {
2435 /*
2436 * descriptor format
2437 */
2438 if (sb_len > 1)
2439 sshdr->sense_key = (sense_buffer[1] & 0xf);
2440 if (sb_len > 2)
2441 sshdr->asc = sense_buffer[2];
2442 if (sb_len > 3)
2443 sshdr->ascq = sense_buffer[3];
2444 if (sb_len > 7)
2445 sshdr->additional_length = sense_buffer[7];
2446 } else {
2447 /*
2448 * fixed format
2449 */
2450 if (sb_len > 2)
2451 sshdr->sense_key = (sense_buffer[2] & 0xf);
2452 if (sb_len > 7) {
2453 sb_len = (sb_len < (sense_buffer[7] + 8)) ?
2454 sb_len : (sense_buffer[7] + 8);
2455 if (sb_len > 12)
2456 sshdr->asc = sense_buffer[12];
2457 if (sb_len > 13)
2458 sshdr->ascq = sense_buffer[13];
2459 }
2460 }
2461
2462 return true;
2463}
2464EXPORT_SYMBOL(scsi_normalize_sense);
2465
2466bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
2467 struct scsi_sense_hdr *sshdr)
2468{
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 6efab1c455e1..f9f3f8203d42 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -280,7 +280,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
280 sdev->host->cmd_per_lun, shost->bqt,
281 shost->hostt->tag_alloc_policy);
282 }
283 scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun);
283 scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun ?
284 sdev->host->cmd_per_lun : 1);
284
285 scsi_sysfs_device_initialize(sdev);
286
@@ -1269,68 +1270,6 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
1269}
1270
1271/**
1272 * scsilun_to_int - convert a scsi_lun to an int
1273 * @scsilun: struct scsi_lun to be converted.
1274 *
1275 * Description:
1276 * Convert @scsilun from a struct scsi_lun to a four byte host byte-ordered
1277 * integer, and return the result. The caller must check for
1278 * truncation before using this function.
1279 *
1280 * Notes:
1281 * For a description of the LUN format, post SCSI-3 see the SCSI
1282 * Architecture Model, for SCSI-3 see the SCSI Controller Commands.
1283 *
1284 * Given a struct scsi_lun of: d2 04 0b 03 00 00 00 00, this function
1285 * returns the integer: 0x0b03d204
1286 *
1287 * This encoding will return a standard integer LUN for LUNs smaller
1288 * than 256, which typically use a single level LUN structure with
1289 * addressing method 0.
1290 **/
1291u64 scsilun_to_int(struct scsi_lun *scsilun)
1292{
1293 int i;
1294 u64 lun;
1295
1296 lun = 0;
1297 for (i = 0; i < sizeof(lun); i += 2)
1298 lun = lun | (((u64)scsilun->scsi_lun[i] << ((i + 1) * 8)) |
1299 ((u64)scsilun->scsi_lun[i + 1] << (i * 8)));
1300 return lun;
1301}
1302EXPORT_SYMBOL(scsilun_to_int);
1303
1304/**
1305 * int_to_scsilun - reverts an int into a scsi_lun
1306 * @lun: integer to be reverted
1307 * @scsilun: struct scsi_lun to be set.
1308 *
1309 * Description:
1310 * Reverts the functionality of the scsilun_to_int, which packed
1311 * an 8-byte lun value into an int. This routine unpacks the int
1312 * back into the lun value.
1313 *
1314 * Notes:
1315 * Given an integer : 0x0b03d204, this function returns a
1316 * struct scsi_lun of: d2 04 0b 03 00 00 00 00
1317 *
1318 **/
1319void int_to_scsilun(u64 lun, struct scsi_lun *scsilun)
1320{
1321 int i;
1322
1323 memset(scsilun->scsi_lun, 0, sizeof(scsilun->scsi_lun));
1324
1325 for (i = 0; i < sizeof(lun); i += 2) {
1326 scsilun->scsi_lun[i] = (lun >> 8) & 0xFF;
1327 scsilun->scsi_lun[i+1] = lun & 0xFF;
1328 lun = lun >> 16;
1329 }
1330}
1331EXPORT_SYMBOL(int_to_scsilun);
1332
1333/**
1334 * scsi_report_lun_scan - Scan using SCSI REPORT LUN results
1335 * @starget: which target
1336 * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 67d43e35693d..55647aae065c 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -204,6 +204,8 @@ iscsi_create_endpoint(int dd_size)
204 iscsi_match_epid);
205 if (!dev)
206 break;
207 else
208 put_device(dev);
207 }
208 if (id == ISCSI_MAX_EPID) {
209 printk(KERN_ERR "Too many connections. Max supported %u\n",
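
The two added lines fix a device reference leak: class_find_device() returns
its match with the refcount elevated, so when the lookup is used only as an
existence probe the reference must be dropped again. A minimal sketch of the
pattern; the endpoint class name iscsi_endpoint_class is an assumption, since
the class variable is not visible in this hunk:

    /* Probe whether an endpoint with this id exists; drop the reference
     * class_find_device() took as soon as the answer is known. */
    dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
                            iscsi_match_epid);
    if (!dev)
        break;              /* id is free, claim it */
    put_device(dev);        /* balance class_find_device()'s get */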
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 7f9d65fe4fd9..3b2fcb4fada0 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2988,7 +2988,8 @@ static int sd_probe(struct device *dev)
2988 sdkp->dev.class = &sd_disk_class;
2989 dev_set_name(&sdkp->dev, "%s", dev_name(dev));
2990
2991 if (device_add(&sdkp->dev))
2991 error = device_add(&sdkp->dev);
2992 if (error)
2992 goto out_free_index;
2993
2994 get_device(dev);
diff --git a/drivers/scsi/snic/Makefile b/drivers/scsi/snic/Makefile
new file mode 100644
index 000000000000..ef7c0dd47f40
--- /dev/null
+++ b/drivers/scsi/snic/Makefile
@@ -0,0 +1,17 @@
1obj-$(CONFIG_SCSI_SNIC) += snic.o
2
3snic-y := \
4 snic_attrs.o \
5 snic_main.o \
6 snic_res.o \
7 snic_isr.o \
8 snic_ctl.o \
9 snic_io.o \
10 snic_scsi.o \
11 snic_disc.o \
12 vnic_cq.o \
13 vnic_intr.o \
14 vnic_dev.o \
15 vnic_wq.o
16
17snic-$(CONFIG_SCSI_SNIC_DEBUG_FS) += snic_debugfs.o snic_trc.o
diff --git a/drivers/scsi/snic/cq_desc.h b/drivers/scsi/snic/cq_desc.h
new file mode 100644
index 000000000000..a5290562c1fa
--- /dev/null
+++ b/drivers/scsi/snic/cq_desc.h
@@ -0,0 +1,77 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#ifndef _CQ_DESC_H_
19#define _CQ_DESC_H_
20
21/*
22 * Completion queue descriptor types
23 */
24enum cq_desc_types {
25 CQ_DESC_TYPE_WQ_ENET = 0,
26 CQ_DESC_TYPE_DESC_COPY = 1,
27 CQ_DESC_TYPE_WQ_EXCH = 2,
28 CQ_DESC_TYPE_RQ_ENET = 3,
29 CQ_DESC_TYPE_RQ_FCP = 4,
30};
31
32/* Completion queue descriptor: 16B
33 *
34 * All completion queues have this basic layout. The
35 * type_specific area is unique for each completion
36 * queue type.
37 */
38struct cq_desc {
39 __le16 completed_index;
40 __le16 q_number;
41 u8 type_specific[11];
42 u8 type_color;
43};
44
45#define CQ_DESC_TYPE_BITS 4
46#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
47#define CQ_DESC_COLOR_MASK 1
48#define CQ_DESC_COLOR_SHIFT 7
49#define CQ_DESC_Q_NUM_BITS 10
50#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
51#define CQ_DESC_COMP_NDX_BITS 12
52#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
53
54static inline void cq_desc_dec(const struct cq_desc *desc_arg,
55 u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
56{
57 const struct cq_desc *desc = desc_arg;
58 const u8 type_color = desc->type_color;
59
60 *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
61
62 /*
63 * Make sure color bit is read from desc *before* other fields
64 * are read from desc. Hardware guarantees color bit is last
65 * bit (byte) written. Adding the rmb() prevents the compiler
66 * and/or CPU from reordering the reads which would potentially
67 * result in reading stale values.
68 */
69 rmb();
70
71 *type = type_color & CQ_DESC_TYPE_MASK;
72 *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
73 *completed_index = le16_to_cpu(desc->completed_index) &
74 CQ_DESC_COMP_NDX_MASK;
75}
76
77#endif /* _CQ_DESC_H_ */
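
The color bit documented in cq_desc_dec() is what lets a consumer poll the
completion ring without a hardware-maintained producer index: the adapter
flips the color it writes each time it wraps the ring, so an entry is new
while its color differs from the color seen on the previous pass. A sketch of
that polling loop; the ring bookkeeping (to_clean index and last_color) is
assumed for illustration, and the snic driver's actual consumer lives in its
vnic_cq code, which is not shown in this excerpt:

    static void demo_cq_service(struct cq_desc *ring, unsigned int ring_size,
                                unsigned int *to_clean, u8 *last_color)
    {
        u8 type, color;
        u16 q_number, completed_index;

        cq_desc_dec(ring + *to_clean, &type, &color,
                    &q_number, &completed_index);

        while (color != *last_color) {          /* new entry: color flipped */
            /* ... dispatch on type / completed_index here ... */

            if (++(*to_clean) == ring_size) {
                *to_clean = 0;
                *last_color = !*last_color;     /* hardware flips on wrap */
            }
            cq_desc_dec(ring + *to_clean, &type, &color,
                        &q_number, &completed_index);
        }
    }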
diff --git a/drivers/scsi/snic/cq_enet_desc.h b/drivers/scsi/snic/cq_enet_desc.h
new file mode 100644
index 000000000000..0a1be2ed0288
--- /dev/null
+++ b/drivers/scsi/snic/cq_enet_desc.h
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#ifndef _CQ_ENET_DESC_H_
19#define _CQ_ENET_DESC_H_
20
21#include "cq_desc.h"
22
23/* Ethernet completion queue descriptor: 16B */
24struct cq_enet_wq_desc {
25 __le16 completed_index;
26 __le16 q_number;
27 u8 reserved[11];
28 u8 type_color;
29};
30
31static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
32 u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
33{
34 cq_desc_dec((struct cq_desc *)desc, type,
35 color, q_number, completed_index);
36}
37
38#endif /* _CQ_ENET_DESC_H_ */
diff --git a/drivers/scsi/snic/snic.h b/drivers/scsi/snic/snic.h
new file mode 100644
index 000000000000..d7f5ba6ba84c
--- /dev/null
+++ b/drivers/scsi/snic/snic.h
@@ -0,0 +1,414 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#ifndef _SNIC_H_
19#define _SNIC_H_
20
21#include <linux/module.h>
22#include <linux/netdevice.h>
23#include <linux/workqueue.h>
24#include <linux/bitops.h>
25#include <linux/mempool.h>
26#include <scsi/scsi_cmnd.h>
27#include <scsi/scsi.h>
28#include <scsi/scsi_host.h>
29
30#include "snic_disc.h"
31#include "snic_io.h"
32#include "snic_res.h"
33#include "snic_trc.h"
34#include "snic_stats.h"
35#include "vnic_dev.h"
36#include "vnic_wq.h"
37#include "vnic_cq.h"
38#include "vnic_intr.h"
39#include "vnic_stats.h"
40#include "vnic_snic.h"
41
42#define SNIC_DRV_NAME "snic"
43#define SNIC_DRV_DESCRIPTION "Cisco SCSI NIC Driver"
44#define SNIC_DRV_VERSION "0.0.1.18"
45#define PFX SNIC_DRV_NAME ":"
46#define DFX SNIC_DRV_NAME "%d: "
47
48#define DESC_CLEAN_LOW_WATERMARK 8
49#define SNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */
50#define SNIC_MAX_IO_REQ 50 /* scsi_cmnd tag map entries */
51#define SNIC_MIN_IO_REQ 8 /* Min IO throttle count */
52#define SNIC_IO_LOCKS 64 /* IO locks: power of 2 */
53#define SNIC_DFLT_QUEUE_DEPTH 32 /* Default Queue Depth */
54#define SNIC_MAX_QUEUE_DEPTH 64 /* Max Queue Depth */
55#define SNIC_DFLT_CMD_TIMEOUT 90 /* Extended tmo for FW */
56
57/*
58 * Tag bits used for special requests.
59 */
60#define SNIC_TAG_ABORT BIT(30) /* Tag indicating abort */
61#define SNIC_TAG_DEV_RST BIT(29) /* Tag for device reset */
62#define SNIC_TAG_IOCTL_DEV_RST BIT(28) /* Tag for User Device Reset */
63#define SNIC_TAG_MASK (BIT(24) - 1) /* Mask for lookup */
64#define SNIC_NO_TAG -1
65
66/*
67 * Command flags to identify the type of command and for other future use
68 */
69#define SNIC_NO_FLAGS 0
70#define SNIC_IO_INITIALIZED BIT(0)
71#define SNIC_IO_ISSUED BIT(1)
72#define SNIC_IO_DONE BIT(2)
73#define SNIC_IO_REQ_NULL BIT(3)
74#define SNIC_IO_ABTS_PENDING BIT(4)
75#define SNIC_IO_ABORTED BIT(5)
76#define SNIC_IO_ABTS_ISSUED BIT(6)
77#define SNIC_IO_TERM_ISSUED BIT(7)
78#define SNIC_IO_ABTS_TIMEDOUT BIT(8)
79#define SNIC_IO_ABTS_TERM_DONE BIT(9)
80#define SNIC_IO_ABTS_TERM_REQ_NULL BIT(10)
81#define SNIC_IO_ABTS_TERM_TIMEDOUT BIT(11)
82#define SNIC_IO_INTERNAL_TERM_PENDING BIT(12)
83#define SNIC_IO_INTERNAL_TERM_ISSUED BIT(13)
84#define SNIC_DEVICE_RESET BIT(14)
85#define SNIC_DEV_RST_ISSUED BIT(15)
86#define SNIC_DEV_RST_TIMEDOUT BIT(16)
87#define SNIC_DEV_RST_ABTS_ISSUED BIT(17)
88#define SNIC_DEV_RST_TERM_ISSUED BIT(18)
89#define SNIC_DEV_RST_DONE BIT(19)
90#define SNIC_DEV_RST_REQ_NULL BIT(20)
91#define SNIC_DEV_RST_ABTS_DONE BIT(21)
92#define SNIC_DEV_RST_TERM_DONE BIT(22)
93#define SNIC_DEV_RST_ABTS_PENDING BIT(23)
94#define SNIC_DEV_RST_PENDING BIT(24)
95#define SNIC_DEV_RST_NOTSUP BIT(25)
96#define SNIC_SCSI_CLEANUP BIT(26)
97#define SNIC_HOST_RESET_ISSUED BIT(27)
98
99#define SNIC_ABTS_TIMEOUT 30000 /* msec */
100#define SNIC_LUN_RESET_TIMEOUT 30000 /* msec */
101#define SNIC_HOST_RESET_TIMEOUT 30000 /* msec */
102
103
104/*
105 * These are protected by the hashed req_lock.
106 */
107#define CMD_SP(Cmnd) \
108 (((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->rqi)
109#define CMD_STATE(Cmnd) \
110 (((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->state)
111#define CMD_ABTS_STATUS(Cmnd) \
112 (((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->abts_status)
113#define CMD_LR_STATUS(Cmnd) \
114 (((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->lr_status)
115#define CMD_FLAGS(Cmnd) \
116 (((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->flags)
117
118#define SNIC_INVALID_CODE 0x100 /* Hdr Status val unused by firmware */
119
120#define SNIC_MAX_TARGET 256
121#define SNIC_FLAGS_NONE (0)
122
123/* snic module params */
124extern unsigned int snic_max_qdepth;
125
126/* snic debugging */
127extern unsigned int snic_log_level;
128
129#define SNIC_MAIN_LOGGING 0x1
130#define SNIC_SCSI_LOGGING 0x2
131#define SNIC_ISR_LOGGING 0x8
132#define SNIC_DESC_LOGGING 0x10
133
134#define SNIC_CHECK_LOGGING(LEVEL, CMD) \
135do { \
136 if (unlikely(snic_log_level & LEVEL)) \
137 do { \
138 CMD; \
139 } while (0); \
140} while (0)
141
142#define SNIC_MAIN_DBG(host, fmt, args...) \
143 SNIC_CHECK_LOGGING(SNIC_MAIN_LOGGING, \
144 shost_printk(KERN_INFO, host, fmt, ## args);)
145
146#define SNIC_SCSI_DBG(host, fmt, args...) \
147 SNIC_CHECK_LOGGING(SNIC_SCSI_LOGGING, \
148 shost_printk(KERN_INFO, host, fmt, ##args);)
149
150#define SNIC_DISC_DBG(host, fmt, args...) \
151 SNIC_CHECK_LOGGING(SNIC_SCSI_LOGGING, \
152 shost_printk(KERN_INFO, host, fmt, ##args);)
153
154#define SNIC_ISR_DBG(host, fmt, args...) \
155 SNIC_CHECK_LOGGING(SNIC_ISR_LOGGING, \
156 shost_printk(KERN_INFO, host, fmt, ##args);)
157
158#define SNIC_HOST_ERR(host, fmt, args...) \
159 shost_printk(KERN_ERR, host, fmt, ##args)
160
161#define SNIC_HOST_INFO(host, fmt, args...) \
162 shost_printk(KERN_INFO, host, fmt, ##args)
163
164#define SNIC_INFO(fmt, args...) \
165 pr_info(PFX fmt, ## args)
166
167#define SNIC_DBG(fmt, args...) \
168 pr_info(PFX fmt, ## args)
169
170#define SNIC_ERR(fmt, args...) \
171 pr_err(PFX fmt, ## args)
172
173#ifdef DEBUG
174#define SNIC_BUG_ON(EXPR) \
175 ({ \
176 if (EXPR) { \
177 SNIC_ERR("SNIC BUG(%s)\n", #EXPR); \
178 BUG_ON(EXPR); \
179 } \
180 })
181#else
182#define SNIC_BUG_ON(EXPR) \
183 ({ \
184 if (EXPR) { \
185 SNIC_ERR("SNIC BUG(%s) at %s : %d\n", \
186 #EXPR, __func__, __LINE__); \
187 WARN_ON_ONCE(EXPR); \
188 } \
189 })
190#endif
191
192/* Soft assert */
193#define SNIC_ASSERT_NOT_IMPL(EXPR) \
194 ({ \
195 if (EXPR) {\
196 SNIC_INFO("Functionality not impl'ed at %s:%d\n", \
197 __func__, __LINE__); \
198 WARN_ON_ONCE(EXPR); \
199 } \
200 })
201
202
203extern const char *snic_state_str[];
204
205enum snic_intx_intr_index {
206 SNIC_INTX_WQ_RQ_COPYWQ,
207 SNIC_INTX_ERR,
208 SNIC_INTX_NOTIFY,
209 SNIC_INTX_INTR_MAX,
210};
211
212enum snic_msix_intr_index {
213 SNIC_MSIX_WQ,
214 SNIC_MSIX_IO_CMPL,
215 SNIC_MSIX_ERR_NOTIFY,
216 SNIC_MSIX_INTR_MAX,
217};
218
219struct snic_msix_entry {
220 int requested;
221 char devname[IFNAMSIZ];
222 irqreturn_t (*isr)(int, void *);
223 void *devid;
224};
225
226enum snic_state {
227 SNIC_INIT = 0,
228 SNIC_ERROR,
229 SNIC_ONLINE,
230 SNIC_OFFLINE,
231 SNIC_FWRESET,
232};
233
234#define SNIC_WQ_MAX 1
235#define SNIC_CQ_IO_CMPL_MAX 1
236#define SNIC_CQ_MAX (SNIC_WQ_MAX + SNIC_CQ_IO_CMPL_MAX)
237
238/* firmware version information */
239struct snic_fw_info {
240 u32 fw_ver;
241 u32 hid; /* u16 hid | u16 vnic id */
242 u32 max_concur_ios; /* max concurrent ios */
243 u32 max_sgs_per_cmd; /* max sgls per IO */
244 u32 max_io_sz; /* max io size supported */
245 u32 hba_cap; /* hba capabilities */
246 u32 max_tgts; /* max tgts supported */
247 u16 io_tmo; /* FW Extended timeout */
248 struct completion *wait; /* protected by snic lock */
249};
250
251/*
252 * snic_work item : defined to process asynchronous events
253 */
254struct snic_work {
255 struct work_struct work;
256 u16 ev_id;
257 u64 *ev_data;
258};
259
260/*
261 * snic structure to represent SCSI vNIC
262 */
263struct snic {
264 /* snic specific members */
265 struct list_head list;
266 char name[IFNAMSIZ];
267 atomic_t state;
268 spinlock_t snic_lock;
269 struct completion *remove_wait;
270 bool in_remove;
271 bool stop_link_events; /* stop processing link events */
272
273 /* discovery related */
274 struct snic_disc disc;
275
276 /* Scsi Host info */
277 struct Scsi_Host *shost;
278
279 /* vnic related structures */
280 struct vnic_dev_bar bar0;
281
282 struct vnic_stats *stats;
283 unsigned long stats_time;
284 unsigned long stats_reset_time;
285
286 struct vnic_dev *vdev;
287
288 /* hw resource info */
289 unsigned int wq_count;
290 unsigned int cq_count;
291 unsigned int intr_count;
292 unsigned int err_intr_offset;
293
294 int link_status; /* retrieved from svnic_dev_link_status() */
295 u32 link_down_cnt;
296
297 /* pci related */
298 struct pci_dev *pdev;
299 struct msix_entry msix_entry[SNIC_MSIX_INTR_MAX];
300 struct snic_msix_entry msix[SNIC_MSIX_INTR_MAX];
301
302 /* io related info */
303 mempool_t *req_pool[SNIC_REQ_MAX_CACHES]; /* (??) */
304 ____cacheline_aligned spinlock_t io_req_lock[SNIC_IO_LOCKS];
305
306 /* Maintain snic specific commands, cmds with no tag in spl_cmd_list */
307 ____cacheline_aligned spinlock_t spl_cmd_lock;
308 struct list_head spl_cmd_list;
309
310 unsigned int max_tag_id;
311 atomic_t ios_inflight; /* io in flight counter */
312
313 struct vnic_snic_config config;
314
315 struct work_struct link_work;
316
317 /* firmware information */
318 struct snic_fw_info fwinfo;
319
320 /* Work for processing Target related work */
321 struct work_struct tgt_work;
322
323 /* Work for processing Discovery */
324 struct work_struct disc_work;
325
326 /* stats related */
327 unsigned int reset_stats;
328 atomic64_t io_cmpl_skip;
329 struct snic_stats s_stats; /* Per SNIC driver stats */
330
331 /* platform specific */
332#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
333 struct dentry *stats_host; /* Per snic debugfs root */
334 struct dentry *stats_file; /* Per snic debugfs file */
335 struct dentry *reset_stats_file;/* Per snic reset stats file */
336#endif
337
338 /* completion queue cache line section */
339 ____cacheline_aligned struct vnic_cq cq[SNIC_CQ_MAX];
340
341 /* work queue cache line section */
342 ____cacheline_aligned struct vnic_wq wq[SNIC_WQ_MAX];
343 spinlock_t wq_lock[SNIC_WQ_MAX];
344
345 /* interrupt resource cache line section */
346 ____cacheline_aligned struct vnic_intr intr[SNIC_MSIX_INTR_MAX];
347}; /* end of snic structure */
348
349/*
350 * SNIC Driver's Global Data
351 */
352struct snic_global {
353 struct list_head snic_list;
354 spinlock_t snic_list_lock;
355
356 struct kmem_cache *req_cache[SNIC_REQ_MAX_CACHES];
357
358 struct workqueue_struct *event_q;
359
360#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
361 /* debugfs related global data */
362 struct dentry *trc_root;
363 struct dentry *stats_root;
364
365 struct snic_trc trc ____cacheline_aligned;
366#endif
367};
368
369extern struct snic_global *snic_glob;
370
371int snic_glob_init(void);
372void snic_glob_cleanup(void);
373
374extern struct workqueue_struct *snic_event_queue;
375extern struct device_attribute *snic_attrs[];
376
377int snic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
378int snic_abort_cmd(struct scsi_cmnd *);
379int snic_device_reset(struct scsi_cmnd *);
380int snic_host_reset(struct scsi_cmnd *);
381int snic_reset(struct Scsi_Host *, struct scsi_cmnd *);
382void snic_shutdown_scsi_cleanup(struct snic *);
383
384
385int snic_request_intr(struct snic *);
386void snic_free_intr(struct snic *);
387int snic_set_intr_mode(struct snic *);
388void snic_clear_intr_mode(struct snic *);
389
390int snic_fwcq_cmpl_handler(struct snic *, int);
391int snic_wq_cmpl_handler(struct snic *, int);
392void snic_free_wq_buf(struct vnic_wq *, struct vnic_wq_buf *);
393
394
395void snic_log_q_error(struct snic *);
396void snic_handle_link_event(struct snic *);
397void snic_handle_link(struct work_struct *);
398
399int snic_queue_exch_ver_req(struct snic *);
400int snic_io_exch_ver_cmpl_handler(struct snic *, struct snic_fw_req *);
401
402int snic_queue_wq_desc(struct snic *, void *os_buf, u16 len);
403
404void snic_handle_untagged_req(struct snic *, struct snic_req_info *);
405void snic_release_untagged_req(struct snic *, struct snic_req_info *);
406void snic_free_all_untagged_reqs(struct snic *);
407int snic_get_conf(struct snic *);
408void snic_set_state(struct snic *, enum snic_state);
409int snic_get_state(struct snic *);
410const char *snic_state_to_str(unsigned int);
411void snic_hex_dump(char *, char *, int);
412void snic_print_desc(const char *fn, char *os_buf, int len);
413const char *show_opcode_name(int val);
414#endif /* _SNIC_H_ */
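
The special-request tag bits near the top of this header are worth a worked
example: an abort for the command carrying driver tag 5 travels to firmware as
SNIC_TAG_ABORT | 5 = 0x40000005, and the completion path recovers the original
tag with SNIC_TAG_MASK (BIT(24) - 1). A minimal sketch using only the
definitions above; the function name is hypothetical:

    /* Compose a firmware tag for an abort of SCSI command tag 5, then
     * recover the command tag as the completion side would. */
    static u32 demo_abort_tag(u32 sc_tag)          /* e.g. sc_tag = 5 */
    {
        u32 fw_tag = SNIC_TAG_ABORT | sc_tag;      /* 0x40000005 */

        /* completion side: the request-type bit identifies an abort,
         * and the mask strips it to recover the command tag */
        SNIC_BUG_ON(!(fw_tag & SNIC_TAG_ABORT));

        return fw_tag & SNIC_TAG_MASK;             /* back to 5 */
    }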
diff --git a/drivers/scsi/snic/snic_attrs.c b/drivers/scsi/snic/snic_attrs.c
new file mode 100644
index 000000000000..32d5d556b6f8
--- /dev/null
+++ b/drivers/scsi/snic/snic_attrs.c
@@ -0,0 +1,77 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#include <linux/string.h>
19#include <linux/device.h>
20
21#include "snic.h"
22
23static ssize_t
24snic_show_sym_name(struct device *dev,
25 struct device_attribute *attr,
26 char *buf)
27{
28 struct snic *snic = shost_priv(class_to_shost(dev));
29
30 return snprintf(buf, PAGE_SIZE, "%s\n", snic->name);
31}
32
33static ssize_t
34snic_show_state(struct device *dev,
35 struct device_attribute *attr,
36 char *buf)
37{
38 struct snic *snic = shost_priv(class_to_shost(dev));
39
40 return snprintf(buf, PAGE_SIZE, "%s\n",
41 snic_state_str[snic_get_state(snic)]);
42}
43
44static ssize_t
45snic_show_drv_version(struct device *dev,
46 struct device_attribute *attr,
47 char *buf)
48{
49 return snprintf(buf, PAGE_SIZE, "%s\n", SNIC_DRV_VERSION);
50}
51
52static ssize_t
53snic_show_link_state(struct device *dev,
54 struct device_attribute *attr,
55 char *buf)
56{
57 struct snic *snic = shost_priv(class_to_shost(dev));
58
59 if (snic->config.xpt_type == SNIC_DAS)
60 snic->link_status = svnic_dev_link_status(snic->vdev);
61
62 return snprintf(buf, PAGE_SIZE, "%s\n",
63 (snic->link_status) ? "Link Up" : "Link Down");
64}
65
66static DEVICE_ATTR(snic_sym_name, S_IRUGO, snic_show_sym_name, NULL);
67static DEVICE_ATTR(snic_state, S_IRUGO, snic_show_state, NULL);
68static DEVICE_ATTR(drv_version, S_IRUGO, snic_show_drv_version, NULL);
69static DEVICE_ATTR(link_state, S_IRUGO, snic_show_link_state, NULL);
70
71struct device_attribute *snic_attrs[] = {
72 &dev_attr_snic_sym_name,
73 &dev_attr_snic_state,
74 &dev_attr_drv_version,
75 &dev_attr_link_state,
76 NULL,
77};
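
These four read-only attributes surface through the SCSI host's sysfs
directory, conventionally /sys/class/scsi_host/hostN/; the path and the host
number in the sketch below are assumptions for the demo, not stated by the
patch. A trivial user-space reader:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/class/scsi_host/host0/link_state", "r");
        char buf[32];

        if (f && fgets(buf, sizeof(buf), f))
            printf("%s", buf);      /* "Link Up" or "Link Down" */
        if (f)
            fclose(f);
        return f ? 0 : 1;
    }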
diff --git a/drivers/scsi/snic/snic_ctl.c b/drivers/scsi/snic/snic_ctl.c
new file mode 100644
index 000000000000..aebe75320ed3
--- /dev/null
+++ b/drivers/scsi/snic/snic_ctl.c
@@ -0,0 +1,279 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#include <linux/errno.h>
19#include <linux/pci.h>
20#include <linux/slab.h>
21
22#include <linux/interrupt.h>
23#include <linux/workqueue.h>
24#include <linux/spinlock.h>
25#include <linux/mempool.h>
26#include <scsi/scsi_tcq.h>
27#include <linux/ctype.h>
28
29#include "snic_io.h"
30#include "snic.h"
31#include "cq_enet_desc.h"
32#include "snic_fwint.h"
33
34/*
35 * snic_handle_link : Handles link flaps.
36 */
37void
38snic_handle_link(struct work_struct *work)
39{
40 struct snic *snic = container_of(work, struct snic, link_work);
41
42 if (snic->config.xpt_type != SNIC_DAS) {
43 SNIC_HOST_INFO(snic->shost, "Link Event Received.\n");
44 SNIC_ASSERT_NOT_IMPL(1);
45
46 return;
47 }
48
49 snic->link_status = svnic_dev_link_status(snic->vdev);
50 snic->link_down_cnt = svnic_dev_link_down_cnt(snic->vdev);
51 SNIC_HOST_INFO(snic->shost, "Link Event: Link %s.\n",
52 ((snic->link_status) ? "Up" : "Down"));
53}
54
55
56/*
57 * snic_ver_enc : Encodes version str to int
58 * version string is similar to netmask string
59 */
60static int
61snic_ver_enc(const char *s)
62{
63 int v[4] = {0};
64 int i = 0, x = 0;
65 char c;
66 const char *p = s;
67
68 /* validate version string */
69 if ((strlen(s) > 15) || (strlen(s) < 7))
70 goto end;
71
72 while ((c = *p++)) {
73 if (c == '.') {
74 i++;
75 continue;
76 }
77
78 if (i > 3 || !isdigit(c))
79 goto end;
80
81 v[i] = v[i] * 10 + (c - '0');
82 }
83
84 /* validate sub version numbers */
85 for (i = 3; i >= 0; i--)
86 if (v[i] > 0xff)
87 goto end;
88
89 x |= (v[0] << 24) | v[1] << 16 | v[2] << 8 | v[3];
90
91end:
92 if (x == 0) {
93 SNIC_ERR("Invalid version string [%s].\n", s);
94
95 return -1;
96 }
97
98 return x;
99} /* end of snic_ver_enc */
100
101/*
102 * snic_queue_exch_ver_req :
103 *
104 * Queues Exchange Version Request, to communicate host information
105 * in return, it gets firmware version details
106 */
107int
108snic_queue_exch_ver_req(struct snic *snic)
109{
110 struct snic_req_info *rqi = NULL;
111 struct snic_host_req *req = NULL;
112 u32 ver = 0;
113 int ret = 0;
114
115 SNIC_HOST_INFO(snic->shost, "Exch Ver Req Preparing...\n");
116
117 rqi = snic_req_init(snic, 0);
118 if (!rqi) {
119 ret = -ENOMEM;
120 SNIC_HOST_ERR(snic->shost,
121 "Queuing Exch Ver Req failed, err = %d\n",
122 ret);
123
124 goto error;
125 }
126
127 req = rqi_to_req(rqi);
128
129 /* Initialize snic_host_req */
130 snic_io_hdr_enc(&req->hdr, SNIC_REQ_EXCH_VER, 0, SCSI_NO_TAG,
131 snic->config.hid, 0, (ulong)rqi);
132 ver = snic_ver_enc(SNIC_DRV_VERSION);
133 req->u.exch_ver.drvr_ver = cpu_to_le32(ver);
134 req->u.exch_ver.os_type = cpu_to_le32(SNIC_OS_LINUX);
135
136 snic_handle_untagged_req(snic, rqi);
137
138 ret = snic_queue_wq_desc(snic, req, sizeof(*req));
139 if (ret) {
140 snic_release_untagged_req(snic, rqi);
141 SNIC_HOST_ERR(snic->shost,
142 "Queuing Exch Ver Req failed, err = %d\n",
143 ret);
144 goto error;
145 }
146
147 SNIC_HOST_INFO(snic->shost, "Exch Ver Req is issued. ret = %d\n", ret);
148
149error:
150 return ret;
151} /* end of snic_queue_exch_ver_req */
152
153/*
154 * snic_io_exch_ver_cmpl_handler
155 */
156int
157snic_io_exch_ver_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
158{
159 struct snic_req_info *rqi = NULL;
160 struct snic_exch_ver_rsp *exv_cmpl = &fwreq->u.exch_ver_cmpl;
161 u8 typ, hdr_stat;
162 u32 cmnd_id, hid, max_sgs;
163 ulong ctx = 0;
164 unsigned long flags;
165 int ret = 0;
166
167 SNIC_HOST_INFO(snic->shost, "Exch Ver Compl Received.\n");
168 snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
169 SNIC_BUG_ON(snic->config.hid != hid);
170 rqi = (struct snic_req_info *) ctx;
171
172 if (hdr_stat) {
173 SNIC_HOST_ERR(snic->shost,
174 "Exch Ver Completed w/ err status %d\n",
175 hdr_stat);
176
177 goto exch_cmpl_end;
178 }
179
180 spin_lock_irqsave(&snic->snic_lock, flags);
181 snic->fwinfo.fw_ver = le32_to_cpu(exv_cmpl->version);
182 snic->fwinfo.hid = le32_to_cpu(exv_cmpl->hid);
183 snic->fwinfo.max_concur_ios = le32_to_cpu(exv_cmpl->max_concur_ios);
184 snic->fwinfo.max_sgs_per_cmd = le32_to_cpu(exv_cmpl->max_sgs_per_cmd);
185 snic->fwinfo.max_io_sz = le32_to_cpu(exv_cmpl->max_io_sz);
186 snic->fwinfo.max_tgts = le32_to_cpu(exv_cmpl->max_tgts);
187 snic->fwinfo.io_tmo = le16_to_cpu(exv_cmpl->io_timeout);
188
189 SNIC_HOST_INFO(snic->shost,
190 "vers %u hid %u max_concur_ios %u max_sgs_per_cmd %u max_io_sz %u max_tgts %u fw tmo %u\n",
191 snic->fwinfo.fw_ver,
192 snic->fwinfo.hid,
193 snic->fwinfo.max_concur_ios,
194 snic->fwinfo.max_sgs_per_cmd,
195 snic->fwinfo.max_io_sz,
196 snic->fwinfo.max_tgts,
197 snic->fwinfo.io_tmo);
198
199 SNIC_HOST_INFO(snic->shost,
200 "HBA Capabilities = 0x%x\n",
201 le32_to_cpu(exv_cmpl->hba_cap));
202
203 /* Updating SGList size */
204 max_sgs = snic->fwinfo.max_sgs_per_cmd;
205 if (max_sgs && max_sgs < SNIC_MAX_SG_DESC_CNT) {
206 snic->shost->sg_tablesize = max_sgs;
207 SNIC_HOST_INFO(snic->shost, "Max SGs set to %d\n",
208 snic->shost->sg_tablesize);
209 } else if (max_sgs > snic->shost->sg_tablesize) {
210 SNIC_HOST_INFO(snic->shost,
211 "Target type %d Supports Larger Max SGList %d than driver's Max SG List %d.\n",
212 snic->config.xpt_type, max_sgs,
213 snic->shost->sg_tablesize);
214 }
215
216 if (snic->shost->can_queue > snic->fwinfo.max_concur_ios)
217 snic->shost->can_queue = snic->fwinfo.max_concur_ios;
218
219 snic->shost->max_sectors = snic->fwinfo.max_io_sz >> 9;
220 if (snic->fwinfo.wait)
221 complete(snic->fwinfo.wait);
222
223 spin_unlock_irqrestore(&snic->snic_lock, flags);
224
225exch_cmpl_end:
226 snic_release_untagged_req(snic, rqi);
227
228 SNIC_HOST_INFO(snic->shost, "Exch_cmpl Done, hdr_stat %d.\n", hdr_stat);
229
230 return ret;
231} /* end of snic_io_exch_ver_cmpl_handler */
232
233/*
234 * snic_get_conf
235 *
236 * Synchronous call that retrieves snic params.
237 */
238int
239snic_get_conf(struct snic *snic)
240{
241 DECLARE_COMPLETION_ONSTACK(wait);
242 unsigned long flags;
243 int ret;
244 int nr_retries = 3;
245
246 SNIC_HOST_INFO(snic->shost, "Retrieving snic params.\n");
247 spin_lock_irqsave(&snic->snic_lock, flags);
248 memset(&snic->fwinfo, 0, sizeof(snic->fwinfo));
249 snic->fwinfo.wait = &wait;
250 spin_unlock_irqrestore(&snic->snic_lock, flags);
251
252 /* Additional delay to handle HW Resource initialization. */
253 msleep(50);
254
255 /*
256 * Exch ver req can be ignored by FW, if HW Resource initialization
257 * is in progress, Hence retry.
258 */
259 do {
260 ret = snic_queue_exch_ver_req(snic);
261 if (ret)
262 return ret;
263
264 wait_for_completion_timeout(&wait, msecs_to_jiffies(2000));
265 spin_lock_irqsave(&snic->snic_lock, flags);
266 ret = (snic->fwinfo.fw_ver != 0) ? 0 : -ETIMEDOUT;
267 if (ret)
268 SNIC_HOST_ERR(snic->shost,
269 "Failed to retrieve snic params,\n");
270
271 /* Unset fwinfo.wait, on success or on last retry */
272 if (ret == 0 || nr_retries == 1)
273 snic->fwinfo.wait = NULL;
274
275 spin_unlock_irqrestore(&snic->snic_lock, flags);
276 } while (ret && --nr_retries);
277
278 return ret;
279} /* end of snic_get_conf */
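
snic_ver_enc() packs a dotted-quad version string one byte per component, most
significant first, so the driver's own SNIC_DRV_VERSION "0.0.1.18" encodes to
(0 << 24) | (0 << 16) | (1 << 8) | 18 = 0x00000112. A user-space sketch of the
same packing, not part of this patch; the per-component range checks on values
above 0xff are elided for brevity:

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    /* "a.b.c.d" -> a<<24 | b<<16 | c<<8 | d, mirroring snic_ver_enc() */
    static int demo_ver_enc(const char *s)
    {
        int v[4] = {0}, i = 0;
        char c;

        if (strlen(s) > 15 || strlen(s) < 7)
            return -1;

        while ((c = *s++)) {
            if (c == '.') {
                i++;
                continue;
            }
            if (i > 3 || !isdigit((unsigned char)c))
                return -1;
            v[i] = v[i] * 10 + (c - '0');
        }
        return (v[0] << 24) | (v[1] << 16) | (v[2] << 8) | v[3];
    }

    int main(void)
    {
        printf("0x%08x\n", demo_ver_enc("0.0.1.18"));   /* 0x00000112 */
        return 0;
    }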
diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c
new file mode 100644
index 000000000000..1686f0196251
--- /dev/null
+++ b/drivers/scsi/snic/snic_debugfs.c
@@ -0,0 +1,560 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#include <linux/module.h>
19#include <linux/errno.h>
20#include <linux/debugfs.h>
21
22#include "snic.h"
23
24/*
25 * snic_debugfs_init - Initialize debugfs for snic debug logging
26 *
27 * Description:
28 * When debugfs is configured this routine sets up the snic debugfs
29 * filesystem. If not already created, this routine will create the
30 * snic directory and statistics directory for trace buffer and
31 * stats logging
32 */
33
34int
35snic_debugfs_init(void)
36{
37 int rc = -1;
38 struct dentry *de = NULL;
39
40 de = debugfs_create_dir("snic", NULL);
41 if (!de) {
42 SNIC_DBG("Cannot create debugfs root\n");
43
44 return rc;
45 }
46 snic_glob->trc_root = de;
47
48 de = debugfs_create_dir("statistics", snic_glob->trc_root);
49 if (!de) {
50 SNIC_DBG("Cannot create Statistics directory\n");
51
52 return rc;
53 }
54 snic_glob->stats_root = de;
55
56 rc = 0;
57
58 return rc;
59} /* end of snic_debugfs_init */
60
61/*
62 * snic_debugfs_term - Tear down debugfs infrastructure
63 *
64 * Description:
65 * When debugfs is configured this routine removes debugfs file system
66 * elements that are specific to snic
67 */
68void
69snic_debugfs_term(void)
70{
71 debugfs_remove(snic_glob->stats_root);
72 snic_glob->stats_root = NULL;
73
74 debugfs_remove(snic_glob->trc_root);
75 snic_glob->trc_root = NULL;
76}
77
78/*
79 * snic_reset_stats_open - Open the reset_stats file
80 */
81static int
82snic_reset_stats_open(struct inode *inode, struct file *filp)
83{
84 SNIC_BUG_ON(!inode->i_private);
85 filp->private_data = inode->i_private;
86
87 return 0;
88}
89
90/*
91 * snic_reset_stats_read - Read a reset_stats debugfs file
92 * @filp: The file pointer to read from.
93 * @ubuf: The buffer to copy the data to.
94 * @cnt: The number of bytes to read.
95 * @ppos: The position in the file to start reading from.
96 *
97 * Description:
98 * This routine reads the value of the reset_stats variable
99 * and stores it into local @buf. It starts reading the file at @ppos and
100 * copies up to @cnt bytes of data from @buf to @ubuf.
101 *
102 * Returns:
103 * This function returns the amount of data that was read.
104 */
105static ssize_t
106snic_reset_stats_read(struct file *filp,
107 char __user *ubuf,
108 size_t cnt,
109 loff_t *ppos)
110{
111 struct snic *snic = (struct snic *) filp->private_data;
112 char buf[64];
113 int len;
114
115 len = sprintf(buf, "%u\n", snic->reset_stats);
116
117 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
118}
119
120/*
121 * snic_reset_stats_write - Write to reset_stats debugfs file
122 * @filp: The file pointer to write from
123 * @ubuf: The buffer to copy the data from.
124 * @cnt: The number of bytes to write.
125 * @ppos: The position in the file to start writing to.
126 *
127 * Description:
128 * This routine writes data from user buffer @ubuf to buffer @buf and
129 * resets cumulative stats of snic.
130 *
131 * Returns:
132 * This function returns the amount of data that was written.
133 */
134static ssize_t
135snic_reset_stats_write(struct file *filp,
136 const char __user *ubuf,
137 size_t cnt,
138 loff_t *ppos)
139{
140 struct snic *snic = (struct snic *) filp->private_data;
141 struct snic_stats *stats = &snic->s_stats;
142 u64 *io_stats_p = (u64 *) &stats->io;
143 u64 *fw_stats_p = (u64 *) &stats->fw;
144 char buf[64];
145 unsigned long val;
146 int ret;
147
148 if (cnt >= sizeof(buf))
149 return -EINVAL;
150
151 if (copy_from_user(&buf, ubuf, cnt))
152 return -EFAULT;
153
154 buf[cnt] = '\0';
155
156 ret = kstrtoul(buf, 10, &val);
157 if (ret < 0)
158 return ret;
159
160 snic->reset_stats = val;
161
162 if (snic->reset_stats) {
163 /* Skip variable is used to avoid discrepancies between Num IOs
164 * and IO Completions stats. Skip incrementing Num IO Compls
165 * for pending active IOs after reset_stats
166 */
167 atomic64_set(&snic->io_cmpl_skip,
168 atomic64_read(&stats->io.active));
169 memset(&stats->abts, 0, sizeof(struct snic_abort_stats));
170 memset(&stats->reset, 0, sizeof(struct snic_reset_stats));
171 memset(&stats->misc, 0, sizeof(struct snic_misc_stats));
172 memset(io_stats_p+1,
173 0,
174 sizeof(struct snic_io_stats) - sizeof(u64));
175 memset(fw_stats_p+1,
176 0,
177 sizeof(struct snic_fw_stats) - sizeof(u64));
178 }
179
180 (*ppos)++;
181
182 SNIC_HOST_INFO(snic->shost, "Reset Op: Driver statistics.\n");
183
184 return cnt;
185}
186
187static int
188snic_reset_stats_release(struct inode *inode, struct file *filp)
189{
190 filp->private_data = NULL;
191
192 return 0;
193}
194
195/*
196 * snic_stats_show - Formats and prints per host specific driver stats.
197 */
198static int
199snic_stats_show(struct seq_file *sfp, void *data)
200{
201 struct snic *snic = (struct snic *) sfp->private;
202 struct snic_stats *stats = &snic->s_stats;
203 struct timespec last_isr_tms, last_ack_tms;
204 u64 maxio_tm;
205 int i;
206
207 /* Dump IO Stats */
208 seq_printf(sfp,
209 "------------------------------------------\n"
210 "\t\t IO Statistics\n"
211 "------------------------------------------\n");
212
213 maxio_tm = (u64) atomic64_read(&stats->io.max_time);
214 seq_printf(sfp,
215 "Active IOs : %lld\n"
216 "Max Active IOs : %lld\n"
217 "Total IOs : %lld\n"
218 "IOs Completed : %lld\n"
219 "IOs Failed : %lld\n"
220 "IOs Not Found : %lld\n"
221 "Memory Alloc Failures : %lld\n"
222 "REQs Null : %lld\n"
223 "SCSI Cmd Pointers Null : %lld\n"
224 "Max SGL for any IO : %lld\n"
225 "Max IO Size : %lld Sectors\n"
226 "Max Queuing Time : %lld\n"
227 "Max Completion Time : %lld\n"
228 "Max IO Process Time(FW) : %lld (%u msec)\n",
229 (u64) atomic64_read(&stats->io.active),
230 (u64) atomic64_read(&stats->io.max_active),
231 (u64) atomic64_read(&stats->io.num_ios),
232 (u64) atomic64_read(&stats->io.compl),
233 (u64) atomic64_read(&stats->io.fail),
234 (u64) atomic64_read(&stats->io.io_not_found),
235 (u64) atomic64_read(&stats->io.alloc_fail),
236 (u64) atomic64_read(&stats->io.req_null),
237 (u64) atomic64_read(&stats->io.sc_null),
238 (u64) atomic64_read(&stats->io.max_sgl),
239 (u64) atomic64_read(&stats->io.max_io_sz),
240 (u64) atomic64_read(&stats->io.max_qtime),
241 (u64) atomic64_read(&stats->io.max_cmpl_time),
242 maxio_tm,
243 jiffies_to_msecs(maxio_tm));
244
245 seq_puts(sfp, "\nSGL Counters\n");
246
247 for (i = 0; i < SNIC_MAX_SG_DESC_CNT; i++) {
248 seq_printf(sfp,
249 "%10lld ",
250 (u64) atomic64_read(&stats->io.sgl_cnt[i]));
251
252 if ((i + 1) % 8 == 0)
253 seq_puts(sfp, "\n");
254 }
255
256 /* Dump Abort Stats */
257 seq_printf(sfp,
258 "\n-------------------------------------------\n"
259 "\t\t Abort Statistics\n"
260 "---------------------------------------------\n");
261
262 seq_printf(sfp,
263 "Aborts : %lld\n"
264 "Aborts Fail : %lld\n"
265 "Aborts Driver Timeout : %lld\n"
266 "Abort FW Timeout : %lld\n"
267 "Abort IO NOT Found : %lld\n",
268 (u64) atomic64_read(&stats->abts.num),
269 (u64) atomic64_read(&stats->abts.fail),
270 (u64) atomic64_read(&stats->abts.drv_tmo),
271 (u64) atomic64_read(&stats->abts.fw_tmo),
272 (u64) atomic64_read(&stats->abts.io_not_found));
273
274 /* Dump Reset Stats */
275 seq_printf(sfp,
276 "\n-------------------------------------------\n"
277 "\t\t Reset Statistics\n"
278 "---------------------------------------------\n");
279
280 seq_printf(sfp,
281 "HBA Resets : %lld\n"
282 "HBA Reset Cmpls : %lld\n"
283 "HBA Reset Fail : %lld\n",
284 (u64) atomic64_read(&stats->reset.hba_resets),
285 (u64) atomic64_read(&stats->reset.hba_reset_cmpl),
286 (u64) atomic64_read(&stats->reset.hba_reset_fail));
287
288 /* Dump Firmware Stats */
289 seq_printf(sfp,
290 "\n-------------------------------------------\n"
291 "\t\t Firmware Statistics\n"
292 "---------------------------------------------\n");
293
294 seq_printf(sfp,
295 "Active FW Requests : %lld\n"
296 "Max FW Requests : %lld\n"
297 "FW Out Of Resource Errs : %lld\n"
298 "FW IO Errors : %lld\n"
299 "FW SCSI Errors : %lld\n",
300 (u64) atomic64_read(&stats->fw.actv_reqs),
301 (u64) atomic64_read(&stats->fw.max_actv_reqs),
302 (u64) atomic64_read(&stats->fw.out_of_res),
303 (u64) atomic64_read(&stats->fw.io_errs),
304 (u64) atomic64_read(&stats->fw.scsi_errs));
305
306
307 /* Dump Miscellaneous Stats */
308 seq_printf(sfp,
309 "\n---------------------------------------------\n"
310 "\t\t Other Statistics\n"
311 "\n---------------------------------------------\n");
312
313 jiffies_to_timespec(stats->misc.last_isr_time, &last_isr_tms);
314 jiffies_to_timespec(stats->misc.last_ack_time, &last_ack_tms);
315
316 seq_printf(sfp,
317 "Last ISR Time : %llu (%8lu.%8lu)\n"
318 "Last Ack Time : %llu (%8lu.%8lu)\n"
319 "ISRs : %llu\n"
320 "Max CQ Entries : %lld\n"
321 "Data Count Mismatch : %lld\n"
322 "IOs w/ Timeout Status : %lld\n"
323 "IOs w/ Aborted Status : %lld\n"
324 "IOs w/ SGL Invalid Stat : %lld\n"
325 "WQ Desc Alloc Fail : %lld\n"
326 "Queue Full : %lld\n"
327 "Target Not Ready : %lld\n",
328 (u64) stats->misc.last_isr_time,
329 last_isr_tms.tv_sec, last_isr_tms.tv_nsec,
330 (u64)stats->misc.last_ack_time,
331 last_ack_tms.tv_sec, last_ack_tms.tv_nsec,
332 (u64) atomic64_read(&stats->misc.isr_cnt),
333 (u64) atomic64_read(&stats->misc.max_cq_ents),
334 (u64) atomic64_read(&stats->misc.data_cnt_mismat),
335 (u64) atomic64_read(&stats->misc.io_tmo),
336 (u64) atomic64_read(&stats->misc.io_aborted),
337 (u64) atomic64_read(&stats->misc.sgl_inval),
338 (u64) atomic64_read(&stats->misc.wq_alloc_fail),
339 (u64) atomic64_read(&stats->misc.qfull),
340 (u64) atomic64_read(&stats->misc.tgt_not_rdy));
341
342 return 0;
343}
344
345/*
346 * snic_stats_open - Open the stats file for a specific host
347 *
348 * Description:
349 * This routine opens the stats debugfs file of a specific host
350 */
351static int
352snic_stats_open(struct inode *inode, struct file *filp)
353{
354 return single_open(filp, snic_stats_show, inode->i_private);
355}
356
357static const struct file_operations snic_stats_fops = {
358 .owner = THIS_MODULE,
359 .open = snic_stats_open,
360 .read = seq_read,
361 .llseek = seq_lseek,
362 .release = single_release,
363};
364
365static const struct file_operations snic_reset_stats_fops = {
366 .owner = THIS_MODULE,
367 .open = snic_reset_stats_open,
368 .read = snic_reset_stats_read,
369 .write = snic_reset_stats_write,
370 .release = snic_reset_stats_release,
371};
372
373/*
374 * snic_stats_init - Initialize stats struct and create stats file
375 * per snic
376 *
377 * Description:
378 * When debugfs is configured this routine sets up the stats file per snic.
379 * It will create files stats and reset_stats under the statistics/host# directory
380 * to log per snic stats
381 */
382int
383snic_stats_debugfs_init(struct snic *snic)
384{
385 int rc = -1;
386 char name[16];
387 struct dentry *de = NULL;
388
389 snprintf(name, sizeof(name), "host%d", snic->shost->host_no);
390 if (!snic_glob->stats_root) {
391 SNIC_DBG("snic_stats root doesn't exist\n");
392
393 return rc;
394 }
395
396 de = debugfs_create_dir(name, snic_glob->stats_root);
397 if (!de) {
398 SNIC_DBG("Cannot create host directory\n");
399
400 return rc;
401 }
402 snic->stats_host = de;
403
404 de = debugfs_create_file("stats",
405 S_IFREG|S_IRUGO,
406 snic->stats_host,
407 snic,
408 &snic_stats_fops);
409 if (!de) {
410 SNIC_DBG("Cannot create host's stats file\n");
411
412 return rc;
413 }
414 snic->stats_file = de;
415
416 de = debugfs_create_file("reset_stats",
417 S_IFREG|S_IRUGO|S_IWUSR,
418 snic->stats_host,
419 snic,
420 &snic_reset_stats_fops);
421
422 if (!de) {
423 SNIC_DBG("Cannot create host's reset_stats file\n");
424
425 return rc;
426 }
427 snic->reset_stats_file = de;
428 rc = 0;
429
430 return rc;
431} /* end of snic_stats_debugfs_init */
432
433/*
434 * snic_stats_debugfs_remove - Tear down debugfs infrastructure of stats
435 *
436 * Description:
437 * When debugfs is configured this routine removes debugfs file system
438 * elements that are specific to snic stats
439 */
440void
441snic_stats_debugfs_remove(struct snic *snic)
442{
443 debugfs_remove(snic->stats_file);
444 snic->stats_file = NULL;
445
446 debugfs_remove(snic->reset_stats_file);
447 snic->reset_stats_file = NULL;
448
449 debugfs_remove(snic->stats_host);
450 snic->stats_host = NULL;
451}
452
453/* Trace Facility related API */
454static void *
455snic_trc_seq_start(struct seq_file *sfp, loff_t *pos)
456{
457 return &snic_glob->trc;
458}
459
460static void *
461snic_trc_seq_next(struct seq_file *sfp, void *data, loff_t *pos)
462{
463 return NULL;
464}
465
466static void
467snic_trc_seq_stop(struct seq_file *sfp, void *data)
468{
469}
470
471#define SNIC_TRC_PBLEN 256
472static int
473snic_trc_seq_show(struct seq_file *sfp, void *data)
474{
475 char buf[SNIC_TRC_PBLEN];
476
477 if (snic_get_trc_data(buf, SNIC_TRC_PBLEN) > 0)
478 seq_printf(sfp, "%s\n", buf);
479
480 return 0;
481}
482
483static const struct seq_operations snic_trc_seq_ops = {
484 .start = snic_trc_seq_start,
485 .next = snic_trc_seq_next,
486 .stop = snic_trc_seq_stop,
487 .show = snic_trc_seq_show,
488};
489
490static int
491snic_trc_open(struct inode *inode, struct file *filp)
492{
493 return seq_open(filp, &snic_trc_seq_ops);
494}
495
496static const struct file_operations snic_trc_fops = {
497 .owner = THIS_MODULE,
498 .open = snic_trc_open,
499 .read = seq_read,
500 .llseek = seq_lseek,
501 .release = seq_release,
502};
503
504/*
505 * snic_trc_debugfs_init : creates trace/tracing_enable files for trace
506 * under debugfs
507 */
508int
509snic_trc_debugfs_init(void)
510{
511 struct dentry *de = NULL;
512 int ret = -1;
513
514 if (!snic_glob->trc_root) {
515 SNIC_ERR("Debugfs root directory for snic doesn't exist.\n");
516
517 return ret;
518 }
519
520 de = debugfs_create_bool("tracing_enable",
521 S_IFREG | S_IRUGO | S_IWUSR,
522 snic_glob->trc_root,
523 &snic_glob->trc.enable);
524
525 if (!de) {
526 SNIC_ERR("Can't create trace_enable file.\n");
527
528 return ret;
529 }
530 snic_glob->trc.trc_enable = de;
531
532 de = debugfs_create_file("trace",
533 S_IFREG | S_IRUGO | S_IWUSR,
534 snic_glob->trc_root,
535 NULL,
536 &snic_trc_fops);
537
538 if (!de) {
539 SNIC_ERR("Cann't create trace file.\n");
540
541 return ret;
542 }
543 snic_glob->trc.trc_file = de;
544 ret = 0;
545
546 return ret;
547} /* end of snic_trc_debugfs_init */
548
549/*
550 * snic_trc_debugfs_term : cleans up the files created for trace under debugfs
551 */
552void
553snic_trc_debugfs_term(void)
554{
555 debugfs_remove(snic_glob->trc.trc_file);
556 snic_glob->trc.trc_file = NULL;
557
558 debugfs_remove(snic_glob->trc.trc_enable);
559 snic_glob->trc.trc_enable = NULL;
560}
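
With debugfs mounted at its conventional /sys/kernel/debug (an assumption; the
mount point is administrator-chosen), the files created above appear as
snic/statistics/hostN/stats, snic/statistics/hostN/reset_stats, snic/trace and
snic/tracing_enable. Writing any non-zero number to reset_stats clears the
cumulative counters via the kstrtoul() path in snic_reset_stats_write(); a
minimal user-space writer:

    #include <stdio.h>

    int main(void)
    {
        /* host0 and the debugfs mount point are assumptions for the demo */
        FILE *f = fopen("/sys/kernel/debug/snic/statistics/host0/reset_stats",
                        "w");

        if (!f)
            return 1;
        fputs("1\n", f);    /* parsed by kstrtoul(); non-zero resets stats */
        fclose(f);
        return 0;
    }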
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
new file mode 100644
index 000000000000..5f6321759ad9
--- /dev/null
+++ b/drivers/scsi/snic/snic_disc.c
@@ -0,0 +1,551 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#include <linux/errno.h>
19#include <linux/mempool.h>
20
21#include <scsi/scsi_tcq.h>
22
23#include "snic_disc.h"
24#include "snic.h"
25#include "snic_io.h"
26
27
28/* snic target types */
29static const char * const snic_tgt_type_str[] = {
30 [SNIC_TGT_DAS] = "DAS",
31 [SNIC_TGT_SAN] = "SAN",
32};
33
34static inline const char *
35snic_tgt_type_to_str(int typ)
36{
37 return ((typ > SNIC_TGT_NONE && typ <= SNIC_TGT_SAN) ?
38 snic_tgt_type_str[typ] : "Unknown");
39}
40
41static const char * const snic_tgt_state_str[] = {
42 [SNIC_TGT_STAT_INIT] = "INIT",
43 [SNIC_TGT_STAT_ONLINE] = "ONLINE",
44 [SNIC_TGT_STAT_OFFLINE] = "OFFLINE",
45 [SNIC_TGT_STAT_DEL] = "DELETION IN PROGRESS",
46};
47
48const char *
49snic_tgt_state_to_str(int state)
50{
51 return ((state >= SNIC_TGT_STAT_INIT && state <= SNIC_TGT_STAT_DEL) ?
52 snic_tgt_state_str[state] : "UNKNOWN");
53}
54
55/*
56 * Initiate report_tgt req desc
57 */
58static void
59snic_report_tgt_init(struct snic_host_req *req, u32 hid, u8 *buf, u32 len,
60 dma_addr_t rsp_buf_pa, ulong ctx)
61{
62 struct snic_sg_desc *sgd = NULL;
63
64
65 snic_io_hdr_enc(&req->hdr, SNIC_REQ_REPORT_TGTS, 0, SCSI_NO_TAG, hid,
66 1, ctx);
67
68 req->u.rpt_tgts.sg_cnt = cpu_to_le16(1);
69 sgd = req_to_sgl(req);
70 sgd[0].addr = cpu_to_le64(rsp_buf_pa);
71 sgd[0].len = cpu_to_le32(len);
72 sgd[0]._resvd = 0;
73 req->u.rpt_tgts.sg_addr = cpu_to_le64((ulong)sgd);
74}
75
76/*
77 * snic_queue_report_tgt_req: Queues report target request.
78 */
79static int
80snic_queue_report_tgt_req(struct snic *snic)
81{
82 struct snic_req_info *rqi = NULL;
83 u32 ntgts, buf_len = 0;
84 u8 *buf = NULL;
85 dma_addr_t pa = 0;
86 int ret = 0;
87
88 rqi = snic_req_init(snic, 1);
89 if (!rqi) {
90 ret = -ENOMEM;
91 goto error;
92 }
93
94 if (snic->fwinfo.max_tgts)
95 ntgts = min_t(u32, snic->fwinfo.max_tgts, snic->shost->max_id);
96 else
97 ntgts = snic->shost->max_id;
98
99 /* Allocate Response Buffer */
100 SNIC_BUG_ON(ntgts == 0);
101 buf_len = ntgts * sizeof(struct snic_tgt_id) + SNIC_SG_DESC_ALIGN;
102
103 buf = kzalloc(buf_len, GFP_KERNEL|GFP_DMA);
104 if (!buf) {
105 snic_req_free(snic, rqi);
106 SNIC_HOST_ERR(snic->shost, "Resp Buf Alloc Failed.\n");
107
108 ret = -ENOMEM;
109 goto error;
110 }
111
112 SNIC_BUG_ON((((unsigned long)buf) % SNIC_SG_DESC_ALIGN) != 0);
113
114 pa = pci_map_single(snic->pdev, buf, buf_len, PCI_DMA_FROMDEVICE);
115 if (pci_dma_mapping_error(snic->pdev, pa)) {
116 kfree(buf);
117 snic_req_free(snic, rqi);
118 SNIC_HOST_ERR(snic->shost,
119 "Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n",
120 buf);
121 ret = -EINVAL;
122
123 goto error;
124 }
125
126
127 SNIC_BUG_ON(pa == 0);
128 rqi->sge_va = (ulong) buf;
129
130 snic_report_tgt_init(rqi->req,
131 snic->config.hid,
132 buf,
133 buf_len,
134 pa,
135 (ulong)rqi);
136
137 snic_handle_untagged_req(snic, rqi);
138
139 ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
140 if (ret) {
141 pci_unmap_single(snic->pdev, pa, buf_len, PCI_DMA_FROMDEVICE);
142 kfree(buf);
143 rqi->sge_va = 0;
144 snic_release_untagged_req(snic, rqi);
145 SNIC_HOST_ERR(snic->shost, "Queuing Report Tgts Failed.\n");
146
147 goto error;
148 }
149
150 SNIC_DISC_DBG(snic->shost, "Report Targets Issued.\n");
151
152 return ret;
153
154error:
155 SNIC_HOST_ERR(snic->shost,
156 "Queuing Report Targets Failed, err = %d\n",
157 ret);
158 return ret;
159} /* end of snic_queue_report_tgt_req */
160
161/* call into SML */
162static void
163snic_scsi_scan_tgt(struct work_struct *work)
164{
165 struct snic_tgt *tgt = container_of(work, struct snic_tgt, scan_work);
166 struct Scsi_Host *shost = dev_to_shost(&tgt->dev);
167 unsigned long flags;
168
169 SNIC_HOST_INFO(shost, "Scanning Target id 0x%x\n", tgt->id);
170 scsi_scan_target(&tgt->dev,
171 tgt->channel,
172 tgt->scsi_tgt_id,
173 SCAN_WILD_CARD,
174 1);
175
176 spin_lock_irqsave(shost->host_lock, flags);
177 tgt->flags &= ~SNIC_TGT_SCAN_PENDING;
178 spin_unlock_irqrestore(shost->host_lock, flags);
179} /* end of snic_scsi_scan_tgt */
180
181/*
182 * snic_tgt_lookup :
183 */
184static struct snic_tgt *
185snic_tgt_lookup(struct snic *snic, struct snic_tgt_id *tgtid)
186{
187 struct list_head *cur, *nxt;
188 struct snic_tgt *tgt = NULL;
189
190 list_for_each_safe(cur, nxt, &snic->disc.tgt_list) {
191 tgt = list_entry(cur, struct snic_tgt, list);
192 if (tgt->id == le32_to_cpu(tgtid->tgt_id))
193 return tgt;
194 tgt = NULL;
195 }
196
197 return tgt;
198} /* end of snic_tgt_lookup */
199
200/*
201 * snic_tgt_dev_release : Called on dropping last ref for snic_tgt object
202 */
203void
204snic_tgt_dev_release(struct device *dev)
205{
206 struct snic_tgt *tgt = dev_to_tgt(dev);
207
208 SNIC_HOST_INFO(snic_tgt_to_shost(tgt),
209 "Target Device ID %d (%s) Permanently Deleted.\n",
210 tgt->id,
211 dev_name(dev));
212
213 SNIC_BUG_ON(!list_empty(&tgt->list));
214 kfree(tgt);
215}
216
217/*
218 * snic_tgt_del : work function to delete snic_tgt
219 */
220static void
221snic_tgt_del(struct work_struct *work)
222{
223 struct snic_tgt *tgt = container_of(work, struct snic_tgt, del_work);
224 struct Scsi_Host *shost = snic_tgt_to_shost(tgt);
225
226 if (tgt->flags & SNIC_TGT_SCAN_PENDING)
227 scsi_flush_work(shost);
228
229 /* Block IOs on child devices, stops new IOs */
230 scsi_target_block(&tgt->dev);
231
232 /* Cleanup IOs */
233 snic_tgt_scsi_abort_io(tgt);
234
235 /* Unblock IOs now, to flush if there are any. */
236 scsi_target_unblock(&tgt->dev, SDEV_TRANSPORT_OFFLINE);
237
238 /* Delete SCSI Target and sdevs */
239 scsi_remove_target(&tgt->dev); /* ?? */
240 device_del(&tgt->dev);
241 put_device(&tgt->dev);
242} /* end of snic_tgt_del */
243
244/* snic_tgt_create : checks for existence of snic_tgt; if it doesn't
245 * exist, it creates one.
246 */
247static struct snic_tgt *
248snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
249{
250 struct snic_tgt *tgt = NULL;
251 unsigned long flags;
252 int ret;
253
254 tgt = snic_tgt_lookup(snic, tgtid);
255 if (tgt) {
256 /* update the information if required */
257 return tgt;
258 }
259
260 tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
261 if (!tgt) {
262 SNIC_HOST_ERR(snic->shost, "Failure to allocate snic_tgt.\n");
263 ret = -ENOMEM;
264
265 return tgt;
266 }
267
268 INIT_LIST_HEAD(&tgt->list);
269 tgt->id = le32_to_cpu(tgtid->tgt_id);
270 tgt->channel = 0;
271
272 SNIC_BUG_ON(le16_to_cpu(tgtid->tgt_type) > SNIC_TGT_SAN);
273 tgt->tdata.typ = le16_to_cpu(tgtid->tgt_type);
274
275 /*
276 * Plugging into SML Device Tree
277 */
278 tgt->tdata.disc_id = 0;
279 tgt->state = SNIC_TGT_STAT_INIT;
280 device_initialize(&tgt->dev);
281 tgt->dev.parent = get_device(&snic->shost->shost_gendev);
282 tgt->dev.release = snic_tgt_dev_release;
283 INIT_WORK(&tgt->scan_work, snic_scsi_scan_tgt);
284 INIT_WORK(&tgt->del_work, snic_tgt_del);
285 switch (tgt->tdata.typ) {
286 case SNIC_TGT_DAS:
287 dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d",
288 snic->shost->host_no, tgt->channel, tgt->id);
289 break;
290
291 case SNIC_TGT_SAN:
292 dev_set_name(&tgt->dev, "snic_san_tgt:%d:%d-%d",
293 snic->shost->host_no, tgt->channel, tgt->id);
294 break;
295
296 default:
297 SNIC_HOST_INFO(snic->shost, "Unknown target type detected.\n");
298 dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d",
299 snic->shost->host_no, tgt->channel, tgt->id);
300 break;
301 }
302
303 spin_lock_irqsave(snic->shost->host_lock, flags);
304 list_add_tail(&tgt->list, &snic->disc.tgt_list);
305 tgt->scsi_tgt_id = snic->disc.nxt_tgt_id++;
306 tgt->state = SNIC_TGT_STAT_ONLINE;
307 spin_unlock_irqrestore(snic->shost->host_lock, flags);
308
309 SNIC_HOST_INFO(snic->shost,
310 "Tgt %d, type = %s detected. Adding..\n",
311 tgt->id, snic_tgt_type_to_str(tgt->tdata.typ));
312
313 ret = device_add(&tgt->dev);
314 if (ret) {
315 SNIC_HOST_ERR(snic->shost,
316 "Snic Tgt: device_add, with err = %d\n",
317 ret);
318
319 put_device(&snic->shost->shost_gendev);
320 kfree(tgt);
321 tgt = NULL;
322
323 return tgt;
324 }
325
326 SNIC_HOST_INFO(snic->shost, "Scanning %s.\n", dev_name(&tgt->dev));
327
328 scsi_queue_work(snic->shost, &tgt->scan_work);
329
330 return tgt;
331} /* end of snic_tgt_create */
332
333/* Handler for discovery */
334void
335snic_handle_tgt_disc(struct work_struct *work)
336{
337 struct snic *snic = container_of(work, struct snic, tgt_work);
338 struct snic_tgt_id *tgtid = NULL;
339 struct snic_tgt *tgt = NULL;
340 unsigned long flags;
341 int i;
342
343 spin_lock_irqsave(&snic->snic_lock, flags);
344 if (snic->in_remove) {
345 spin_unlock_irqrestore(&snic->snic_lock, flags);
346 kfree(snic->disc.rtgt_info);
347
348 return;
349 }
350 spin_unlock_irqrestore(&snic->snic_lock, flags);
351
352 mutex_lock(&snic->disc.mutex);
353 /* Discover triggered during disc in progress */
354 if (snic->disc.req_cnt) {
355 snic->disc.state = SNIC_DISC_DONE;
356 snic->disc.req_cnt = 0;
357 mutex_unlock(&snic->disc.mutex);
358 kfree(snic->disc.rtgt_info);
359 snic->disc.rtgt_info = NULL;
360
361 SNIC_HOST_INFO(snic->shost, "tgt_disc: Discovery restart.\n");
362 /* Start Discovery Again */
363 snic_disc_start(snic);
364
365 return;
366 }
367
368 tgtid = (struct snic_tgt_id *)snic->disc.rtgt_info;
369
370 SNIC_BUG_ON(snic->disc.rtgt_cnt == 0 || tgtid == NULL);
371
372 for (i = 0; i < snic->disc.rtgt_cnt; i++) {
373 tgt = snic_tgt_create(snic, &tgtid[i]);
374 if (!tgt) {
375 int buf_sz = snic->disc.rtgt_cnt * sizeof(*tgtid);
376
377 SNIC_HOST_ERR(snic->shost, "Failed to create tgt.\n");
378 snic_hex_dump("rpt_tgt_rsp", (char *)tgtid, buf_sz);
379 break;
380 }
381 }
382
383 snic->disc.rtgt_info = NULL;
384 snic->disc.state = SNIC_DISC_DONE;
385 mutex_unlock(&snic->disc.mutex);
386
387 SNIC_HOST_INFO(snic->shost, "Discovery Completed.\n");
388
389 kfree(tgtid);
390} /* end of snic_handle_tgt_disc */
391
392
393int
394snic_report_tgt_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
395{
396
397 u8 typ, cmpl_stat;
398 u32 cmnd_id, hid, tgt_cnt = 0;
399 ulong ctx;
400 struct snic_req_info *rqi = NULL;
401 struct snic_tgt_id *tgtid;
402 int i, ret = 0;
403
404 snic_io_hdr_dec(&fwreq->hdr, &typ, &cmpl_stat, &cmnd_id, &hid, &ctx);
405 rqi = (struct snic_req_info *) ctx;
406 tgtid = (struct snic_tgt_id *) rqi->sge_va;
407
408 tgt_cnt = le32_to_cpu(fwreq->u.rpt_tgts_cmpl.tgt_cnt);
409 if (tgt_cnt == 0) {
410 SNIC_HOST_ERR(snic->shost, "No Targets Found on this host.\n");
411 ret = 1;
412
413 goto end;
414 }
415
416 /* printing list of targets here */
417 SNIC_HOST_INFO(snic->shost, "Target Count = %d\n", tgt_cnt);
418
419 SNIC_BUG_ON(tgt_cnt > snic->fwinfo.max_tgts);
420
421 for (i = 0; i < tgt_cnt; i++)
422 SNIC_HOST_INFO(snic->shost,
423 "Tgt id = 0x%x\n",
424 le32_to_cpu(tgtid[i].tgt_id));
425
426 /*
427 * Queue work for further processing,
428 * Response Buffer Memory is freed after creating targets
429 */
430 snic->disc.rtgt_cnt = tgt_cnt;
431 snic->disc.rtgt_info = (u8 *) tgtid;
432 queue_work(snic_glob->event_q, &snic->tgt_work);
433 ret = 0;
434
435end:
436 /* Unmap Response Buffer */
437 snic_pci_unmap_rsp_buf(snic, rqi);
438 if (ret)
439 kfree(tgtid);
440
441 rqi->sge_va = 0;
442 snic_release_untagged_req(snic, rqi);
443
444 return ret;
445} /* end of snic_report_tgt_cmpl_handler */
446
447/* Discovery init fn */
448void
449snic_disc_init(struct snic_disc *disc)
450{
451 INIT_LIST_HEAD(&disc->tgt_list);
452 mutex_init(&disc->mutex);
453 disc->disc_id = 0;
454 disc->nxt_tgt_id = 0;
455 disc->state = SNIC_DISC_INIT;
456 disc->req_cnt = 0;
457 disc->rtgt_cnt = 0;
458 disc->rtgt_info = NULL;
459 disc->cb = NULL;
460} /* end of snic_disc_init */
461
462/* Discovery, uninit fn */
463void
464snic_disc_term(struct snic *snic)
465{
466 struct snic_disc *disc = &snic->disc;
467
468 mutex_lock(&disc->mutex);
469 if (disc->req_cnt) {
470 disc->req_cnt = 0;
471 SNIC_SCSI_DBG(snic->shost, "Terminating Discovery.\n");
472 }
473 mutex_unlock(&disc->mutex);
474}
475
476/*
477 * snic_disc_start: Discovery Start ...
478 */
479int
480snic_disc_start(struct snic *snic)
481{
482 struct snic_disc *disc = &snic->disc;
483 int ret = 0;
484
485 SNIC_SCSI_DBG(snic->shost, "Discovery Start.\n");
486
487 mutex_lock(&disc->mutex);
488 if (disc->state == SNIC_DISC_PENDING) {
489 disc->req_cnt++;
490 mutex_unlock(&disc->mutex);
491
492 return ret;
493 }
494 disc->state = SNIC_DISC_PENDING;
495 mutex_unlock(&disc->mutex);
496
497 ret = snic_queue_report_tgt_req(snic);
498 if (ret)
499 SNIC_HOST_INFO(snic->shost, "Discovery Failed, err=%d.\n", ret);
500
501 return ret;
502} /* end of snic_disc_start */
503
504/*
505 * snic_handle_disc : Discovery work function
506 */
507void
508snic_handle_disc(struct work_struct *work)
509{
510 struct snic *snic = container_of(work, struct snic, disc_work);
511 int ret = 0;
512
513 SNIC_HOST_INFO(snic->shost, "disc_work: Discovery\n");
514
515 ret = snic_disc_start(snic);
516 if (!ret)
517 return;
518
519 /* Discovery could not be started; log the failure */
520 SNIC_HOST_ERR(snic->shost,
521 "disc_work: Discovery Failed w/ err = %d\n",
522 ret);
523} /* end of snic_handle_disc */
524
525/*
526 * snic_tgt_del_all : cleanup all snic targets
527 * Called on unbinding the interface
528 */
529void
530snic_tgt_del_all(struct snic *snic)
531{
532 struct snic_tgt *tgt = NULL;
533 struct list_head *cur, *nxt;
534 unsigned long flags;
535
536 mutex_lock(&snic->disc.mutex);
537 spin_lock_irqsave(snic->shost->host_lock, flags);
538
539 list_for_each_safe(cur, nxt, &snic->disc.tgt_list) {
540 tgt = list_entry(cur, struct snic_tgt, list);
541 tgt->state = SNIC_TGT_STAT_DEL;
542 list_del_init(&tgt->list);
543 SNIC_HOST_INFO(snic->shost, "Tgt %d q'ing for del\n", tgt->id);
544 queue_work(snic_glob->event_q, &tgt->del_work);
545 tgt = NULL;
546 }
547 spin_unlock_irqrestore(snic->shost->host_lock, flags);
548
549 scsi_flush_work(snic->shost);
550 mutex_unlock(&snic->disc.mutex);
551} /* end of snic_tgt_del_all */
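
snic_disc_start() coalesces overlapping scans: a request that arrives while one
is pending only bumps disc->req_cnt, and snic_handle_tgt_disc() restarts
discovery once the in-flight pass finishes. A minimal sketch of how a caller
(for instance an async event handler) might rely on that; snic_on_tgt_change()
is a hypothetical helper, not part of this patch:

	/* Hypothetical caller: re-run discovery on a fw topology event.
	 * Safe to call at any time; concurrent requests are coalesced
	 * via disc->req_cnt under disc->mutex in snic_disc_start().
	 */
	static void
	snic_on_tgt_change(struct snic *snic)
	{
		int ret = snic_disc_start(snic);

		if (ret)
			SNIC_HOST_ERR(snic->shost,
				      "tgt change: disc start failed %d\n",
				      ret);
	}
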
diff --git a/drivers/scsi/snic/snic_disc.h b/drivers/scsi/snic/snic_disc.h
new file mode 100644
index 000000000000..97fa3f5c5bb4
--- /dev/null
+++ b/drivers/scsi/snic/snic_disc.h
@@ -0,0 +1,124 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#ifndef __SNIC_DISC_H
19#define __SNIC_DISC_H
20
21#include "snic_fwint.h"
22
23enum snic_disc_state {
24 SNIC_DISC_NONE,
25 SNIC_DISC_INIT,
26 SNIC_DISC_PENDING,
27 SNIC_DISC_DONE
28};
29
30struct snic;
31struct snic_disc {
32 struct list_head tgt_list;
33 enum snic_disc_state state;
34 struct mutex mutex;
35 u16 disc_id;
36 u8 req_cnt;
37 u32 nxt_tgt_id;
38 u32 rtgt_cnt;
39 u8 *rtgt_info;
40 struct delayed_work disc_timeout;
41 void (*cb)(struct snic *);
42};
43
44#define SNIC_TGT_NAM_LEN 16
45
46enum snic_tgt_state {
47 SNIC_TGT_STAT_NONE,
48 SNIC_TGT_STAT_INIT,
49 SNIC_TGT_STAT_ONLINE, /* Target is Online */
50 SNIC_TGT_STAT_OFFLINE, /* Target is Offline */
51 SNIC_TGT_STAT_DEL,
52};
53
54struct snic_tgt_priv {
55 struct list_head list;
56 enum snic_tgt_type typ;
57 u16 disc_id;
58 char name[SNIC_TGT_NAM_LEN];
59
60 union {
61 /*DAS Target specific info */
62 /*SAN Target specific info */
63 u8 dummy;
64 } u;
65};
66
67/* snic tgt flags */
68#define SNIC_TGT_SCAN_PENDING 0x01
69
70struct snic_tgt {
71 struct list_head list;
72 u16 id;
73 u16 channel;
74 u32 flags;
75 u32 scsi_tgt_id;
76 enum snic_tgt_state state;
77 struct device dev;
78 struct work_struct scan_work;
79 struct work_struct del_work;
80 struct snic_tgt_priv tdata;
81};
82
83
84struct snic_fw_req;
85
86void snic_disc_init(struct snic_disc *);
87int snic_disc_start(struct snic *);
88void snic_disc_term(struct snic *);
89int snic_report_tgt_cmpl_handler(struct snic *, struct snic_fw_req *);
90int snic_tgtinfo_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq);
91void snic_process_report_tgts_rsp(struct work_struct *);
92void snic_handle_tgt_disc(struct work_struct *);
93void snic_handle_disc(struct work_struct *);
94void snic_tgt_dev_release(struct device *);
95void snic_tgt_del_all(struct snic *);
96
97#define dev_to_tgt(d) \
98 container_of(d, struct snic_tgt, dev)
99
100static inline int
101is_snic_target(struct device *dev)
102{
103 return dev->release == snic_tgt_dev_release;
104}
105
106#define starget_to_tgt(st) \
107 (is_snic_target(((struct scsi_target *) st)->dev.parent) ? \
108 dev_to_tgt(st->dev.parent) : NULL)
109
110#define snic_tgt_to_shost(t) \
111 dev_to_shost(t->dev.parent)
112
113static inline int
114snic_tgt_chkready(struct snic_tgt *tgt)
115{
116 if (tgt->state == SNIC_TGT_STAT_ONLINE)
117 return 0;
118 else
119 return DID_NO_CONNECT << 16;
120}
121
122const char *snic_tgt_state_to_str(int);
123int snic_tgt_scsi_abort_io(struct snic_tgt *);
124#endif /* end of __SNIC_DISC_H */
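
starget_to_tgt() and snic_tgt_chkready() are meant to gate IO submission:
chkready returns 0 for an online target, and otherwise a DID_NO_CONNECT host
byte already shifted into position. A sketch of the intended call-site shape,
assuming a queuecommand-style context (snic_gate_io() is hypothetical; the
real submission path lives in snic_scsi.c later in this series):

	/* Sketch: fail a SCSI command early if its target isn't ready. */
	static int
	snic_gate_io(struct scsi_cmnd *sc)
	{
		struct snic_tgt *tgt = starget_to_tgt(scsi_target(sc->device));
		int ret;

		ret = tgt ? snic_tgt_chkready(tgt) : DID_NO_CONNECT << 16;
		if (ret) {
			sc->result = ret;	/* host byte, pre-shifted */
			sc->scsi_done(sc);
		}

		return ret;	/* 0 means OK to queue */
	}
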
diff --git a/drivers/scsi/snic/snic_fwint.h b/drivers/scsi/snic/snic_fwint.h
new file mode 100644
index 000000000000..2cfaf2dc915f
--- /dev/null
+++ b/drivers/scsi/snic/snic_fwint.h
@@ -0,0 +1,525 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#ifndef __SNIC_FWINT_H
19#define __SNIC_FWINT_H
20
21#define SNIC_CDB_LEN 32 /* SCSI CDB size 32, can be used for 16 bytes */
22#define LUN_ADDR_LEN 8
23
24/*
25 * Command entry type
26 */
27enum snic_io_type {
28 /*
29 * Initiator request types
30 */
31 SNIC_REQ_REPORT_TGTS = 0x2, /* Report Targets */
32 SNIC_REQ_ICMND, /* Initiator command for SCSI IO */
33 SNIC_REQ_ITMF, /* Initiator command for Task Mgmt */
34 SNIC_REQ_HBA_RESET, /* SNIC Reset */
35 SNIC_REQ_EXCH_VER, /* Exchange Version Information */
36 SNIC_REQ_TGT_INFO, /* Backend/Target Information */
37 SNIC_REQ_BOOT_LUNS,
38
39 /*
40 * Response type
41 */
42 SNIC_RSP_REPORT_TGTS_CMPL = 0x12,/* Report Targets Completion */
43 SNIC_RSP_ICMND_CMPL, /* SCSI IO Completion */
44 SNIC_RSP_ITMF_CMPL, /* Task Management Completion */
45 SNIC_RSP_HBA_RESET_CMPL, /* SNIC Reset Completion */
46 SNIC_RSP_EXCH_VER_CMPL, /* Exchange Version Completion*/
47 SNIC_RSP_BOOT_LUNS_CMPL,
48
49 /*
50 * Misc Request types
51 */
52 SNIC_MSG_ACK = 0x80, /* Ack: snic_notify_msg */
53 SNIC_MSG_ASYNC_EVNOTIFY, /* Asynchronous Event Notification */
54}; /* end of enum snic_io_type */
55
56
57/*
58 * Header status codes from firmware
59 */
60enum snic_io_status {
61 SNIC_STAT_IO_SUCCESS = 0, /* request was successful */
62
63 /*
64 * If a request to the fw is rejected, the original request header
65 * will be returned with the status set to one of the following:
66 */
67 SNIC_STAT_INVALID_HDR, /* header contains invalid data */
68 SNIC_STAT_OUT_OF_RES, /* out of resources to complete request */
69 SNIC_STAT_INVALID_PARM, /* some parameter in request is not valid */
70 SNIC_STAT_REQ_NOT_SUP, /* req type is not supported */
71 SNIC_STAT_IO_NOT_FOUND, /* requested IO was not found */
72
73 /*
74 * Once a request is processed, the fw will usually return
75 * a cmpl message type. In cases where errors occurred,
76 * the header status would be filled in with one of the following:
77 */
78 SNIC_STAT_ABORTED, /* req was aborted */
79 SNIC_STAT_TIMEOUT, /* req was timed out */
80 SNIC_STAT_SGL_INVALID, /* req was aborted due to sgl error */
81 SNIC_STAT_DATA_CNT_MISMATCH, /* recv/sent more/less data than expected */
82 SNIC_STAT_FW_ERR, /* req was terminated due to fw error */
83 SNIC_STAT_ITMF_REJECT, /* itmf req was rejected by target */
84 SNIC_STAT_ITMF_FAIL, /* itmf req was failed */
85 SNIC_STAT_ITMF_INCORRECT_LUN, /* itmf req has incorrect LUN id*/
86 SNIC_STAT_CMND_REJECT, /* req was invalid and rejected */
87 SNIC_STAT_DEV_OFFLINE, /* req sent to offline device */
88 SNIC_STAT_NO_BOOTLUN,
89 SNIC_STAT_SCSI_ERR, /* SCSI error returned by Target. */
90 SNIC_STAT_NOT_READY, /* sNIC Subsystem is not ready */
91 SNIC_STAT_FATAL_ERROR, /* sNIC is in unrecoverable state */
92}; /* end of enum snic_io_status */
93
94/*
95 * snic_io_hdr : host <--> firmware
96 *
97 * Any message queued to firmware should carry the following
98 * request header.
99 */
100struct snic_io_hdr {
101 __le32 hid;
102 __le32 cmnd_id; /* tag here */
103 ulong init_ctx; /* initiator context */
104 u8 type; /* request/response type */
105 u8 status; /* header status entry */
106 u8 protocol; /* Protocol specific, may be needed for RoCE */
107 u8 flags;
108 __le16 sg_cnt;
109 u16 resvd;
110};
111
112/* auxiliary function for encoding the snic_io_hdr */
113static inline void
114snic_io_hdr_enc(struct snic_io_hdr *hdr, u8 typ, u8 status, u32 id, u32 hid,
115 u16 sg_cnt, ulong ctx)
116{
117 hdr->type = typ;
118 hdr->status = status;
119 hdr->protocol = 0;
120 hdr->hid = cpu_to_le32(hid);
121 hdr->cmnd_id = cpu_to_le32(id);
122 hdr->sg_cnt = cpu_to_le16(sg_cnt);
123 hdr->init_ctx = ctx;
124 hdr->flags = 0;
125}
126
127/* auxiliary function for decoding the snic_io_hdr */
128static inline void
129snic_io_hdr_dec(struct snic_io_hdr *hdr, u8 *typ, u8 *stat, u32 *cmnd_id,
130 u32 *hid, ulong *ctx)
131{
132 *typ = hdr->type;
133 *stat = hdr->status;
134 *hid = le32_to_cpu(hdr->hid);
135 *cmnd_id = le32_to_cpu(hdr->cmnd_id);
136 *ctx = hdr->init_ctx;
137}
138
139/*
140 * snic_exch_ver_req : host -> firmware
141 *
142 * Used to send host information to firmware and to request the fw version
143 */
144struct snic_exch_ver_req {
145 __le32 drvr_ver; /* for debugging, when fw dump captured */
146 __le32 os_type; /* for OS specific features */
147};
148
149/*
150 * os_type flags
151 * Bit 0-7 : OS information
152 * Bit 8-31: Feature/Capability Information
153 */
154#define SNIC_OS_LINUX 0x1
155#define SNIC_OS_WIN 0x2
156#define SNIC_OS_ESX 0x3
157
158/*
159 * HBA Capabilities
160 * Bit 1: Reserved.
161 * Bit 2: Dynamic Discovery of LUNs.
162 * Bit 3: Async event notifications on tgt online/offline events.
163 * Bit 4: IO timeout support in FW.
164 * Bit 5-31: Reserved.
165 */
166#define SNIC_HBA_CAP_DDL 0x02 /* Supports Dynamic Discovery of LUNs */
167#define SNIC_HBA_CAP_AEN 0x04 /* Supports Async Event Notification */
168#define SNIC_HBA_CAP_TMO 0x08 /* Supports IO timeout in FW */
169
170/*
171 * snic_exch_ver_rsp : firmware -> host
172 *
173 * Used by firmware to send response to version request
174 */
175struct snic_exch_ver_rsp {
176 __le32 version;
177 __le32 hid;
178 __le32 max_concur_ios; /* max concurrent ios */
179 __le32 max_sgs_per_cmd; /* max sgls per IO */
180 __le32 max_io_sz; /* max io size supported */
181 __le32 hba_cap; /* hba capabilities */
182 __le32 max_tgts; /* max tgts supported */
183 __le16 io_timeout; /* FW extended timeout */
184 u16 rsvd;
185};
186
187
188/*
189 * snic_report_tgts : host -> firmware request
190 *
191 * Used by the host to request list of targets
192 */
193struct snic_report_tgts {
194 __le16 sg_cnt;
195 __le16 flags; /* specific flags from fw */
196 u8 _resvd[4];
197 __le64 sg_addr; /* Points to SGL */
198 __le64 sense_addr;
199};
200
201enum snic_type {
202 SNIC_NONE = 0x0,
203 SNIC_DAS,
204 SNIC_SAN,
205};
206
207
208/* Report Target Response */
209enum snic_tgt_type {
210 SNIC_TGT_NONE = 0x0,
211 SNIC_TGT_DAS, /* DAS Target */
212 SNIC_TGT_SAN, /* SAN Target */
213};
214
215/* target id format */
216struct snic_tgt_id {
217 __le32 tgt_id; /* target id */
218 __le16 tgt_type; /* tgt type */
219 __le16 vnic_id; /* corresponding vnic id */
220};
221
222/*
223 * snic_report_tgts_cmpl : firmware -> host response
224 *
225 * Used by firmware to send response to Report Targets request
226 */
227struct snic_report_tgts_cmpl {
228 __le32 tgt_cnt; /* Number of Targets accessible */
229 u32 _resvd;
230};
231
232/*
233 * Command flags
234 *
235 * Bit 0: Read flags
236 * Bit 1: Write flag
237 * Bit 2: ESGL - sg/esg array contains extended sg
238 * ESGE - a host buffer containing sg elements
239 * Bit 3-4: Task Attributes
240 * 00b - simple
241 * 01b - head of queue
242 * 10b - ordered
243 * Bit 5-7: Priority - future use
244 * Bit 8-15: Reserved
245 */
246
247#define SNIC_ICMND_WR 0x01 /* write command */
248#define SNIC_ICMND_RD 0x02 /* read command */
249#define SNIC_ICMND_ESGL 0x04 /* SGE/ESGE array contains valid data*/
250
251/*
252 * Priority/Task Attribute settings
253 */
254#define SNIC_ICMND_TSK_SHIFT 2 /* task attr starts at bit 2 */
255#define SNIC_ICMND_TSK_MASK(x) (((x)>>SNIC_ICMND_TSK_SHIFT) & 0x3) /* 2-bit field */
256#define SNIC_ICMND_TSK_SIMPLE 0 /* simple task attr */
257#define SNIC_ICMND_TSK_HEAD_OF_QUEUE 1 /* head of queue task attr */
258#define SNIC_ICMND_TSK_ORDERED 2 /* ordered task attr */
259
260#define SNIC_ICMND_PRI_SHIFT 5 /* prio val starts at bit 5 */
261
262/*
263 * snic_icmnd : host-> firmware request
264 *
265 * used for sending out an initiator SCSI 16/32-byte command
266 */
267struct snic_icmnd {
268 __le16 sg_cnt; /* Number of SG Elements */
269 __le16 flags; /* flags */
270 __le32 sense_len; /* Sense buffer length */
271 __le64 tgt_id; /* Destination Target ID */
272 __le64 lun_id; /* Destination LUN ID */
273 u8 cdb_len;
274 u8 _resvd;
275 __le16 time_out; /* timeout (ms) for fw to allocate resources & handle io */
276 __le32 data_len; /* Total number of bytes to be transferred */
277 u8 cdb[SNIC_CDB_LEN];
278 __le64 sg_addr; /* Points to SG List */
279 __le64 sense_addr; /* Sense buffer address */
280};
281
282
283/* Response flags */
284/* Bit 0: Under run
285 * Bit 1: Over Run
286 * Bit 2-7: Reserved
287 */
288#define SNIC_ICMND_CMPL_UNDR_RUN 0x01 /* resid under and valid */
289#define SNIC_ICMND_CMPL_OVER_RUN 0x02 /* resid over and valid */
290
291/*
292 * snic_icmnd_cmpl: firmware -> host response
293 *
294 * Used for sending the host a response to an icmnd (initiator command)
295 */
296struct snic_icmnd_cmpl {
297 u8 scsi_status; /* value as per SAM */
298 u8 flags;
299 __le16 sense_len; /* Sense Length */
300 __le32 resid; /* Residue : # bytes under or over run */
301};
302
303/*
304 * snic_itmf: host->firmware request
305 *
306 * used for requesting the firmware to abort a request and/or send out
307 * a task management function
308 *
309 * the req_id field is valid in case of abort task and clear task
310 */
311struct snic_itmf {
312 u8 tm_type; /* SCSI Task Management request */
313 u8 resvd;
314 __le16 flags; /* flags */
315 __le32 req_id; /* Command id of snic req to be aborted */
316 __le64 tgt_id; /* Target ID */
317 __le64 lun_id; /* Destination LUN ID */
318 __le16 timeout; /* in sec */
319};
320
321/*
322 * Task Management Request
323 */
324enum snic_itmf_tm_type {
325 SNIC_ITMF_ABTS_TASK = 0x01, /* Abort Task */
326 SNIC_ITMF_ABTS_TASK_SET, /* Abort Task Set */
327 SNIC_ITMF_CLR_TASK, /* Clear Task */
328 SNIC_ITMF_CLR_TASKSET, /* Clear Task Set */
329 SNIC_ITMF_LUN_RESET, /* Lun Reset */
330 SNIC_ITMF_ABTS_TASK_TERM, /* Supported for SAN Targets */
331};
332
333/*
334 * snic_itmf_cmpl: firmware -> host response
335 *
336 * used for sending the host a response for a itmf request
337 */
338struct snic_itmf_cmpl {
339 __le32 nterminated; /* # IOs terminated as a result of tmf */
340 u8 flags; /* flags */
341 u8 _resvd[3];
342};
343
344/*
345 * itmfl_cmpl flags
346 * Bit 0 : 1 - Num terminated field valid
347 * Bit 1 - 7 : Reserved
348 */
349#define SNIC_NUM_TERM_VALID 0x01 /* Number of IOs terminated */
350
351/*
352 * snic_hba_reset: host -> firmware request
353 *
354 * used for requesting firmware to reset snic
355 */
356struct snic_hba_reset {
357 __le16 flags; /* flags */
358 u8 _resvd[6];
359};
360
361/*
362 * snic_hba_reset_cmpl: firmware -> host response
363 *
364 * Used by firmware to respond to the host's hba reset request
365 */
366struct snic_hba_reset_cmpl {
367 u8 flags; /* flags : more info needs to be added*/
368 u8 _resvd[7];
369};
370
371/*
372 * snic_notify_msg: firmware -> host response
373 *
374 * Used by firmware to notify host of the last work queue entry received
375 */
376struct snic_notify_msg {
377 __le32 wqe_num; /* wq entry number */
378 u8 flags; /* flags, macros */
379 u8 _resvd[4];
380};
381
382
383#define SNIC_EVDATA_LEN 24 /* in bytes */
384/* snic_async_evnotify: firmware -> host notification
385 *
386 * Used by firmware to notify the host about configuration/state changes
387 */
388struct snic_async_evnotify {
389 u8 FLS_EVENT_DESC;
390 u8 vnic; /* vnic id */
391 u8 _resvd[2];
392 __le32 ev_id; /* Event ID */
393 u8 ev_data[SNIC_EVDATA_LEN]; /* Event Data */
394 u8 _resvd2[4];
395};
396
397/* async event flags */
398enum snic_ev_type {
399 SNIC_EV_TGT_OFFLINE = 0x01, /* Target Offline, PL contains TGT ID */
400 SNIC_EV_TGT_ONLINE, /* Target Online, PL contains TGT ID */
401 SNIC_EV_LUN_OFFLINE, /* LUN Offline, PL contains LUN ID */
402 SNIC_EV_LUN_ONLINE, /* LUN Online, PL contains LUN ID */
403 SNIC_EV_CONF_CHG, /* Dev Config/Attr Change Event */
404 SNIC_EV_TGT_ADDED, /* Target Added */
405 SNIC_EV_TGT_DELTD, /* Target Del'd, PL contains TGT ID */
406 SNIC_EV_LUN_ADDED, /* LUN Added */
407 SNIC_EV_LUN_DELTD, /* LUN Del'd, PL cont. TGT & LUN ID */
408
409 SNIC_EV_DISC_CMPL = 0x10, /* Discovery Completed Event */
410};
411
412
413#define SNIC_HOST_REQ_LEN 128 /* Expected length of host req, wq desc sz */
414/* Payload 88 bytes = 128 - 24 - 16 */
415#define SNIC_HOST_REQ_PAYLOAD ((int)(SNIC_HOST_REQ_LEN - \
416 sizeof(struct snic_io_hdr) - \
417 (2 * sizeof(u64))))
418
419/*
420 * snic_host_req: host -> firmware request
421 *
422 * Basic structure for all snic requests that are sent from the host to
423 * firmware. They are 128 bytes in size.
424 */
425struct snic_host_req {
426 u64 ctrl_data[2]; /*16 bytes - Control Data */
427 struct snic_io_hdr hdr;
428 union {
429 /*
430 * Entry specific space, last byte contains color
431 */
432 u8 buf[SNIC_HOST_REQ_PAYLOAD];
433
434 /*
435 * Exchange firmware version
436 */
437 struct snic_exch_ver_req exch_ver;
438
439 /* report targets */
440 struct snic_report_tgts rpt_tgts;
441
442 /* io request */
443 struct snic_icmnd icmnd;
444
445 /* task management request */
446 struct snic_itmf itmf;
447
448 /* hba reset */
449 struct snic_hba_reset reset;
450 } u;
451}; /* end of snic_host_req structure */
452
453
454#define SNIC_FW_REQ_LEN 64 /* Expected length of fw req */
455struct snic_fw_req {
456 struct snic_io_hdr hdr;
457 union {
458 /*
459 * Entry specific space, last byte contains color
460 */
461 u8 buf[SNIC_FW_REQ_LEN - sizeof(struct snic_io_hdr)];
462
463 /* Exchange Version Response */
464 struct snic_exch_ver_rsp exch_ver_cmpl;
465
466 /* Report Targets Response */
467 struct snic_report_tgts_cmpl rpt_tgts_cmpl;
468
469 /* scsi response */
470 struct snic_icmnd_cmpl icmnd_cmpl;
471
472 /* task management response */
473 struct snic_itmf_cmpl itmf_cmpl;
474
475 /* hba reset response */
476 struct snic_hba_reset_cmpl reset_cmpl;
477
478 /* notify message */
479 struct snic_notify_msg ack;
480
481 /* async notification event */
482 struct snic_async_evnotify async_ev;
483
484 } u;
485}; /* end of snic_fw_req structure */
486
487/*
488 * Auxiliary macros to verify specific snic req/cmpl structures,
489 * ensuring they are 64-bit aligned and do not use the color
490 * bit field
491 */
492#define VERIFY_REQ_SZ(x)
493#define VERIFY_CMPL_SZ(x)
494
495/*
496 * Access routines to encode and decode the color bit, which is the most
497 * significant bit of the structure.
498 */
499static inline void
500snic_color_enc(struct snic_fw_req *req, u8 color)
501{
502 u8 *c = ((u8 *) req) + sizeof(struct snic_fw_req) - 1;
503
504 if (color)
505 *c |= 0x80;
506 else
507 *c &= ~0x80;
508}
509
510static inline void
511snic_color_dec(struct snic_fw_req *req, u8 *color)
512{
513 u8 *c = ((u8 *) req) + sizeof(struct snic_fw_req) - 1;
514
515 *color = *c >> 7;
516
517 /* Make sure color bit is read from desc *before* other fields
518 * are read from desc. Hardware guarantees color bit is last
519 * bit (byte) written. Adding the rmb() prevents the compiler
520 * and/or CPU from reordering the reads which would potentially
521 * result in reading stale values.
522 */
523 rmb();
524}
525#endif /* end of __SNIC_FWINT_H */
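
The rmb() in snic_color_dec() matters because the device writes the color
byte last: only after the consumer observes the expected color may it safely
read the rest of the descriptor. A hedged polling sketch of that convention
(snic_desc_is_new() is illustrative only; the actual consumer is the vnic CQ
service code elsewhere in this series):

	/* Sketch: detect a freshly written completion descriptor by its
	 * color bit. The producer flips the color each time the ring
	 * wraps, so "new" means the color matches the consumer's
	 * expected value for the current pass.
	 */
	static bool
	snic_desc_is_new(struct snic_fw_req *desc, u8 expected_color)
	{
		u8 color;

		snic_color_dec(desc, &color);	/* includes the rmb() */

		return color == expected_color;
	}
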
diff --git a/drivers/scsi/snic/snic_io.c b/drivers/scsi/snic/snic_io.c
new file mode 100644
index 000000000000..993db7de4e4b
--- /dev/null
+++ b/drivers/scsi/snic/snic_io.c
@@ -0,0 +1,518 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#include <linux/errno.h>
19#include <linux/pci.h>
20#include <linux/slab.h>
21
22#include <linux/interrupt.h>
23#include <linux/workqueue.h>
24#include <linux/spinlock.h>
25#include <linux/mempool.h>
26#include <scsi/scsi_tcq.h>
27
28#include "snic_io.h"
29#include "snic.h"
30#include "cq_enet_desc.h"
31#include "snic_fwint.h"
32
33static void
34snic_wq_cmpl_frame_send(struct vnic_wq *wq,
35 struct cq_desc *cq_desc,
36 struct vnic_wq_buf *buf,
37 void *opaque)
38{
39 struct snic *snic = svnic_dev_priv(wq->vdev);
40
41 SNIC_BUG_ON(buf->os_buf == NULL);
42
43 if (snic_log_level & SNIC_DESC_LOGGING)
44 SNIC_HOST_INFO(snic->shost,
45 "Ack received for snic_host_req %p.\n",
46 buf->os_buf);
47
48 SNIC_TRC(snic->shost->host_no, 0, 0,
49 ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0,
50 0);
51 pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);
52 buf->os_buf = NULL;
53}
54
55static int
56snic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
57 struct cq_desc *cq_desc,
58 u8 type,
59 u16 q_num,
60 u16 cmpl_idx,
61 void *opaque)
62{
63 struct snic *snic = svnic_dev_priv(vdev);
64 unsigned long flags;
65
66 SNIC_BUG_ON(q_num != 0);
67
68 spin_lock_irqsave(&snic->wq_lock[q_num], flags);
69 svnic_wq_service(&snic->wq[q_num],
70 cq_desc,
71 cmpl_idx,
72 snic_wq_cmpl_frame_send,
73 NULL);
74 spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
75
76 return 0;
77} /* end of snic_cmpl_handler_cont */
78
79int
80snic_wq_cmpl_handler(struct snic *snic, int work_to_do)
81{
82 unsigned int work_done = 0;
83 unsigned int i;
84
85 snic->s_stats.misc.last_ack_time = jiffies;
86 for (i = 0; i < snic->wq_count; i++) {
87 work_done += svnic_cq_service(&snic->cq[i],
88 work_to_do,
89 snic_wq_cmpl_handler_cont,
90 NULL);
91 }
92
93 return work_done;
94} /* end of snic_wq_cmpl_handler */
95
96void
97snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
98{
99
100 struct snic_host_req *req = buf->os_buf;
101 struct snic *snic = svnic_dev_priv(wq->vdev);
102 struct snic_req_info *rqi = NULL;
103 unsigned long flags;
104
105 pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);
106
107 rqi = req_to_rqi(req);
108 spin_lock_irqsave(&snic->spl_cmd_lock, flags);
109 if (list_empty(&rqi->list)) {
110 spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
111 goto end;
112 }
113
114 SNIC_BUG_ON(rqi->list.next == NULL); /* if not added to spl_cmd_list */
115 list_del_init(&rqi->list);
116 spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
117
118 if (rqi->sge_va) {
119 snic_pci_unmap_rsp_buf(snic, rqi);
120 kfree((void *)rqi->sge_va);
121 rqi->sge_va = 0;
122 }
123 snic_req_free(snic, rqi);
124 SNIC_HOST_INFO(snic->shost, "snic_free_wq_buf .. freed.\n");
125
126end:
127 return;
128}
129
130/* Criteria to select work queue in multi queue mode */
131static int
132snic_select_wq(struct snic *snic)
133{
134 /* No multi queue support for now */
135 BUILD_BUG_ON(SNIC_WQ_MAX > 1);
136
137 return 0;
138}
139
140int
141snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
142{
143 dma_addr_t pa = 0;
144 unsigned long flags;
145 struct snic_fw_stats *fwstats = &snic->s_stats.fw;
146 long act_reqs;
147 int q_num = 0;
148
149 snic_print_desc(__func__, os_buf, len);
150
151 /* Map request buffer */
152 pa = pci_map_single(snic->pdev, os_buf, len, PCI_DMA_TODEVICE);
153 if (pci_dma_mapping_error(snic->pdev, pa)) {
154 SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");
155
156 return -ENOMEM;
157 }
158
159 q_num = snic_select_wq(snic);
160
161 spin_lock_irqsave(&snic->wq_lock[q_num], flags);
162 if (!svnic_wq_desc_avail(snic->wq)) {
163 pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE);
164 spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
165 atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
166 SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no);
167
168 return -ENOMEM;
169 }
170
171 snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1);
172 spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
173
174 /* Update stats */
175 act_reqs = atomic64_inc_return(&fwstats->actv_reqs);
176 if (act_reqs > atomic64_read(&fwstats->max_actv_reqs))
177 atomic64_set(&fwstats->max_actv_reqs, act_reqs);
178
179 return 0;
180} /* end of snic_queue_wq_desc() */
181
182/*
183 * snic_handle_untagged_req: Adds snic specific requests to spl_cmd_list.
184 * Purpose : Used during driver unload to clean up the requests.
185 */
186void
187snic_handle_untagged_req(struct snic *snic, struct snic_req_info *rqi)
188{
189 unsigned long flags;
190
191 INIT_LIST_HEAD(&rqi->list);
192
193 spin_lock_irqsave(&snic->spl_cmd_lock, flags);
194 list_add_tail(&rqi->list, &snic->spl_cmd_list);
195 spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
196}
197
198/*
199 * snic_req_init:
200 * Allocates snic_req_info + snic_host_req + sgl data, and initializes.
201 */
202struct snic_req_info *
203snic_req_init(struct snic *snic, int sg_cnt)
204{
205 u8 typ;
206 struct snic_req_info *rqi = NULL;
207
208 typ = (sg_cnt <= SNIC_REQ_CACHE_DFLT_SGL) ?
209 SNIC_REQ_CACHE_DFLT_SGL : SNIC_REQ_CACHE_MAX_SGL;
210
211 rqi = mempool_alloc(snic->req_pool[typ], GFP_ATOMIC);
212 if (!rqi) {
213 atomic64_inc(&snic->s_stats.io.alloc_fail);
214 SNIC_HOST_ERR(snic->shost,
215 "Failed to allocate memory from snic req pool id = %d\n",
216 typ);
217 return rqi;
218 }
219
220 memset(rqi, 0, sizeof(*rqi));
221 rqi->rq_pool_type = typ;
222 rqi->start_time = jiffies;
223 rqi->req = (struct snic_host_req *) (rqi + 1);
224 rqi->req_len = sizeof(struct snic_host_req);
225 rqi->snic = snic;
226
227 /* SGL descriptors, if any, immediately follow the request */
228
229 if (sg_cnt == 0)
230 goto end;
231
232 rqi->req_len += (sg_cnt * sizeof(struct snic_sg_desc));
233
234 if (sg_cnt > atomic64_read(&snic->s_stats.io.max_sgl))
235 atomic64_set(&snic->s_stats.io.max_sgl, sg_cnt);
236
237 SNIC_BUG_ON(sg_cnt > SNIC_MAX_SG_DESC_CNT);
238 atomic64_inc(&snic->s_stats.io.sgl_cnt[sg_cnt - 1]);
239
240end:
241 memset(rqi->req, 0, rqi->req_len);
242
243 /* pre initialization of init_ctx to support req_to_rqi */
244 rqi->req->hdr.init_ctx = (ulong) rqi;
245
246 SNIC_SCSI_DBG(snic->shost, "Req_alloc:rqi = %p allocated.\n", rqi);
247
248 return rqi;
249} /* end of snic_req_init */
250
251/*
252 * snic_abort_req_init : Inits abort request.
253 */
254struct snic_host_req *
255snic_abort_req_init(struct snic *snic, struct snic_req_info *rqi)
256{
257 struct snic_host_req *req = NULL;
258
259 SNIC_BUG_ON(!rqi);
260
261 /* If an abort is issued a second time, reuse the earlier request */
262 if (rqi->abort_req)
263 return rqi->abort_req;
264
265
266 req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
267 if (!req) {
268 SNIC_HOST_ERR(snic->shost, "abts:Failed to alloc tm req.\n");
269 WARN_ON_ONCE(1);
270
271 return NULL;
272 }
273
274 rqi->abort_req = req;
275 memset(req, 0, sizeof(struct snic_host_req));
276 /* pre initialization of init_ctx to support req_to_rqi */
277 req->hdr.init_ctx = (ulong) rqi;
278
279 return req;
280} /* end of snic_abort_req_init */
281
282/*
283 * snic_dr_req_init : Inits device reset req
284 */
285struct snic_host_req *
286snic_dr_req_init(struct snic *snic, struct snic_req_info *rqi)
287{
288 struct snic_host_req *req = NULL;
289
290 SNIC_BUG_ON(!rqi);
291
292 req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
293 if (!req) {
294 SNIC_HOST_ERR(snic->shost, "dr:Failed to alloc tm req.\n");
295 WARN_ON_ONCE(1);
296
297 return NULL;
298 }
299
300 SNIC_BUG_ON(rqi->dr_req != NULL);
301 rqi->dr_req = req;
302 memset(req, 0, sizeof(struct snic_host_req));
303 /* pre initialization of init_ctx to support req_to_rqi */
304 req->hdr.init_ctx = (ulong) rqi;
305
306 return req;
307} /* end of snic_dr_req_init */
308
309/* frees snic_req_info and snic_host_req */
310void
311snic_req_free(struct snic *snic, struct snic_req_info *rqi)
312{
313 SNIC_BUG_ON(rqi->req == rqi->abort_req);
314 SNIC_BUG_ON(rqi->req == rqi->dr_req);
315 SNIC_BUG_ON(rqi->sge_va != 0);
316
317 SNIC_SCSI_DBG(snic->shost,
318 "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n",
319 rqi, rqi->req, rqi->abort_req, rqi->dr_req);
320
321 if (rqi->abort_req)
322 mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
323
324 if (rqi->dr_req)
325 mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
326
327 mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
328}
329
330void
331snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
332{
333 struct snic_sg_desc *sgd;
334
335 sgd = req_to_sgl(rqi_to_req(rqi));
336 SNIC_BUG_ON(sgd[0].addr == 0);
337 pci_unmap_single(snic->pdev,
338 le64_to_cpu(sgd[0].addr),
339 le32_to_cpu(sgd[0].len),
340 PCI_DMA_FROMDEVICE);
341}
342
343/*
344 * snic_free_all_untagged_reqs: Walks through untagged reqs and frees them.
345 */
346void
347snic_free_all_untagged_reqs(struct snic *snic)
348{
349 struct snic_req_info *rqi;
350 struct list_head *cur, *nxt;
351 unsigned long flags;
352
353 spin_lock_irqsave(&snic->spl_cmd_lock, flags);
354 list_for_each_safe(cur, nxt, &snic->spl_cmd_list) {
355 rqi = list_entry(cur, struct snic_req_info, list);
356 list_del_init(&rqi->list);
357 if (rqi->sge_va) {
358 snic_pci_unmap_rsp_buf(snic, rqi);
359 kfree((void *)rqi->sge_va);
360 rqi->sge_va = 0;
361 }
362
363 snic_req_free(snic, rqi);
364 }
365 spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
366}
367
368/*
369 * snic_release_untagged_req : Unlinks the untagged req and frees it.
370 */
371void
372snic_release_untagged_req(struct snic *snic, struct snic_req_info *rqi)
373{
374 unsigned long flags;
375
376 spin_lock_irqsave(&snic->snic_lock, flags);
377 if (snic->in_remove) {
378 spin_unlock_irqrestore(&snic->snic_lock, flags);
379 goto end;
380 }
381 spin_unlock_irqrestore(&snic->snic_lock, flags);
382
383 spin_lock_irqsave(&snic->spl_cmd_lock, flags);
384 if (list_empty(&rqi->list)) {
385 spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
386 goto end;
387 }
388 list_del_init(&rqi->list);
389 spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
390 snic_req_free(snic, rqi);
391
392end:
393 return;
394}
395
396/* dump buf in hex fmt */
397void
398snic_hex_dump(char *pfx, char *data, int len)
399{
400 SNIC_INFO("%s Dumping Data of Len = %d\n", pfx, len);
401 print_hex_dump_bytes(pfx, DUMP_PREFIX_NONE, data, len);
402}
403
404#define LINE_BUFSZ 128 /* for snic_print_desc fn */
405static void
406snic_dump_desc(const char *fn, char *os_buf, int len)
407{
408 struct snic_host_req *req = (struct snic_host_req *) os_buf;
409 struct snic_fw_req *fwreq = (struct snic_fw_req *) os_buf;
410 struct snic_req_info *rqi = NULL;
411 char line[LINE_BUFSZ] = { '\0' };
412 char *cmd_str = NULL;
413
414 if (req->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL)
415 rqi = (struct snic_req_info *) fwreq->hdr.init_ctx;
416 else
417 rqi = (struct snic_req_info *) req->hdr.init_ctx;
418
419 SNIC_BUG_ON(rqi == NULL || rqi->req == NULL);
420 switch (req->hdr.type) {
421 case SNIC_REQ_REPORT_TGTS:
422 cmd_str = "report-tgt : ";
423 snprintf(line, LINE_BUFSZ, "SNIC_REQ_REPORT_TGTS :");
424 break;
425
426 case SNIC_REQ_ICMND:
427 cmd_str = "icmnd : ";
428 snprintf(line, LINE_BUFSZ, "SNIC_REQ_ICMND : 0x%x :",
429 req->u.icmnd.cdb[0]);
430 break;
431
432 case SNIC_REQ_ITMF:
433 cmd_str = "itmf : ";
434 snprintf(line, LINE_BUFSZ, "SNIC_REQ_ITMF :");
435 break;
436
437 case SNIC_REQ_HBA_RESET:
438 cmd_str = "hba reset :";
439 snprintf(line, LINE_BUFSZ, "SNIC_REQ_HBA_RESET :");
440 break;
441
442 case SNIC_REQ_EXCH_VER:
443 cmd_str = "exch ver : ";
444 snprintf(line, LINE_BUFSZ, "SNIC_REQ_EXCH_VER :");
445 break;
446
447 case SNIC_REQ_TGT_INFO:
448 cmd_str = "tgt info : ";
449 break;
450
451 case SNIC_RSP_REPORT_TGTS_CMPL:
452 cmd_str = "report tgt cmpl : ";
453 snprintf(line, LINE_BUFSZ, "SNIC_RSP_REPORT_TGTS_CMPL :");
454 break;
455
456 case SNIC_RSP_ICMND_CMPL:
457 cmd_str = "icmnd_cmpl : ";
458 snprintf(line, LINE_BUFSZ, "SNIC_RSP_ICMND_CMPL : 0x%x :",
459 rqi->req->u.icmnd.cdb[0]);
460 break;
461
462 case SNIC_RSP_ITMF_CMPL:
463 cmd_str = "itmf_cmpl : ";
464 snprintf(line, LINE_BUFSZ, "SNIC_RSP_ITMF_CMPL :");
465 break;
466
467 case SNIC_RSP_HBA_RESET_CMPL:
468 cmd_str = "hba_reset_cmpl : ";
469 snprintf(line, LINE_BUFSZ, "SNIC_RSP_HBA_RESET_CMPL :");
470 break;
471
472 case SNIC_RSP_EXCH_VER_CMPL:
473 cmd_str = "exch_ver_cmpl : ";
474 snprintf(line, LINE_BUFSZ, "SNIC_RSP_EXCH_VER_CMPL :");
475 break;
476
477 case SNIC_MSG_ACK:
478 cmd_str = "msg ack : ";
479 snprintf(line, LINE_BUFSZ, "SNIC_MSG_ACK :");
480 break;
481
482 case SNIC_MSG_ASYNC_EVNOTIFY:
483 cmd_str = "async notify : ";
484 snprintf(line, LINE_BUFSZ, "SNIC_MSG_ASYNC_EVNOTIFY :");
485 break;
486
487 default:
488 cmd_str = "unknown : ";
489 SNIC_BUG_ON(1);
490 break;
491 }
492
493 SNIC_INFO("%s:%s >>cmndid=%x:sg_cnt = %x:status = %x:ctx = %lx.\n",
494 fn, line, req->hdr.cmnd_id, req->hdr.sg_cnt, req->hdr.status,
495 req->hdr.init_ctx);
496
497 /* Set this bit in snic_log_level to dump the raw byte stream */
498 if (snic_log_level & 0x20)
499 snic_hex_dump(cmd_str, os_buf, len);
500} /* end of snic_dump_desc */
501
502void
503snic_print_desc(const char *fn, char *os_buf, int len)
504{
505 if (snic_log_level & SNIC_DESC_LOGGING)
506 snic_dump_desc(fn, os_buf, len);
507}
508
509void
510snic_calc_io_process_time(struct snic *snic, struct snic_req_info *rqi)
511{
512 u64 duration;
513
514 duration = jiffies - rqi->start_time;
515
516 if (duration > atomic64_read(&snic->s_stats.io.max_time))
517 atomic64_set(&snic->s_stats.io.max_time, duration);
518}
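
Every request header carries init_ctx, pre-set to the owning snic_req_info
by snic_req_init()/snic_abort_req_init()/snic_dr_req_init(), and firmware
echoes it back in the completion. A minimal sketch of that round trip;
snic_fwreq_to_rqi() is a hypothetical helper (the completion handlers in
this series open-code the same decode):

	/* Sketch: recover the driver context from a fw completion. */
	static struct snic_req_info *
	snic_fwreq_to_rqi(struct snic_fw_req *fwreq)
	{
		u8 typ, stat;
		u32 cmnd_id, hid;
		ulong ctx;

		/* init_ctx travels opaquely through firmware and back */
		snic_io_hdr_dec(&fwreq->hdr, &typ, &stat, &cmnd_id, &hid, &ctx);

		return (struct snic_req_info *) ctx;
	}
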
diff --git a/drivers/scsi/snic/snic_io.h b/drivers/scsi/snic/snic_io.h
new file mode 100644
index 000000000000..093d6524cd42
--- /dev/null
+++ b/drivers/scsi/snic/snic_io.h
@@ -0,0 +1,118 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#ifndef _SNIC_IO_H
19#define _SNIC_IO_H
20
21#define SNIC_DFLT_SG_DESC_CNT 32 /* Default descriptors for sgl */
22#define SNIC_MAX_SG_DESC_CNT 60 /* Max descriptor for sgl */
23#define SNIC_SG_DESC_ALIGN 16 /* Descriptor address alignment */
24
25/* SG descriptor for snic */
26struct snic_sg_desc {
27 __le64 addr;
28 __le32 len;
29 u32 _resvd;
30};
31
32struct snic_dflt_sgl {
33 struct snic_sg_desc sg_desc[SNIC_DFLT_SG_DESC_CNT];
34};
35
36struct snic_max_sgl {
37 struct snic_sg_desc sg_desc[SNIC_MAX_SG_DESC_CNT];
38};
39
40enum snic_req_cache_type {
41 SNIC_REQ_CACHE_DFLT_SGL = 0, /* cache with default size sgl */
42 SNIC_REQ_CACHE_MAX_SGL, /* cache with max size sgl */
43 SNIC_REQ_TM_CACHE, /* cache for task mgmt reqs; contains
44 snic_host_req objects only */
45 SNIC_REQ_MAX_CACHES /* number of sgl caches */
46};
47
48/* Per IO internal state */
49struct snic_internal_io_state {
50 char *rqi;
51 u64 flags;
52 u32 state;
53 u32 abts_status; /* Abort completion status */
54 u32 lr_status; /* device reset completion status */
55};
56
57/* IO state machine */
58enum snic_ioreq_state {
59 SNIC_IOREQ_NOT_INITED = 0,
60 SNIC_IOREQ_PENDING,
61 SNIC_IOREQ_ABTS_PENDING,
62 SNIC_IOREQ_ABTS_COMPLETE,
63 SNIC_IOREQ_LR_PENDING,
64 SNIC_IOREQ_LR_COMPLETE,
65 SNIC_IOREQ_COMPLETE,
66};
67
68struct snic;
69struct snic_host_req;
70
71/*
72 * snic_req_info : Contains info about IO, one per scsi command.
73 * Notes: Make sure that the structure is aligned to 16 bytes;
74 * this allows easy access to snic_req_info from snic_host_req
75 */
76struct snic_req_info {
77 struct list_head list;
78 struct snic_host_req *req;
79 u64 start_time; /* start time in jiffies */
80 u16 rq_pool_type; /* request pool the rqi came from */
81 u16 req_len; /* buf len passing to fw (req + sgl)*/
82 u32 tgt_id;
83
84 u32 tm_tag;
85 u8 io_cmpl:1; /* set to 1 when fw completes IO */
86 u8 resvd[3];
87 struct scsi_cmnd *sc; /* Associated scsi cmd */
88 struct snic *snic; /* Associated snic */
89 ulong sge_va; /* Pointer to Resp Buffer */
90 u64 snsbuf_va;
91
92 struct snic_host_req *abort_req;
93 struct completion *abts_done;
94
95 struct snic_host_req *dr_req;
96 struct completion *dr_done;
97};
98
99
100#define rqi_to_req(rqi) \
101 ((struct snic_host_req *) (((struct snic_req_info *)rqi)->req))
102
103#define req_to_rqi(req) \
104 ((struct snic_req_info *) (((struct snic_host_req *)req)->hdr.init_ctx))
105
106#define req_to_sgl(req) \
107 ((struct snic_sg_desc *) (((struct snic_host_req *)req)+1))
108
109struct snic_req_info *
110snic_req_init(struct snic *, int sg_cnt);
111void snic_req_free(struct snic *, struct snic_req_info *);
112void snic_calc_io_process_time(struct snic *, struct snic_req_info *);
113void snic_pci_unmap_rsp_buf(struct snic *, struct snic_req_info *);
114struct snic_host_req *
115snic_abort_req_init(struct snic *, struct snic_req_info *);
116struct snic_host_req *
117snic_dr_req_init(struct snic *, struct snic_req_info *);
118#endif /* _SNIC_IO_H */
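
rqi_to_req() and req_to_sgl() assume one contiguous allocation per request:
the snic_req_info bookkeeping header, then the 128-byte snic_host_req, then
the inline SGL. A sketch of the element size those macros imply (illustrative
only; the real caches are sized in snic_main.c using snic_dflt_sgl and
snic_max_sgl):

	/* Sketch: size of one request-pool element for sg_cnt entries. */
	static inline size_t
	snic_req_elem_sz(int sg_cnt)
	{
		return sizeof(struct snic_req_info) +	/* driver bookkeeping */
		       sizeof(struct snic_host_req) +	/* 128B fw request */
		       sg_cnt * sizeof(struct snic_sg_desc); /* inline SGL */
	}
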
diff --git a/drivers/scsi/snic/snic_isr.c b/drivers/scsi/snic/snic_isr.c
new file mode 100644
index 000000000000..a85fae25ec8c
--- /dev/null
+++ b/drivers/scsi/snic/snic_isr.c
@@ -0,0 +1,204 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#include <linux/string.h>
19#include <linux/errno.h>
20#include <linux/pci.h>
21#include <linux/interrupt.h>
22
23#include "vnic_dev.h"
24#include "vnic_intr.h"
25#include "vnic_stats.h"
26#include "snic_io.h"
27#include "snic.h"
28
29
30/*
31 * snic_isr_msix_wq : MSIx ISR for work queue.
32 */
33
34static irqreturn_t
35snic_isr_msix_wq(int irq, void *data)
36{
37 struct snic *snic = data;
38 unsigned long wq_work_done = 0;
39
40 snic->s_stats.misc.last_isr_time = jiffies;
41 atomic64_inc(&snic->s_stats.misc.isr_cnt);
42
43 wq_work_done = snic_wq_cmpl_handler(snic, -1);
44 svnic_intr_return_credits(&snic->intr[SNIC_MSIX_WQ],
45 wq_work_done,
46 1 /* unmask intr */,
47 1 /* reset intr timer */);
48
49 return IRQ_HANDLED;
50} /* end of snic_isr_msix_wq */
51
52static irqreturn_t
53snic_isr_msix_io_cmpl(int irq, void *data)
54{
55 struct snic *snic = data;
56 unsigned long iocmpl_work_done = 0;
57
58 snic->s_stats.misc.last_isr_time = jiffies;
59 atomic64_inc(&snic->s_stats.misc.isr_cnt);
60
61 iocmpl_work_done = snic_fwcq_cmpl_handler(snic, -1);
62 svnic_intr_return_credits(&snic->intr[SNIC_MSIX_IO_CMPL],
63 iocmpl_work_done,
64 1 /* unmask intr */,
65 1 /* reset intr timer */);
66
67 return IRQ_HANDLED;
68} /* end of snic_isr_msix_io_cmpl */
69
70static irqreturn_t
71snic_isr_msix_err_notify(int irq, void *data)
72{
73 struct snic *snic = data;
74
75 snic->s_stats.misc.last_isr_time = jiffies;
76 atomic64_inc(&snic->s_stats.misc.isr_cnt);
77
78 svnic_intr_return_all_credits(&snic->intr[SNIC_MSIX_ERR_NOTIFY]);
79 snic_log_q_error(snic);
80
81 /*Handling link events */
82 snic_handle_link_event(snic);
83
84 return IRQ_HANDLED;
85} /* end of snic_isr_msix_err_notify */
86
87
88void
89snic_free_intr(struct snic *snic)
90{
91 int i;
92
93 /* ONLY interrupt mode MSIX is supported */
94 for (i = 0; i < ARRAY_SIZE(snic->msix); i++) {
95 if (snic->msix[i].requested) {
96 free_irq(snic->msix_entry[i].vector,
97 snic->msix[i].devid);
98 }
99 }
100} /* end of snic_free_intr */
101
102int
103snic_request_intr(struct snic *snic)
104{
105 int ret = 0, i;
106 enum vnic_dev_intr_mode intr_mode;
107
108 intr_mode = svnic_dev_get_intr_mode(snic->vdev);
109 SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX);
110
111 /*
112 * Currently HW supports single WQ and CQ. So passing devid as snic.
113 * When hardware supports multiple WQs and CQs, one idea is
114 * to pass devid as corresponding WQ or CQ ptr and retrieve snic
115 * from queue ptr.
116 * Except for err_notify, which is always one.
117 */
118 sprintf(snic->msix[SNIC_MSIX_WQ].devname,
119 "%.11s-scsi-wq",
120 snic->name);
121 snic->msix[SNIC_MSIX_WQ].isr = snic_isr_msix_wq;
122 snic->msix[SNIC_MSIX_WQ].devid = snic;
123
124 sprintf(snic->msix[SNIC_MSIX_IO_CMPL].devname,
125 "%.11s-io-cmpl",
126 snic->name);
127 snic->msix[SNIC_MSIX_IO_CMPL].isr = snic_isr_msix_io_cmpl;
128 snic->msix[SNIC_MSIX_IO_CMPL].devid = snic;
129
130 sprintf(snic->msix[SNIC_MSIX_ERR_NOTIFY].devname,
131 "%.11s-err-notify",
132 snic->name);
133 snic->msix[SNIC_MSIX_ERR_NOTIFY].isr = snic_isr_msix_err_notify;
134 snic->msix[SNIC_MSIX_ERR_NOTIFY].devid = snic;
135
136 for (i = 0; i < ARRAY_SIZE(snic->msix); i++) {
137 ret = request_irq(snic->msix_entry[i].vector,
138 snic->msix[i].isr,
139 0,
140 snic->msix[i].devname,
141 snic->msix[i].devid);
142 if (ret) {
143 SNIC_HOST_ERR(snic->shost,
144 "MSI-X: requrest_irq(%d) failed %d\n",
145 i,
146 ret);
147 snic_free_intr(snic);
148 break;
149 }
150 snic->msix[i].requested = 1;
151 }
152
153 return ret;
154} /* end of snic_request_intr */
155
156int
157snic_set_intr_mode(struct snic *snic)
158{
159 unsigned int n = ARRAY_SIZE(snic->wq);
160 unsigned int m = SNIC_CQ_IO_CMPL_MAX;
161 unsigned int i;
162
163 /*
164 * We need n WQs, m CQs, and n+m+1 INTRs
165 * (last INTR is used for WQ/CQ errors and notification area
166 */
167
168 BUILD_BUG_ON((ARRAY_SIZE(snic->wq) + SNIC_CQ_IO_CMPL_MAX) >
169 ARRAY_SIZE(snic->intr));
170 SNIC_BUG_ON(ARRAY_SIZE(snic->msix_entry) < (n + m + 1));
171
172 for (i = 0; i < (n + m + 1); i++)
173 snic->msix_entry[i].entry = i;
174
175 if (snic->wq_count >= n && snic->cq_count >= (n + m)) {
176 if (!pci_enable_msix(snic->pdev,
177 snic->msix_entry,
178 (n + m + 1))) {
179 snic->wq_count = n;
180 snic->cq_count = n + m;
181 snic->intr_count = n + m + 1;
182 snic->err_intr_offset = SNIC_MSIX_ERR_NOTIFY;
183
184 SNIC_ISR_DBG(snic->shost,
185 "Using MSI-X Interrupts\n");
186 svnic_dev_set_intr_mode(snic->vdev,
187 VNIC_DEV_INTR_MODE_MSIX);
188
189 return 0;
190 }
191 }
192
193 svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
194
195 return -EINVAL;
196} /* end of snic_set_intr_mode */
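/*
 * Editorial note: pci_enable_msix() returns 0 on success, a negative errno
 * on error, and a positive count of available vectors when fewer than
 * requested are free; only 0 is treated as success above. A sketch of the
 * same all-or-nothing allocation with the stricter helper (assumes
 * pci_enable_msix_exact() is available, i.e. kernels >= 3.14):
 *
 *	ret = pci_enable_msix_exact(snic->pdev, snic->msix_entry, n + m + 1);
 *	if (!ret) {
 *		... same bookkeeping as in the success path above ...
 *	}
 */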
197
198void
199snic_clear_intr_mode(struct snic *snic)
200{
201 pci_disable_msix(snic->pdev);
202
203 svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_INTX);
204}
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
new file mode 100644
index 000000000000..b2b87cef00fc
--- /dev/null
+++ b/drivers/scsi/snic/snic_main.c
@@ -0,0 +1,1044 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#include <linux/module.h>
19#include <linux/mempool.h>
20#include <linux/string.h>
21#include <linux/slab.h>
22#include <linux/errno.h>
23#include <linux/init.h>
24#include <linux/pci.h>
25#include <linux/skbuff.h>
26#include <linux/interrupt.h>
27#include <linux/spinlock.h>
28#include <linux/workqueue.h>
29#include <scsi/scsi_host.h>
30#include <scsi/scsi_tcq.h>
31
32#include "snic.h"
33#include "snic_fwint.h"
34
35#define PCI_DEVICE_ID_CISCO_SNIC 0x0046
36
37/* Devices supported by the snic module */
38static struct pci_device_id snic_id_table[] = {
39 {PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) },
40 { 0, } /* end of table */
41};
42
43unsigned int snic_log_level = 0x0;
44module_param(snic_log_level, int, S_IRUGO|S_IWUSR);
45MODULE_PARM_DESC(snic_log_level, "bitmask for snic logging levels");
46
47#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
48unsigned int snic_trace_max_pages = 16;
49module_param(snic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
50MODULE_PARM_DESC(snic_trace_max_pages,
51 "Total allocated memory pages for snic trace buffer");
52
53#endif
54unsigned int snic_max_qdepth = SNIC_DFLT_QUEUE_DEPTH;
55module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR);
56MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN");
57
58/*
59 * snic_slave_alloc : callback function to SCSI Mid Layer, called on
60 * scsi device initialization.
61 */
62static int
63snic_slave_alloc(struct scsi_device *sdev)
64{
65 struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));
66
67 if (!tgt || snic_tgt_chkready(tgt))
68 return -ENXIO;
69
70 return 0;
71}
72
73/*
74 * snic_slave_configure : callback function to SCSI Mid Layer, called
75 * after a scsi device is attached, to set queue depth and timeout.
76 */
77static int
78snic_slave_configure(struct scsi_device *sdev)
79{
80 struct snic *snic = shost_priv(sdev->host);
81 u32 qdepth = 0, max_ios = 0;
82 int tmo = SNIC_DFLT_CMD_TIMEOUT * HZ;
83
84 /* Set Queue Depth */
85 max_ios = snic_max_qdepth;
86 qdepth = min_t(u32, max_ios, SNIC_MAX_QUEUE_DEPTH);
87 scsi_change_queue_depth(sdev, qdepth);
88
89 if (snic->fwinfo.io_tmo > 1)
90 tmo = snic->fwinfo.io_tmo * HZ;
91
92 /* FW requires extended timeouts */
93 blk_queue_rq_timeout(sdev->request_queue, tmo);
94
95 return 0;
96}
97
98static int
99snic_change_queue_depth(struct scsi_device *sdev, int qdepth)
100{
101 int qsz = 0;
102
103 qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH);
104 scsi_change_queue_depth(sdev, qsz);
105 SNIC_INFO("QDepth Changed to %d\n", sdev->queue_depth);
106
107 return sdev->queue_depth;
108}
109
110static struct scsi_host_template snic_host_template = {
111 .module = THIS_MODULE,
112 .name = SNIC_DRV_NAME,
113 .queuecommand = snic_queuecommand,
114 .eh_abort_handler = snic_abort_cmd,
115 .eh_device_reset_handler = snic_device_reset,
116 .eh_host_reset_handler = snic_host_reset,
117 .slave_alloc = snic_slave_alloc,
118 .slave_configure = snic_slave_configure,
119 .change_queue_depth = snic_change_queue_depth,
120 .this_id = -1,
121 .cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH,
122 .can_queue = SNIC_MAX_IO_REQ,
123 .use_clustering = ENABLE_CLUSTERING,
124 .sg_tablesize = SNIC_MAX_SG_DESC_CNT,
125 .max_sectors = 0x800,
126 .shost_attrs = snic_attrs,
127 .use_blk_tags = 1,
128 .track_queue_depth = 1,
129 .cmd_size = sizeof(struct snic_internal_io_state),
130 .proc_name = "snic_scsi",
131};
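/*
 * Editorial note: .cmd_size reserves sizeof(struct snic_internal_io_state)
 * bytes of per-command private space; snic_queuecommand() (snic_scsi.c,
 * later in this patch) clears it via scsi_cmd_priv() before each dispatch.
 */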
132
133/*
134 * snic_handle_link_event : Handles link events such as link up/down/error
135 */
136void
137snic_handle_link_event(struct snic *snic)
138{
139 unsigned long flags;
140
141 spin_lock_irqsave(&snic->snic_lock, flags);
142 if (snic->stop_link_events) {
143 spin_unlock_irqrestore(&snic->snic_lock, flags);
144
145 return;
146 }
147 spin_unlock_irqrestore(&snic->snic_lock, flags);
148
149 queue_work(snic_glob->event_q, &snic->link_work);
150} /* end of snic_handle_link_event */
151
152/*
153 * snic_notify_set : sets notification area
154 * This notification area is to receive events from fw
155 * Note: snic supports only MSI-X interrupts, so svnic_dev_notify_set
156 * can be called directly.
157 */
158static int
159snic_notify_set(struct snic *snic)
160{
161 int ret = 0;
162 enum vnic_dev_intr_mode intr_mode;
163
164 intr_mode = svnic_dev_get_intr_mode(snic->vdev);
165
166 if (intr_mode == VNIC_DEV_INTR_MODE_MSIX) {
167 ret = svnic_dev_notify_set(snic->vdev, SNIC_MSIX_ERR_NOTIFY);
168 } else {
169 SNIC_HOST_ERR(snic->shost,
170 "Interrupt mode should be setup before devcmd notify set %d\n",
171 intr_mode);
172 ret = -1;
173 }
174
175 return ret;
176} /* end of snic_notify_set */
177
178/*
179 * snic_dev_wait : polls vnic open status.
180 */
181static int
182snic_dev_wait(struct vnic_dev *vdev,
183 int (*start)(struct vnic_dev *, int),
184 int (*finished)(struct vnic_dev *, int *),
185 int arg)
186{
187 unsigned long time;
188 int ret, done;
189 int retry_cnt = 0;
190
191 ret = start(vdev, arg);
192 if (ret)
193 return ret;
194
195 /*
196 * Wait for func to complete...2 seconds max.
197 *
198 * Sometimes schedule_timeout_uninterruptible takes a long time
199 * to wake up, which can cause retries to be skipped. The retry
200 * counter ensures at least two retries.
201 */
202 time = jiffies + (HZ * 2);
203 do {
204 ret = finished(vdev, &done);
205 if (ret)
206 return ret;
207
208 if (done)
209 return 0;
210 schedule_timeout_uninterruptible(HZ/10);
211 ++retry_cnt;
212 } while (time_after(time, jiffies) || (retry_cnt < 3));
213
214 return -ETIMEDOUT;
215} /* end of snic_dev_wait */
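/*
 * Typical use, as in snic_probe() later in this file:
 *
 *	ret = snic_dev_wait(snic->vdev, svnic_dev_open,
 *			    snic_vdev_open_done, 0);
 */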
216
217/*
218 * snic_cleanup: called by snic_remove
219 * Stops the snic device, masks all interrupts, and drains completed CQ
220 * entries. Posted WQ/RQ/Copy-WQ entries are cleaned up.
221 */
222static int
223snic_cleanup(struct snic *snic)
224{
225 unsigned int i;
226 int ret;
227
228 svnic_dev_disable(snic->vdev);
229 for (i = 0; i < snic->intr_count; i++)
230 svnic_intr_mask(&snic->intr[i]);
231
232 for (i = 0; i < snic->wq_count; i++) {
233 ret = svnic_wq_disable(&snic->wq[i]);
234 if (ret)
235 return ret;
236 }
237
238 /* Clean up completed IOs */
239 snic_fwcq_cmpl_handler(snic, -1);
240
241 snic_wq_cmpl_handler(snic, -1);
242
243 /* Clean up the IOs that have not completed */
244 for (i = 0; i < snic->wq_count; i++)
245 svnic_wq_clean(&snic->wq[i], snic_free_wq_buf);
246
247 for (i = 0; i < snic->cq_count; i++)
248 svnic_cq_clean(&snic->cq[i]);
249
250 for (i = 0; i < snic->intr_count; i++)
251 svnic_intr_clean(&snic->intr[i]);
252
253 /* Cleanup snic specific requests */
254 snic_free_all_untagged_reqs(snic);
255
256 /* Cleanup Pending SCSI commands */
257 snic_shutdown_scsi_cleanup(snic);
258
259 for (i = 0; i < SNIC_REQ_MAX_CACHES; i++)
260 mempool_destroy(snic->req_pool[i]);
261
262 return 0;
263} /* end of snic_cleanup */
264
265
266static void
267snic_iounmap(struct snic *snic)
268{
269 if (snic->bar0.vaddr)
270 iounmap(snic->bar0.vaddr);
271}
272
273/*
274 * snic_vdev_open_done : polls for svnic_dev_open cmd completion.
275 */
276static int
277snic_vdev_open_done(struct vnic_dev *vdev, int *done)
278{
279 struct snic *snic = svnic_dev_priv(vdev);
280 int ret;
281 int nretries = 5;
282
283 do {
284 ret = svnic_dev_open_done(vdev, done);
285 if (ret == 0)
286 break;
287
288 SNIC_HOST_INFO(snic->shost, "VNIC_DEV_OPEN timed out.\n");
289 } while (nretries--);
290
291 return ret;
292} /* end of snic_vdev_open_done */
293
294/*
295 * snic_add_host : registers scsi host with ML
296 */
297static int
298snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev)
299{
300 int ret = 0;
301
302 ret = scsi_add_host(shost, &pdev->dev);
303 if (ret) {
304 SNIC_HOST_ERR(shost,
305 "snic: scsi_add_host failed. %d\n",
306 ret);
307
308 return ret;
309 }
310
311 SNIC_BUG_ON(shost->work_q != NULL);
312 snprintf(shost->work_q_name, sizeof(shost->work_q_name), "scsi_wq_%d",
313 shost->host_no);
314 shost->work_q = create_singlethread_workqueue(shost->work_q_name);
315 if (!shost->work_q) {
316 SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n");
317
318 ret = -ENOMEM;
319 }
320
321 return ret;
322} /* end of snic_add_host */
323
324static void
325snic_del_host(struct Scsi_Host *shost)
326{
327 if (!shost->work_q)
328 return;
329
330 destroy_workqueue(shost->work_q);
331 shost->work_q = NULL;
332 scsi_remove_host(shost);
333}
334
335int
336snic_get_state(struct snic *snic)
337{
338 return atomic_read(&snic->state);
339}
340
341void
342snic_set_state(struct snic *snic, enum snic_state state)
343{
344 SNIC_HOST_INFO(snic->shost, "snic state change from %s to %s\n",
345 snic_state_to_str(snic_get_state(snic)),
346 snic_state_to_str(state));
347
348 atomic_set(&snic->state, state);
349}
350
351/*
352 * snic_probe : Initialize the snic interface.
353 */
354static int
355snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
356{
357 struct Scsi_Host *shost;
358 struct snic *snic;
359 mempool_t *pool;
360 unsigned long flags;
361 u32 max_ios = 0;
362 int ret, i;
363
364 /* Device Information */
365 SNIC_INFO("snic device %4x:%4x:%4x:%4x: ",
366 pdev->vendor, pdev->device, pdev->subsystem_vendor,
367 pdev->subsystem_device);
368
369 SNIC_INFO("snic device bus %x: slot %x: fn %x\n",
370 pdev->bus->number, PCI_SLOT(pdev->devfn),
371 PCI_FUNC(pdev->devfn));
372
373 /*
374 * Allocate SCSI Host and setup association between host, and snic
375 */
376 shost = scsi_host_alloc(&snic_host_template, sizeof(struct snic));
377 if (!shost) {
378 SNIC_ERR("Unable to alloc scsi_host\n");
379 ret = -ENOMEM;
380
381 goto prob_end;
382 }
383 snic = shost_priv(shost);
384 snic->shost = shost;
385
386 snprintf(snic->name, sizeof(snic->name) - 1, "%s%d", SNIC_DRV_NAME,
387 shost->host_no);
388
389 SNIC_HOST_INFO(shost,
390 "snic%d = %p shost = %p device bus %x: slot %x: fn %x\n",
391 shost->host_no, snic, shost, pdev->bus->number,
392 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
393#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
394 /* Per snic debugfs init */
395 ret = snic_stats_debugfs_init(snic);
396 if (ret) {
397 SNIC_HOST_ERR(snic->shost,
398 "Failed to initialize debugfs stats\n");
399 snic_stats_debugfs_remove(snic);
400 }
401#endif
402
403 /* Setup PCI Resources */
404 pci_set_drvdata(pdev, snic);
405 snic->pdev = pdev;
406
407 ret = pci_enable_device(pdev);
408 if (ret) {
409 SNIC_HOST_ERR(shost,
410 "Cannot enable PCI Resources, aborting : %d\n",
411 ret);
412
413 goto err_free_snic;
414 }
415
416 ret = pci_request_regions(pdev, SNIC_DRV_NAME);
417 if (ret) {
418 SNIC_HOST_ERR(shost,
419 "Cannot obtain PCI Resources, aborting : %d\n",
420 ret);
421
422 goto err_pci_disable;
423 }
424
425 pci_set_master(pdev);
426
427 /*
428 * Query PCI Controller on system for DMA addressing
429 * limitation for the device. Try 43-bit first, and
430 * fall back to 32-bit.
431 */
432 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(43));
433 if (ret) {
434 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
435 if (ret) {
436 SNIC_HOST_ERR(shost,
437 "No Usable DMA Configuration, aborting %d\n",
438 ret);
439
440 goto err_rel_regions;
441 }
442
443 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
444 if (ret) {
445 SNIC_HOST_ERR(shost,
446 "Unable to obtain 32-bit DMA for consistent allocations, aborting: %d\n",
447 ret);
448
449 goto err_rel_regions;
450 }
451 } else {
452 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(43));
453 if (ret) {
454 SNIC_HOST_ERR(shost,
455 "Unable to obtain 43-bit DMA for consistent allocations. aborting: %d\n",
456 ret);
457
458 goto err_rel_regions;
459 }
460 }
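	/*
	 * Editorial sketch: the same try-43-bit-then-32-bit fallback can be
	 * written with the combined streaming+coherent helper (assumes
	 * dma_set_mask_and_coherent() is available, i.e. kernels >= 3.13):
	 *
	 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43)) &&
	 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
	 *		goto err_rel_regions;
	 */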
461
462
463 /* Map vNIC resources from BAR0 */
464 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
465 SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");
466
467 ret = -ENODEV;
468 goto err_rel_regions;
469 }
470
471 snic->bar0.vaddr = pci_iomap(pdev, 0, 0);
472 if (!snic->bar0.vaddr) {
473 SNIC_HOST_ERR(shost,
474 "Cannot memory map BAR0 res hdr aborting.\n");
475
476 ret = -ENODEV;
477 goto err_rel_regions;
478 }
479
480 snic->bar0.bus_addr = pci_resource_start(pdev, 0);
481 snic->bar0.len = pci_resource_len(pdev, 0);
482 SNIC_BUG_ON(snic->bar0.bus_addr == 0);
483
484 /* Devcmd2 Resource Allocation and Initialization */
485 snic->vdev = svnic_dev_alloc_discover(NULL, snic, pdev, &snic->bar0, 1);
486 if (!snic->vdev) {
487 SNIC_HOST_ERR(shost, "vNIC Resource Discovery Failed.\n");
488
489 ret = -ENODEV;
490 goto err_iounmap;
491 }
492
493 ret = svnic_dev_cmd_init(snic->vdev, 0);
494 if (ret) {
495 SNIC_HOST_INFO(shost, "Devcmd2 Init Failed. err = %d\n", ret);
496
497 goto err_vnic_unreg;
498 }
499
500 ret = snic_dev_wait(snic->vdev, svnic_dev_open, snic_vdev_open_done, 0);
501 if (ret) {
502 SNIC_HOST_ERR(shost,
503 "vNIC dev open failed, aborting. %d\n",
504 ret);
505
506 goto err_vnic_unreg;
507 }
508
509 ret = svnic_dev_init(snic->vdev, 0);
510 if (ret) {
511 SNIC_HOST_ERR(shost,
512 "vNIC dev init failed. aborting. %d\n",
513 ret);
514
515 goto err_dev_close;
516 }
517
518 /* Get vNIC information */
519 ret = snic_get_vnic_config(snic);
520 if (ret) {
521 SNIC_HOST_ERR(shost,
522 "Get vNIC configuration failed, aborting. %d\n",
523 ret);
524
525 goto err_dev_close;
526 }
527
528 /* Configure Maximum Outstanding IO reqs */
529 max_ios = snic->config.io_throttle_count;
530 if (max_ios != SNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
531 shost->can_queue = min_t(u32, SNIC_MAX_IO_REQ,
532 max_t(u32, SNIC_MIN_IO_REQ, max_ios));
533
534 snic->max_tag_id = shost->can_queue;
535
536 ret = scsi_init_shared_tag_map(shost, snic->max_tag_id);
537 if (ret) {
538 SNIC_HOST_ERR(shost,
539 "Unable to alloc shared tag map. %d\n",
540 ret);
541
542 goto err_dev_close;
543 }
544
545 shost->max_lun = snic->config.luns_per_tgt;
546 shost->max_id = SNIC_MAX_TARGET;
547
548 shost->max_cmd_len = MAX_COMMAND_SIZE; /* defined in scsi_cmnd.h */
549
550 snic_get_res_counts(snic);
551
552 /*
553 * Assumption: only MSI-X is supported
554 */
555 ret = snic_set_intr_mode(snic);
556 if (ret) {
557 SNIC_HOST_ERR(shost,
558 "Failed to set intr mode aborting. %d\n",
559 ret);
560
561 goto err_dev_close;
562 }
563
564 ret = snic_alloc_vnic_res(snic);
565 if (ret) {
566 SNIC_HOST_ERR(shost,
567 "Failed to alloc vNIC resources aborting. %d\n",
568 ret);
569
570 goto err_clear_intr;
571 }
572
573 /* Initialize specific lists */
574 INIT_LIST_HEAD(&snic->list);
575
576 /*
577 * spl_cmd_list for maintaining snic specific cmds
578 * such as EXCH_VER_REQ, REPORT_TARGETS etc
579 */
580 INIT_LIST_HEAD(&snic->spl_cmd_list);
581 spin_lock_init(&snic->spl_cmd_lock);
582
583 /* initialize all snic locks */
584 spin_lock_init(&snic->snic_lock);
585
586 for (i = 0; i < SNIC_WQ_MAX; i++)
587 spin_lock_init(&snic->wq_lock[i]);
588
589 for (i = 0; i < SNIC_IO_LOCKS; i++)
590 spin_lock_init(&snic->io_req_lock[i]);
591
592 pool = mempool_create_slab_pool(2,
593 snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
594 if (!pool) {
595 SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");
596
597 goto err_free_res;
598 }
599
600 snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL] = pool;
601
602 pool = mempool_create_slab_pool(2,
603 snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
604 if (!pool) {
605 SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");
606
607 goto err_free_dflt_sgl_pool;
608 }
609
610 snic->req_pool[SNIC_REQ_CACHE_MAX_SGL] = pool;
611
612 pool = mempool_create_slab_pool(2,
613 snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
614 if (!pool) {
615 SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");
616
617 goto err_free_max_sgl_pool;
618 }
619
620 snic->req_pool[SNIC_REQ_TM_CACHE] = pool;
621
622 /* Initialize snic state */
623 atomic_set(&snic->state, SNIC_INIT);
624
625 atomic_set(&snic->ios_inflight, 0);
626
627 /* Setup notification buffer area */
628 ret = snic_notify_set(snic);
629 if (ret) {
630 SNIC_HOST_ERR(shost,
631 "Failed to alloc notify buffer aborting. %d\n",
632 ret);
633
634 goto err_free_tmreq_pool;
635 }
636
637 /*
638 * Initialization done with PCI system, hardware, firmware.
639 * Add shost to SCSI
640 */
641 ret = snic_add_host(shost, pdev);
642 if (ret) {
643 SNIC_HOST_ERR(shost,
644 "Adding scsi host Failed ... exiting. %d\n",
645 ret);
646
647 goto err_notify_unset;
648 }
649
650 spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
651 list_add_tail(&snic->list, &snic_glob->snic_list);
652 spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);
653
654 snic_disc_init(&snic->disc);
655 INIT_WORK(&snic->tgt_work, snic_handle_tgt_disc);
656 INIT_WORK(&snic->disc_work, snic_handle_disc);
657 INIT_WORK(&snic->link_work, snic_handle_link);
658
659 /* Enable all queues */
660 for (i = 0; i < snic->wq_count; i++)
661 svnic_wq_enable(&snic->wq[i]);
662
663 ret = svnic_dev_enable_wait(snic->vdev);
664 if (ret) {
665 SNIC_HOST_ERR(shost,
666 "vNIC dev enable failed w/ error %d\n",
667 ret);
668
669 goto err_vdev_enable;
670 }
671
672 ret = snic_request_intr(snic);
673 if (ret) {
674 SNIC_HOST_ERR(shost, "Unable to request irq. %d\n", ret);
675
676 goto err_req_intr;
677 }
678
679 for (i = 0; i < snic->intr_count; i++)
680 svnic_intr_unmask(&snic->intr[i]);
681
682 snic_set_state(snic, SNIC_ONLINE);
683
684 /* Get snic params */
685 ret = snic_get_conf(snic);
686 if (ret) {
687 SNIC_HOST_ERR(shost,
688 "Failed to get snic io config from FW w err %d\n",
689 ret);
690
691 goto err_get_conf;
692 }
693
694 ret = snic_disc_start(snic);
695 if (ret) {
696 SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n",
697 ret);
698
699 goto err_get_conf;
700 }
701
702 SNIC_HOST_INFO(shost, "SNIC Device Probe Successful.\n");
703
704 return 0;
705
706err_get_conf:
707 snic_free_all_untagged_reqs(snic);
708
709 for (i = 0; i < snic->intr_count; i++)
710 svnic_intr_mask(&snic->intr[i]);
711
712 snic_free_intr(snic);
713
714err_req_intr:
715 svnic_dev_disable(snic->vdev);
716
717err_vdev_enable:
718 for (i = 0; i < snic->wq_count; i++) {
719 int rc = 0;
720
721 rc = svnic_wq_disable(&snic->wq[i]);
722 if (rc) {
723 SNIC_HOST_ERR(shost,
724 "WQ Disable Failed w/ err = %d\n", rc);
725
726 break;
727 }
728 }
729 snic_del_host(snic->shost);
730
731err_notify_unset:
732 svnic_dev_notify_unset(snic->vdev);
733
734err_free_tmreq_pool:
735 mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]);
736
737err_free_max_sgl_pool:
738 mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_MAX_SGL]);
739
740err_free_dflt_sgl_pool:
741 mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL]);
742
743err_free_res:
744 snic_free_vnic_res(snic);
745
746err_clear_intr:
747 snic_clear_intr_mode(snic);
748
749err_dev_close:
750 svnic_dev_close(snic->vdev);
751
752err_vnic_unreg:
753 svnic_dev_unregister(snic->vdev);
754
755err_iounmap:
756 snic_iounmap(snic);
757
758err_rel_regions:
759 pci_release_regions(pdev);
760
761err_pci_disable:
762 pci_disable_device(pdev);
763
764err_free_snic:
765#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
766 snic_stats_debugfs_remove(snic);
767#endif
768 scsi_host_put(shost);
769 pci_set_drvdata(pdev, NULL);
770
771prob_end:
772 SNIC_INFO("sNIC device : bus %d: slot %d: fn %d Registration Failed.\n",
773 pdev->bus->number, PCI_SLOT(pdev->devfn),
774 PCI_FUNC(pdev->devfn));
775
776 return ret;
777} /* end of snic_probe */
778
779
780/*
781 * snic_remove : invoked on unbinding the interface to cleanup the
782 * resources allocated in snic_probe on initialization.
783 */
784static void
785snic_remove(struct pci_dev *pdev)
786{
787 struct snic *snic = pci_get_drvdata(pdev);
788 unsigned long flags;
789
790 if (!snic) {
791 SNIC_INFO("sNIC dev: bus %d slot %d fn %d snic inst is null.\n",
792 pdev->bus->number, PCI_SLOT(pdev->devfn),
793 PCI_FUNC(pdev->devfn));
794
795 return;
796 }
797
798 /*
799 * Mark state so that the workqueue thread stops forwarding
800 * received frames and link events. ISR and other threads
801 * that can queue work items will also stop creating work
802 * items on the snic workqueue
803 */
804 snic_set_state(snic, SNIC_OFFLINE);
805 spin_lock_irqsave(&snic->snic_lock, flags);
806 snic->stop_link_events = 1;
807 spin_unlock_irqrestore(&snic->snic_lock, flags);
808
809 flush_workqueue(snic_glob->event_q);
810 snic_disc_term(snic);
811
812 spin_lock_irqsave(&snic->snic_lock, flags);
813 snic->in_remove = 1;
814 spin_unlock_irqrestore(&snic->snic_lock, flags);
815
816 /*
817 * This stops the snic device, masks all interrupts, and drains
818 * completed CQ entries. Posted WQ/RQ/Copy-WQ entries are
819 * cleaned up.
820 */
821 snic_cleanup(snic);
822
823 spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
824 list_del(&snic->list);
825 spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);
826
827 snic_tgt_del_all(snic);
828#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
829 snic_stats_debugfs_remove(snic);
830#endif
831 snic_del_host(snic->shost);
832
833 svnic_dev_notify_unset(snic->vdev);
834 snic_free_intr(snic);
835 snic_free_vnic_res(snic);
836 snic_clear_intr_mode(snic);
837 svnic_dev_close(snic->vdev);
838 svnic_dev_unregister(snic->vdev);
839 snic_iounmap(snic);
840 pci_release_regions(pdev);
841 pci_disable_device(pdev);
842 pci_set_drvdata(pdev, NULL);
843
844 /* this frees Scsi_Host and snic memory (continuous chunk) */
845 scsi_host_put(snic->shost);
846} /* end of snic_remove */
847
848
849struct snic_global *snic_glob;
850
851/*
852 * snic_global_data_init: Initialize SNIC Global Data
853 * Note: all global lists and variables should be part of the global
854 * data; this helps in debugging.
855 */
856static int
857snic_global_data_init(void)
858{
859 int ret = 0;
860 struct kmem_cache *cachep;
861 ssize_t len = 0;
862
863 snic_glob = kzalloc(sizeof(*snic_glob), GFP_KERNEL);
864
865 if (!snic_glob) {
866 SNIC_ERR("Failed to allocate Global Context.\n");
867
868 ret = -ENOMEM;
869 goto gdi_end;
870 }
871
872#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
873 /* Debugfs related Initialization */
874 /* Create debugfs entries for snic */
875 ret = snic_debugfs_init();
876 if (ret < 0) {
877 SNIC_ERR("Failed to create sysfs dir for tracing and stats.\n");
878 snic_debugfs_term();
879 /* continue even if it fails */
880 }
881
882 /* Trace related Initialization */
883 /* Allocate memory for trace buffer */
884 ret = snic_trc_init();
885 if (ret < 0) {
886 SNIC_ERR("Trace buffer init failed, SNIC tracing disabled\n");
887 snic_trc_free();
888 /* continue even if it fails */
889 }
890
891#endif
892 INIT_LIST_HEAD(&snic_glob->snic_list);
893 spin_lock_init(&snic_glob->snic_list_lock);
894
895 /* Create a cache for allocation of snic_host_req+default size ESGLs */
896 len = sizeof(struct snic_req_info);
897 len += sizeof(struct snic_host_req) + sizeof(struct snic_dflt_sgl);
898 cachep = kmem_cache_create("snic_req_dfltsgl", len, SNIC_SG_DESC_ALIGN,
899 SLAB_HWCACHE_ALIGN, NULL);
900 if (!cachep) {
901 SNIC_ERR("Failed to create snic default sgl slab\n");
902 ret = -ENOMEM;
903
904 goto err_dflt_req_slab;
905 }
906 snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL] = cachep;
907
908 /* Create a cache for allocation of max size Extended SGLs */
909 len = sizeof(struct snic_req_info);
910 len += sizeof(struct snic_host_req) + sizeof(struct snic_max_sgl);
911 cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
912 SLAB_HWCACHE_ALIGN, NULL);
913 if (!cachep) {
914 SNIC_ERR("Failed to create snic max sgl slab\n");
915 ret = -ENOMEM;
916
917 goto err_max_req_slab;
918 }
919 snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep;
920
921 len = sizeof(struct snic_host_req);
922 cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
923 SLAB_HWCACHE_ALIGN, NULL);
924 if (!cachep) {
925 SNIC_ERR("Failed to create snic tm req slab\n");
926 ret = -ENOMEM;
927
928 goto err_tmreq_slab;
929 }
930 snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep;
931
932 /* snic_event queue */
933 snic_glob->event_q = create_singlethread_workqueue("snic_event_wq");
934 if (!snic_glob->event_q) {
935 SNIC_ERR("snic event queue create failed\n");
936 ret = -ENOMEM;
937
938 goto err_eventq;
939 }
940
941 return ret;
942
943err_eventq:
944 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
945
946err_tmreq_slab:
947 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
948
949err_max_req_slab:
950 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
951
952err_dflt_req_slab:
953#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
954 snic_trc_free();
955 snic_debugfs_term();
956#endif
957 kfree(snic_glob);
958 snic_glob = NULL;
959
960gdi_end:
961 return ret;
962} /* end of snic_glob_init */
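/*
 * Editorial note on the cache sizing above: each object allocated from the
 * SGL caches is a single contiguous chunk laid out as
 *
 *	[ snic_req_info | snic_host_req | SGL descriptors ]
 *
 * which is what lets helpers such as rqi_to_req() and req_to_sgl() (used in
 * snic_scsi.c below) derive one pointer from another with plain arithmetic.
 */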
963
964/*
965 * snic_global_data_cleanup : Frees SNIC Global Data
966 */
967static void
968snic_global_data_cleanup(void)
969{
970 SNIC_BUG_ON(snic_glob == NULL);
971
972 destroy_workqueue(snic_glob->event_q);
973 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
974 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
975 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
976
977#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
978 /* Freeing Trace Resources */
979 snic_trc_free();
980
981 /* Freeing Debugfs Resources */
982 snic_debugfs_term();
983#endif
984 kfree(snic_glob);
985 snic_glob = NULL;
986} /* end of snic_glob_cleanup */
987
988static struct pci_driver snic_driver = {
989 .name = SNIC_DRV_NAME,
990 .id_table = snic_id_table,
991 .probe = snic_probe,
992 .remove = snic_remove,
993};
994
995static int __init
996snic_init_module(void)
997{
998 int ret = 0;
999
1000#ifndef __x86_64__
1001 SNIC_INFO("SNIC Driver is supported only for x86_64 platforms!\n");
1002 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
1003#endif
1004
1005 SNIC_INFO("%s, ver %s\n", SNIC_DRV_DESCRIPTION, SNIC_DRV_VERSION);
1006
1007 ret = snic_global_data_init();
1008 if (ret) {
1009 SNIC_ERR("Failed to Initialize Global Data.\n");
1010
1011 return ret;
1012 }
1013
1014 ret = pci_register_driver(&snic_driver);
1015 if (ret < 0) {
1016 SNIC_ERR("PCI driver register error\n");
1017
1018 goto err_pci_reg;
1019 }
1020
1021 return ret;
1022
1023err_pci_reg:
1024 snic_global_data_cleanup();
1025
1026 return ret;
1027}
1028
1029static void __exit
1030snic_cleanup_module(void)
1031{
1032 pci_unregister_driver(&snic_driver);
1033 snic_global_data_cleanup();
1034}
1035
1036module_init(snic_init_module);
1037module_exit(snic_cleanup_module);
1038
1039MODULE_LICENSE("GPL v2");
1040MODULE_DESCRIPTION(SNIC_DRV_DESCRIPTION);
1041MODULE_VERSION(SNIC_DRV_VERSION);
1042MODULE_DEVICE_TABLE(pci, snic_id_table);
1043MODULE_AUTHOR("Narsimhulu Musini <nmusini@cisco.com>, "
1044 "Sesidhar Baddela <sebaddel@cisco.com>");
diff --git a/drivers/scsi/snic/snic_res.c b/drivers/scsi/snic/snic_res.c
new file mode 100644
index 000000000000..b54912c8ca0c
--- /dev/null
+++ b/drivers/scsi/snic/snic_res.c
@@ -0,0 +1,295 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#include <linux/errno.h>
19#include <linux/types.h>
20#include <linux/pci.h>
21
22#include "wq_enet_desc.h"
23#include "cq_enet_desc.h"
24#include "vnic_resource.h"
25#include "vnic_dev.h"
26#include "vnic_wq.h"
27#include "vnic_cq.h"
28#include "vnic_intr.h"
29#include "vnic_stats.h"
30#include "snic.h"
31
32int
33snic_get_vnic_config(struct snic *snic)
34{
35 struct vnic_snic_config *c = &snic->config;
36 int ret;
37
38#define GET_CONFIG(m) \
39 do { \
40 ret = svnic_dev_spec(snic->vdev, \
41 offsetof(struct vnic_snic_config, m), \
42 sizeof(c->m), \
43 &c->m); \
44 if (ret) { \
45 SNIC_HOST_ERR(snic->shost, \
46 "Error getting %s, %d\n", #m, ret); \
47 return ret; \
48 } \
49 } while (0)
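/*
 * For illustration, GET_CONFIG(hid) expands (roughly) to:
 *
 *	ret = svnic_dev_spec(snic->vdev,
 *			     offsetof(struct vnic_snic_config, hid),
 *			     sizeof(c->hid), &c->hid);
 *	if (ret) {
 *		SNIC_HOST_ERR(snic->shost, "Error getting %s, %d\n",
 *			      "hid", ret);
 *		return ret;
 *	}
 */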
50
51 GET_CONFIG(wq_enet_desc_count);
52 GET_CONFIG(maxdatafieldsize);
53 GET_CONFIG(intr_timer);
54 GET_CONFIG(intr_timer_type);
55 GET_CONFIG(flags);
56 GET_CONFIG(io_throttle_count);
57 GET_CONFIG(port_down_timeout);
58 GET_CONFIG(port_down_io_retries);
59 GET_CONFIG(luns_per_tgt);
60 GET_CONFIG(xpt_type);
61 GET_CONFIG(hid);
62
63 c->wq_enet_desc_count = min_t(u32,
64 VNIC_SNIC_WQ_DESCS_MAX,
65 max_t(u32,
66 VNIC_SNIC_WQ_DESCS_MIN,
67 c->wq_enet_desc_count));
68
69 c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16);
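	/*
	 * Worked example with made-up limits: if the firmware reports 1000
	 * descriptors and the supported range is [64, 4096], the clamp above
	 * keeps 1000 and ALIGN(1000, 16) rounds it up to 1008.
	 */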
70
71 c->maxdatafieldsize = min_t(u32,
72 VNIC_SNIC_MAXDATAFIELDSIZE_MAX,
73 max_t(u32,
74 VNIC_SNIC_MAXDATAFIELDSIZE_MIN,
75 c->maxdatafieldsize));
76
77 c->io_throttle_count = min_t(u32,
78 VNIC_SNIC_IO_THROTTLE_COUNT_MAX,
79 max_t(u32,
80 VNIC_SNIC_IO_THROTTLE_COUNT_MIN,
81 c->io_throttle_count));
82
83 c->port_down_timeout = min_t(u32,
84 VNIC_SNIC_PORT_DOWN_TIMEOUT_MAX,
85 c->port_down_timeout);
86
87 c->port_down_io_retries = min_t(u32,
88 VNIC_SNIC_PORT_DOWN_IO_RETRIES_MAX,
89 c->port_down_io_retries);
90
91 c->luns_per_tgt = min_t(u32,
92 VNIC_SNIC_LUNS_PER_TARGET_MAX,
93 max_t(u32,
94 VNIC_SNIC_LUNS_PER_TARGET_MIN,
95 c->luns_per_tgt));
96
97 c->intr_timer = min_t(u32, VNIC_INTR_TIMER_MAX, c->intr_timer);
98
99 SNIC_INFO("vNIC resources wq %d\n", c->wq_enet_desc_count);
100 SNIC_INFO("vNIC mtu %d intr timer %d\n",
101 c->maxdatafieldsize,
102 c->intr_timer);
103
104 SNIC_INFO("vNIC flags 0x%x luns per tgt %d\n",
105 c->flags,
106 c->luns_per_tgt);
107
108 SNIC_INFO("vNIC io throttle count %d\n", c->io_throttle_count);
109 SNIC_INFO("vNIC port down timeout %d port down io retries %d\n",
110 c->port_down_timeout,
111 c->port_down_io_retries);
112
113 SNIC_INFO("vNIC back end type = %d\n", c->xpt_type);
114 SNIC_INFO("vNIC hid = %d\n", c->hid);
115
116 return 0;
117}
118
119void
120snic_get_res_counts(struct snic *snic)
121{
122 snic->wq_count = svnic_dev_get_res_count(snic->vdev, RES_TYPE_WQ);
123 SNIC_BUG_ON(snic->wq_count == 0);
124 snic->cq_count = svnic_dev_get_res_count(snic->vdev, RES_TYPE_CQ);
125 SNIC_BUG_ON(snic->cq_count == 0);
126 snic->intr_count = svnic_dev_get_res_count(snic->vdev,
127 RES_TYPE_INTR_CTRL);
128 SNIC_BUG_ON(snic->intr_count == 0);
129}
130
131void
132snic_free_vnic_res(struct snic *snic)
133{
134 unsigned int i;
135
136 for (i = 0; i < snic->wq_count; i++)
137 svnic_wq_free(&snic->wq[i]);
138
139 for (i = 0; i < snic->cq_count; i++)
140 svnic_cq_free(&snic->cq[i]);
141
142 for (i = 0; i < snic->intr_count; i++)
143 svnic_intr_free(&snic->intr[i]);
144}
145
146int
147snic_alloc_vnic_res(struct snic *snic)
148{
149 enum vnic_dev_intr_mode intr_mode;
150 unsigned int mask_on_assertion;
151 unsigned int intr_offset;
152 unsigned int err_intr_enable;
153 unsigned int err_intr_offset;
154 unsigned int i;
155 int ret;
156
157 intr_mode = svnic_dev_get_intr_mode(snic->vdev);
158
159 SNIC_INFO("vNIC interrupt mode: %s\n",
160 ((intr_mode == VNIC_DEV_INTR_MODE_INTX) ?
161 "Legacy PCI INTx" :
162 ((intr_mode == VNIC_DEV_INTR_MODE_MSI) ?
163 "MSI" :
164 ((intr_mode == VNIC_DEV_INTR_MODE_MSIX) ?
165 "MSI-X" : "Unknown"))));
166
167 /* only MSI-X is supported */
168 SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX);
169
170 SNIC_INFO("wq %d cq %d intr %d\n", snic->wq_count,
171 snic->cq_count,
172 snic->intr_count);
173
174
175 /* Allocate WQs used for SCSI IOs */
176 for (i = 0; i < snic->wq_count; i++) {
177 ret = svnic_wq_alloc(snic->vdev,
178 &snic->wq[i],
179 i,
180 snic->config.wq_enet_desc_count,
181 sizeof(struct wq_enet_desc));
182 if (ret)
183 goto error_cleanup;
184 }
185
186 /* CQ for each WQ */
187 for (i = 0; i < snic->wq_count; i++) {
188 ret = svnic_cq_alloc(snic->vdev,
189 &snic->cq[i],
190 i,
191 snic->config.wq_enet_desc_count,
192 sizeof(struct cq_enet_wq_desc));
193 if (ret)
194 goto error_cleanup;
195 }
196
197 SNIC_BUG_ON(snic->cq_count != 2 * snic->wq_count);
198 /* CQ for FW TO host */
199 for (i = snic->wq_count; i < snic->cq_count; i++) {
200 ret = svnic_cq_alloc(snic->vdev,
201 &snic->cq[i],
202 i,
203 (snic->config.wq_enet_desc_count * 3),
204 sizeof(struct snic_fw_req));
205 if (ret)
206 goto error_cleanup;
207 }
208
209 for (i = 0; i < snic->intr_count; i++) {
210 ret = svnic_intr_alloc(snic->vdev, &snic->intr[i], i);
211 if (ret)
212 goto error_cleanup;
213 }
214
215 /*
216 * Init WQ Resources.
217 * WQ[0 to n] points to CQ[0 to n-1]
218 * firmware to host comm points to CQ[n to m+1]
219 */
220 err_intr_enable = 1;
221 err_intr_offset = snic->err_intr_offset;
222
223 for (i = 0; i < snic->wq_count; i++) {
224 svnic_wq_init(&snic->wq[i],
225 i,
226 err_intr_enable,
227 err_intr_offset);
228 }
229
230 for (i = 0; i < snic->cq_count; i++) {
231 intr_offset = i;
232
233 svnic_cq_init(&snic->cq[i],
234 0 /* flow_control_enable */,
235 1 /* color_enable */,
236 0 /* cq_head */,
237 0 /* cq_tail */,
238 1 /* cq_tail_color */,
239 1 /* interrupt_enable */,
240 1 /* cq_entry_enable */,
241 0 /* cq_message_enable */,
242 intr_offset,
243 0 /* cq_message_addr */);
244 }
245
246 /*
247 * Init INTR resources
248 * Assumption : snic is always in MSI-X mode
249 */
250 SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX);
251 mask_on_assertion = 1;
252
253 for (i = 0; i < snic->intr_count; i++) {
254 svnic_intr_init(&snic->intr[i],
255 snic->config.intr_timer,
256 snic->config.intr_timer_type,
257 mask_on_assertion);
258 }
259
260 /* init the stats memory by making the first call here */
261 ret = svnic_dev_stats_dump(snic->vdev, &snic->stats);
262 if (ret) {
263 SNIC_HOST_ERR(snic->shost,
264 "svnic_dev_stats_dump failed - x%x\n",
265 ret);
266 goto error_cleanup;
267 }
268
269 /* Clear LIF stats */
270 svnic_dev_stats_clear(snic->vdev);
271 ret = 0;
272
273 return ret;
274
275error_cleanup:
276 snic_free_vnic_res(snic);
277
278 return ret;
279}
280
281void
282snic_log_q_error(struct snic *snic)
283{
284 unsigned int i;
285 u32 err_status;
286
287 for (i = 0; i < snic->wq_count; i++) {
288 err_status = ioread32(&snic->wq[i].ctrl->error_status);
289 if (err_status)
290 SNIC_HOST_ERR(snic->shost,
291 "WQ[%d] error status %d\n",
292 i,
293 err_status);
294 }
295} /* end of snic_log_q_error */
diff --git a/drivers/scsi/snic/snic_res.h b/drivers/scsi/snic/snic_res.h
new file mode 100644
index 000000000000..273f72f2a023
--- /dev/null
+++ b/drivers/scsi/snic/snic_res.h
@@ -0,0 +1,97 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#ifndef __SNIC_RES_H
19#define __SNIC_RES_H
20
21#include "snic_io.h"
22#include "wq_enet_desc.h"
23#include "vnic_wq.h"
24#include "snic_fwint.h"
25#include "vnic_cq_fw.h"
26
27static inline void
28snic_icmnd_init(struct snic_host_req *req, u32 cmnd_id, u32 host_id, u64 ctx,
29 u16 flags, u64 tgt_id, u8 *lun, u8 *scsi_cdb, u8 cdb_len,
30 u32 data_len, u16 sg_cnt, ulong sgl_addr,
31 dma_addr_t sns_addr_pa, u32 sense_len)
32{
33 snic_io_hdr_enc(&req->hdr, SNIC_REQ_ICMND, 0, cmnd_id, host_id, sg_cnt,
34 ctx);
35
36 req->u.icmnd.flags = cpu_to_le16(flags);
37 req->u.icmnd.tgt_id = cpu_to_le64(tgt_id);
38 memcpy(&req->u.icmnd.lun_id, lun, LUN_ADDR_LEN);
39 req->u.icmnd.cdb_len = cdb_len;
40 memset(req->u.icmnd.cdb, 0, SNIC_CDB_LEN);
41 memcpy(req->u.icmnd.cdb, scsi_cdb, cdb_len);
42 req->u.icmnd.data_len = cpu_to_le32(data_len);
43 req->u.icmnd.sg_addr = cpu_to_le64(sgl_addr);
44 req->u.icmnd.sense_len = cpu_to_le32(sense_len);
45 req->u.icmnd.sense_addr = cpu_to_le64(sns_addr_pa);
46}
47
48static inline void
49snic_itmf_init(struct snic_host_req *req, u32 cmnd_id, u32 host_id, ulong ctx,
50 u16 flags, u32 req_id, u64 tgt_id, u8 *lun, u8 tm_type)
51{
52 snic_io_hdr_enc(&req->hdr, SNIC_REQ_ITMF, 0, cmnd_id, host_id, 0, ctx);
53
54 req->u.itmf.tm_type = tm_type;
55 req->u.itmf.flags = cpu_to_le16(flags);
56 /* req_id valid only in abort, clear task */
57 req->u.itmf.req_id = cpu_to_le32(req_id);
58 req->u.itmf.tgt_id = cpu_to_le64(tgt_id);
59 memcpy(&req->u.itmf.lun_id, lun, LUN_ADDR_LEN);
60}
61
62static inline void
63snic_queue_wq_eth_desc(struct vnic_wq *wq,
64 void *os_buf,
65 dma_addr_t dma_addr,
66 unsigned int len,
67 int vlan_tag_insert,
68 unsigned int vlan_tag,
69 int cq_entry)
70{
71 struct wq_enet_desc *desc = svnic_wq_next_desc(wq);
72
73 wq_enet_desc_enc(desc,
74 (u64)dma_addr | VNIC_PADDR_TARGET,
75 (u16)len,
76 0, /* mss_or_csum_offset */
77 0, /* fc_eof */
78 0, /* offload mode */
79 1, /* eop */
80 (u8)cq_entry,
81 0, /* fcoe_encap */
82 (u8)vlan_tag_insert,
83 (u16)vlan_tag,
84 0 /* loopback */);
85
86 svnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);
87}
88
89struct snic;
90
91int snic_get_vnic_config(struct snic *);
92int snic_alloc_vnic_res(struct snic *);
93void snic_free_vnic_res(struct snic *);
94void snic_get_res_counts(struct snic *);
95void snic_log_q_error(struct snic *);
96int snic_get_vnic_resources_size(struct snic *);
97#endif /* __SNIC_RES_H */
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
new file mode 100644
index 000000000000..2c7b4c321cbe
--- /dev/null
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -0,0 +1,2632 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#include <linux/mempool.h>
19#include <linux/errno.h>
20#include <linux/init.h>
21#include <linux/workqueue.h>
22#include <linux/pci.h>
23#include <linux/spinlock.h>
24#include <linux/delay.h>
25#include <linux/gfp.h>
26#include <scsi/scsi.h>
27#include <scsi/scsi_host.h>
28#include <scsi/scsi_device.h>
29#include <scsi/scsi_cmnd.h>
30#include <scsi/scsi_tcq.h>
31#include <scsi/scsi_dbg.h>
32
33#include "snic_io.h"
34#include "snic.h"
35
36#define snic_cmd_tag(sc) (((struct scsi_cmnd *) sc)->request->tag)
37
38const char *snic_state_str[] = {
39 [SNIC_INIT] = "SNIC_INIT",
40 [SNIC_ERROR] = "SNIC_ERROR",
41 [SNIC_ONLINE] = "SNIC_ONLINE",
42 [SNIC_OFFLINE] = "SNIC_OFFLINE",
43 [SNIC_FWRESET] = "SNIC_FWRESET",
44};
45
46static const char * const snic_req_state_str[] = {
47 [SNIC_IOREQ_NOT_INITED] = "SNIC_IOREQ_NOT_INITED",
48 [SNIC_IOREQ_PENDING] = "SNIC_IOREQ_PENDING",
49 [SNIC_IOREQ_ABTS_PENDING] = "SNIC_IOREQ_ABTS_PENDING",
50 [SNIC_IOREQ_ABTS_COMPLETE] = "SNIC_IOREQ_ABTS_COMPLETE",
51 [SNIC_IOREQ_LR_PENDING] = "SNIC_IOREQ_LR_PENDING",
52 [SNIC_IOREQ_LR_COMPLETE] = "SNIC_IOREQ_LR_COMPLETE",
53 [SNIC_IOREQ_COMPLETE] = "SNIC_IOREQ_CMD_COMPLETE",
54};
55
56/* snic cmd status strings */
57static const char * const snic_io_status_str[] = {
58 [SNIC_STAT_IO_SUCCESS] = "SNIC_STAT_IO_SUCCESS", /* 0x0 */
59 [SNIC_STAT_INVALID_HDR] = "SNIC_STAT_INVALID_HDR",
60 [SNIC_STAT_OUT_OF_RES] = "SNIC_STAT_OUT_OF_RES",
61 [SNIC_STAT_INVALID_PARM] = "SNIC_STAT_INVALID_PARM",
62 [SNIC_STAT_REQ_NOT_SUP] = "SNIC_STAT_REQ_NOT_SUP",
63 [SNIC_STAT_IO_NOT_FOUND] = "SNIC_STAT_IO_NOT_FOUND",
64 [SNIC_STAT_ABORTED] = "SNIC_STAT_ABORTED",
65 [SNIC_STAT_TIMEOUT] = "SNIC_STAT_TIMEOUT",
66 [SNIC_STAT_SGL_INVALID] = "SNIC_STAT_SGL_INVALID",
67 [SNIC_STAT_DATA_CNT_MISMATCH] = "SNIC_STAT_DATA_CNT_MISMATCH",
68 [SNIC_STAT_FW_ERR] = "SNIC_STAT_FW_ERR",
69 [SNIC_STAT_ITMF_REJECT] = "SNIC_STAT_ITMF_REJECT",
70 [SNIC_STAT_ITMF_FAIL] = "SNIC_STAT_ITMF_FAIL",
71 [SNIC_STAT_ITMF_INCORRECT_LUN] = "SNIC_STAT_ITMF_INCORRECT_LUN",
72 [SNIC_STAT_CMND_REJECT] = "SNIC_STAT_CMND_REJECT",
73 [SNIC_STAT_DEV_OFFLINE] = "SNIC_STAT_DEV_OFFLINE",
74 [SNIC_STAT_NO_BOOTLUN] = "SNIC_STAT_NO_BOOTLUN",
75 [SNIC_STAT_SCSI_ERR] = "SNIC_STAT_SCSI_ERR",
76 [SNIC_STAT_NOT_READY] = "SNIC_STAT_NOT_READY",
77 [SNIC_STAT_FATAL_ERROR] = "SNIC_STAT_FATAL_ERROR",
78};
79
80static void snic_scsi_cleanup(struct snic *, int);
81
82const char *
83snic_state_to_str(unsigned int state)
84{
85 if (state >= ARRAY_SIZE(snic_state_str) || !snic_state_str[state])
86 return "Unknown";
87
88 return snic_state_str[state];
89}
90
91static const char *
92snic_io_status_to_str(unsigned int state)
93{
94 if ((state >= ARRAY_SIZE(snic_io_status_str)) ||
95 (!snic_io_status_str[state]))
96 return "Unknown";
97
98 return snic_io_status_str[state];
99}
100
101static const char *
102snic_ioreq_state_to_str(unsigned int state)
103{
104 if (state >= ARRAY_SIZE(snic_req_state_str) ||
105 !snic_req_state_str[state])
106 return "Unknown";
107
108 return snic_req_state_str[state];
109}
110
111static inline spinlock_t *
112snic_io_lock_hash(struct snic *snic, struct scsi_cmnd *sc)
113{
114 u32 hash = snic_cmd_tag(sc) & (SNIC_IO_LOCKS - 1);
115
116 return &snic->io_req_lock[hash];
117}
118
119static inline spinlock_t *
120snic_io_lock_tag(struct snic *snic, int tag)
121{
122 return &snic->io_req_lock[tag & (SNIC_IO_LOCKS - 1)];
123}
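/*
 * Editorial note: both hashes rely on SNIC_IO_LOCKS being a power of two so
 * that "tag & (SNIC_IO_LOCKS - 1)" is equivalent to "tag % SNIC_IO_LOCKS".
 * A compile-time guard (illustrative, not in the original) would be:
 *
 *	BUILD_BUG_ON(SNIC_IO_LOCKS & (SNIC_IO_LOCKS - 1));
 */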
124
125/* snic_release_req_buf : Releases snic_req_info */
126static void
127snic_release_req_buf(struct snic *snic,
128 struct snic_req_info *rqi,
129 struct scsi_cmnd *sc)
130{
131 struct snic_host_req *req = rqi_to_req(rqi);
132
133 /* Freeing cmd without marking completion, not okay */
134 SNIC_BUG_ON(!((CMD_STATE(sc) == SNIC_IOREQ_COMPLETE) ||
135 (CMD_STATE(sc) == SNIC_IOREQ_ABTS_COMPLETE) ||
136 (CMD_FLAGS(sc) & SNIC_DEV_RST_NOTSUP) ||
137 (CMD_FLAGS(sc) & SNIC_IO_INTERNAL_TERM_ISSUED) ||
138 (CMD_FLAGS(sc) & SNIC_DEV_RST_TERM_ISSUED) ||
139 (CMD_FLAGS(sc) & SNIC_SCSI_CLEANUP) ||
140 (CMD_STATE(sc) == SNIC_IOREQ_LR_COMPLETE)));
141
142 SNIC_SCSI_DBG(snic->shost,
143 "Rel_req:sc %p:tag %x:rqi %p:ioreq %p:abt %p:dr %p: state %s:flags 0x%llx\n",
144 sc, snic_cmd_tag(sc), rqi, rqi->req, rqi->abort_req,
145 rqi->dr_req, snic_ioreq_state_to_str(CMD_STATE(sc)),
146 CMD_FLAGS(sc));
147
148 if (req->u.icmnd.sense_addr)
149 pci_unmap_single(snic->pdev,
150 le64_to_cpu(req->u.icmnd.sense_addr),
151 SCSI_SENSE_BUFFERSIZE,
152 PCI_DMA_FROMDEVICE);
153
154 scsi_dma_unmap(sc);
155
156 snic_req_free(snic, rqi);
157} /* end of snic_release_req_buf */
158
159/*
160 * snic_queue_icmnd_req : Queues snic_icmnd request
161 */
162static int
163snic_queue_icmnd_req(struct snic *snic,
164 struct snic_req_info *rqi,
165 struct scsi_cmnd *sc,
166 int sg_cnt)
167{
168 struct scatterlist *sg;
169 struct snic_sg_desc *sgd;
170 dma_addr_t pa = 0;
171 struct scsi_lun lun;
172 u16 flags = 0;
173 int ret = 0;
174 unsigned int i;
175
176 if (sg_cnt) {
177 flags = SNIC_ICMND_ESGL;
178 sgd = (struct snic_sg_desc *) req_to_sgl(rqi->req);
179
180 for_each_sg(scsi_sglist(sc), sg, sg_cnt, i) {
181 sgd->addr = cpu_to_le64(sg_dma_address(sg));
182 sgd->len = cpu_to_le32(sg_dma_len(sg));
183 sgd->_resvd = 0;
184 sgd++;
185 }
186 }
187
188 pa = pci_map_single(snic->pdev,
189 sc->sense_buffer,
190 SCSI_SENSE_BUFFERSIZE,
191 PCI_DMA_FROMDEVICE);
192
193 if (pci_dma_mapping_error(snic->pdev, pa)) {
194 SNIC_HOST_ERR(snic->shost,
195 "QIcmnd:PCI Map Failed for sns buf %p tag %x\n",
196 sc->sense_buffer, snic_cmd_tag(sc));
197 ret = -ENOMEM;
198
199 return ret;
200 }
201
202 int_to_scsilun(sc->device->lun, &lun);
203 if (sc->sc_data_direction == DMA_FROM_DEVICE)
204 flags |= SNIC_ICMND_RD;
205 if (sc->sc_data_direction == DMA_TO_DEVICE)
206 flags |= SNIC_ICMND_WR;
207
208 /* Initialize icmnd */
209 snic_icmnd_init(rqi->req,
210 snic_cmd_tag(sc),
211 snic->config.hid, /* hid */
212 (ulong) rqi,
213 flags, /* command flags */
214 rqi->tgt_id,
215 lun.scsi_lun,
216 sc->cmnd,
217 sc->cmd_len,
218 scsi_bufflen(sc),
219 sg_cnt,
220 (ulong) req_to_sgl(rqi->req),
221 pa, /* sense buffer pa */
222 SCSI_SENSE_BUFFERSIZE);
223
224 ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
225 if (ret)
226 SNIC_HOST_ERR(snic->shost,
227 "QIcmnd: Queuing Icmnd Failed. ret = %d\n",
228 ret);
229
230 return ret;
231} /* end of snic_queue_icmnd_req */
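/*
 * Editorial note: each extended-SGL entry written above carries, per the
 * fields used here (addr/len/_resvd; the struct snic_sg_desc definition is
 * assumed to live in one of the snic headers):
 *
 *	addr   - little-endian DMA address of the segment
 *	len    - little-endian segment length in bytes
 *	_resvd - reserved, cleared to zero
 */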
232
233/*
234 * snic_issue_scsi_req : Prepares IO request and Issues to FW.
235 */
236static int
237snic_issue_scsi_req(struct snic *snic,
238 struct snic_tgt *tgt,
239 struct scsi_cmnd *sc)
240{
241 struct snic_req_info *rqi = NULL;
242 int sg_cnt = 0;
243 int ret = 0;
244 u32 tag = snic_cmd_tag(sc);
245 u64 cmd_trc = 0, cmd_st_flags = 0;
246 spinlock_t *io_lock = NULL;
247 unsigned long flags;
248
249 CMD_STATE(sc) = SNIC_IOREQ_NOT_INITED;
250 CMD_FLAGS(sc) = SNIC_NO_FLAGS;
251 sg_cnt = scsi_dma_map(sc);
252 if (sg_cnt < 0) {
253 SNIC_TRC((u16)snic->shost->host_no, tag, (ulong) sc, 0,
254 sc->cmnd[0], sg_cnt, CMD_STATE(sc));
255
256 SNIC_HOST_ERR(snic->shost, "issue_sc:Failed to map SG List.\n");
257 ret = -ENOMEM;
258
259 goto issue_sc_end;
260 }
261
262 rqi = snic_req_init(snic, sg_cnt);
263 if (!rqi) {
264 scsi_dma_unmap(sc);
265 ret = -ENOMEM;
266
267 goto issue_sc_end;
268 }
269
270 rqi->tgt_id = tgt->id;
271 rqi->sc = sc;
272
273 CMD_STATE(sc) = SNIC_IOREQ_PENDING;
274 CMD_SP(sc) = (char *) rqi;
275 cmd_trc = SNIC_TRC_CMD(sc);
276 CMD_FLAGS(sc) |= (SNIC_IO_INITIALIZED | SNIC_IO_ISSUED);
277 cmd_st_flags = SNIC_TRC_CMD_STATE_FLAGS(sc);
278 io_lock = snic_io_lock_hash(snic, sc);
279
280 /* create wq desc and enqueue it */
281 ret = snic_queue_icmnd_req(snic, rqi, sc, sg_cnt);
282 if (ret) {
283 SNIC_HOST_ERR(snic->shost,
284 "issue_sc: icmnd qing Failed for sc %p, err %d\n",
285 sc, ret);
286
287 spin_lock_irqsave(io_lock, flags);
288 rqi = (struct snic_req_info *) CMD_SP(sc);
289 CMD_SP(sc) = NULL;
290 CMD_STATE(sc) = SNIC_IOREQ_COMPLETE;
291 CMD_FLAGS(sc) &= ~SNIC_IO_ISSUED; /* turn off the flag */
292 spin_unlock_irqrestore(io_lock, flags);
293
294 if (rqi)
295 snic_release_req_buf(snic, rqi, sc);
296
297 SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, 0, 0, 0,
298 SNIC_TRC_CMD_STATE_FLAGS(sc));
299 } else {
300 u32 io_sz = scsi_bufflen(sc) >> 9;
301 u32 qtime = jiffies - rqi->start_time;
302 struct snic_io_stats *iostats = &snic->s_stats.io;
303
304 if (io_sz > atomic64_read(&iostats->max_io_sz))
305 atomic64_set(&iostats->max_io_sz, io_sz);
306
307 if (qtime > atomic64_read(&iostats->max_qtime))
308 atomic64_set(&iostats->max_qtime, qtime);
309
310 SNIC_SCSI_DBG(snic->shost,
311 "issue_sc:sc %p, tag %d queued to WQ.\n",
312 sc, tag);
313
314 SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, (ulong) rqi,
315 sg_cnt, cmd_trc, cmd_st_flags);
316 }
317
318issue_sc_end:
319
320 return ret;
321} /* end of snic_issue_scsi_req */
322
323
324/*
325 * snic_queuecommand
326 * Routine used by the SCSI mid layer to send a scsi cdb to this LLD;
327 * invoked without host_lock held (the template does not use DEF_SCSI_QCMD)
328 */
329int
330snic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
331{
332 struct snic_tgt *tgt = NULL;
333 struct snic *snic = shost_priv(shost);
334 int ret;
335
336 tgt = starget_to_tgt(scsi_target(sc->device));
337 ret = snic_tgt_chkready(tgt);
338 if (ret) {
339 SNIC_HOST_ERR(shost, "Tgt %p id %d Not Ready.\n", tgt, tgt->id);
340 atomic64_inc(&snic->s_stats.misc.tgt_not_rdy);
341 sc->result = ret;
342 sc->scsi_done(sc);
343
344 return 0;
345 }
346
347 if (snic_get_state(snic) != SNIC_ONLINE) {
348 SNIC_HOST_ERR(shost, "snic state is %s\n",
349 snic_state_str[snic_get_state(snic)]);
350
351 return SCSI_MLQUEUE_HOST_BUSY;
352 }
353 atomic_inc(&snic->ios_inflight);
354
355 SNIC_SCSI_DBG(shost, "sc %p Tag %d (cmd 0x%x) lun %lld in snic_qcmd\n",
356 sc, snic_cmd_tag(sc), sc->cmnd[0], sc->device->lun);
357
358 memset(scsi_cmd_priv(sc), 0, sizeof(struct snic_internal_io_state));
359
360 ret = snic_issue_scsi_req(snic, tgt, sc);
361 if (ret) {
362 SNIC_HOST_ERR(shost, "Failed to Q, Scsi Req w/ err %d.\n", ret);
363 ret = SCSI_MLQUEUE_HOST_BUSY;
364 } else
365 snic_stats_update_active_ios(&snic->s_stats);
366
367 atomic_dec(&snic->ios_inflight);
368
369 return ret;
370} /* end of snic_queuecommand */
371
372/*
373 * snic_process_abts_pending_state:
374 * caller should hold IO lock
375 */
376static void
377snic_proc_tmreq_pending_state(struct snic *snic,
378 struct scsi_cmnd *sc,
379 u8 cmpl_status)
380{
381 int state = CMD_STATE(sc);
382
383 if (state == SNIC_IOREQ_ABTS_PENDING)
384 CMD_FLAGS(sc) |= SNIC_IO_ABTS_PENDING;
385 else if (state == SNIC_IOREQ_LR_PENDING)
386 CMD_FLAGS(sc) |= SNIC_DEV_RST_PENDING;
387 else
388 SNIC_BUG_ON(1);
389
390 switch (cmpl_status) {
391 case SNIC_STAT_IO_SUCCESS:
392 CMD_FLAGS(sc) |= SNIC_IO_DONE;
393 break;
394
395 case SNIC_STAT_ABORTED:
396 CMD_FLAGS(sc) |= SNIC_IO_ABORTED;
397 break;
398
399 default:
400 SNIC_BUG_ON(1);
401 }
402}
403
404/*
405 * snic_process_io_failed_state:
406 * Processes IO's error states
407 */
408static void
409snic_process_io_failed_state(struct snic *snic,
410 struct snic_icmnd_cmpl *icmnd_cmpl,
411 struct scsi_cmnd *sc,
412 u8 cmpl_stat)
413{
414 int res = 0;
415
416 switch (cmpl_stat) {
417 case SNIC_STAT_TIMEOUT: /* Req was timedout */
418 atomic64_inc(&snic->s_stats.misc.io_tmo);
419 res = DID_TIME_OUT;
420 break;
421
422 case SNIC_STAT_ABORTED: /* Req was aborted */
423 atomic64_inc(&snic->s_stats.misc.io_aborted);
424 res = DID_ABORT;
425 break;
426
427 case SNIC_STAT_DATA_CNT_MISMATCH:/* Recv/Sent more/less data than exp */
428 atomic64_inc(&snic->s_stats.misc.data_cnt_mismat);
429 scsi_set_resid(sc, le32_to_cpu(icmnd_cmpl->resid));
430 res = DID_ERROR;
431 break;
432
433 case SNIC_STAT_OUT_OF_RES: /* Out of resources to complete request */
434 atomic64_inc(&snic->s_stats.fw.out_of_res);
435 res = DID_REQUEUE;
436 break;
437
438 case SNIC_STAT_IO_NOT_FOUND: /* Requested I/O was not found */
439 atomic64_inc(&snic->s_stats.io.io_not_found);
440 res = DID_ERROR;
441 break;
442
443 case SNIC_STAT_SGL_INVALID: /* Req was aborted due to sgl error */
444 atomic64_inc(&snic->s_stats.misc.sgl_inval);
445 res = DID_ERROR;
446 break;
447
448 case SNIC_STAT_FW_ERR: /* Req terminated due to FW Error */
449 atomic64_inc(&snic->s_stats.fw.io_errs);
450 res = DID_ERROR;
451 break;
452
453 case SNIC_STAT_SCSI_ERR: /* FW hits SCSI Error */
454 atomic64_inc(&snic->s_stats.fw.scsi_errs);
455 break;
456
457 case SNIC_STAT_NOT_READY: /* XPT yet to initialize */
458 case SNIC_STAT_DEV_OFFLINE: /* Device offline */
459 res = DID_NO_CONNECT;
460 break;
461
462 case SNIC_STAT_INVALID_HDR: /* Hdr contains invalid data */
463 case SNIC_STAT_INVALID_PARM: /* Some param in req is invalid */
464 case SNIC_STAT_REQ_NOT_SUP: /* Req type is not supported */
465 case SNIC_STAT_CMND_REJECT: /* Req rejected */
466 case SNIC_STAT_FATAL_ERROR: /* XPT Error */
467 default:
468 SNIC_SCSI_DBG(snic->shost,
469 "Invalid Hdr/Param or Req Not Supported or Cmnd Rejected or Device Offline. or Unknown\n");
470 res = DID_ERROR;
471 break;
472 }
473
474 SNIC_HOST_ERR(snic->shost, "fw returns failed status %s flags 0x%llx\n",
475 snic_io_status_to_str(cmpl_stat), CMD_FLAGS(sc));
476
477 /* Set sc->result */
478 sc->result = (res << 16) | icmnd_cmpl->scsi_status;
479} /* end of snic_process_io_failed_state */
480
481/*
482 * snic_tmreq_pending : is task management in progress.
483 */
484static int
485snic_tmreq_pending(struct scsi_cmnd *sc)
486{
487 int state = CMD_STATE(sc);
488
489 return ((state == SNIC_IOREQ_ABTS_PENDING) ||
490 (state == SNIC_IOREQ_LR_PENDING));
491}
492
493/*
494 * snic_process_icmnd_cmpl_status:
495 * Caller should hold io_lock
496 */
497static int
498snic_process_icmnd_cmpl_status(struct snic *snic,
499 struct snic_icmnd_cmpl *icmnd_cmpl,
500 u8 cmpl_stat,
501 struct scsi_cmnd *sc)
502{
503 u8 scsi_stat = icmnd_cmpl->scsi_status;
504 u64 xfer_len = 0;
505 int ret = 0;
506
507 /* Mark the IO as complete */
508 CMD_STATE(sc) = SNIC_IOREQ_COMPLETE;
509
510 if (likely(cmpl_stat == SNIC_STAT_IO_SUCCESS)) {
511 sc->result = (DID_OK << 16) | scsi_stat;
512
513 xfer_len = scsi_bufflen(sc);
514
515 /* Update SCSI Cmd with resid value */
516 scsi_set_resid(sc, le32_to_cpu(icmnd_cmpl->resid));
517
518 if (icmnd_cmpl->flags & SNIC_ICMND_CMPL_UNDR_RUN) {
519 xfer_len -= le32_to_cpu(icmnd_cmpl->resid);
520 atomic64_inc(&snic->s_stats.misc.io_under_run);
521 }
522
523 if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
524 atomic64_inc(&snic->s_stats.misc.qfull);
525
526 ret = 0;
527 } else {
528 snic_process_io_failed_state(snic, icmnd_cmpl, sc, cmpl_stat);
529 atomic64_inc(&snic->s_stats.io.fail);
530 SNIC_HOST_ERR(snic->shost,
531 "icmnd_cmpl: IO Failed : Hdr Status %s flags 0x%llx\n",
532 snic_io_status_to_str(cmpl_stat), CMD_FLAGS(sc));
533 ret = 1;
534 }
535
536 return ret;
537} /* end of snic_process_icmnd_cmpl_status */
538
539
540/*
541 * snic_icmnd_cmpl_handler
542 * Routine to handle icmnd completions
543 */
544static void
545snic_icmnd_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
546{
547 u8 typ, hdr_stat;
548 u32 cmnd_id, hid;
549 ulong ctx;
550 struct scsi_cmnd *sc = NULL;
551 struct snic_icmnd_cmpl *icmnd_cmpl = NULL;
552 struct snic_host_req *req = NULL;
553 struct snic_req_info *rqi = NULL;
554 unsigned long flags, start_time;
555 spinlock_t *io_lock;
556 u8 sc_stat = 0;
557
558 snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
559 icmnd_cmpl = &fwreq->u.icmnd_cmpl;
560 sc_stat = icmnd_cmpl->scsi_status;
561
562 SNIC_SCSI_DBG(snic->shost,
563 "Icmnd_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x,i ctx = %lx\n",
564 typ, hdr_stat, cmnd_id, hid, ctx);
565
566 if (cmnd_id >= snic->max_tag_id) {
567 SNIC_HOST_ERR(snic->shost,
568 "Icmnd_cmpl:Tag Error:Out of Range Tag %d, hdr status = %s\n",
569 cmnd_id, snic_io_status_to_str(hdr_stat));
570 return;
571 }
572
573 sc = scsi_host_find_tag(snic->shost, cmnd_id);
574 WARN_ON_ONCE(!sc);
575
576 if (!sc) {
577 atomic64_inc(&snic->s_stats.io.sc_null);
578 SNIC_HOST_ERR(snic->shost,
579 "Icmnd_cmpl: Scsi Cmnd Not found, sc = NULL Hdr Status = %s tag = 0x%x fwreq = 0x%p\n",
580 snic_io_status_to_str(hdr_stat),
581 cmnd_id,
582 fwreq);
583
584 SNIC_TRC(snic->shost->host_no, cmnd_id, 0,
585 ((u64)hdr_stat << 16 |
586 (u64)sc_stat << 8 | (u64)icmnd_cmpl->flags),
587 (ulong) fwreq, le32_to_cpu(icmnd_cmpl->resid), ctx);
588
589 return;
590 }
591
592 io_lock = snic_io_lock_hash(snic, sc);
593
594 spin_lock_irqsave(io_lock, flags);
595 rqi = (struct snic_req_info *) CMD_SP(sc);
596 SNIC_SCSI_DBG(snic->shost,
597 "Icmnd_cmpl:lun %lld sc %p cmd %xtag %d flags 0x%llx rqi %p\n",
598 sc->device->lun, sc, sc->cmnd[0], snic_cmd_tag(sc),
599 CMD_FLAGS(sc), rqi);
600
601 SNIC_BUG_ON(rqi != (struct snic_req_info *)ctx);
602 WARN_ON_ONCE(req);
603 if (!rqi) {
604 atomic64_inc(&snic->s_stats.io.req_null);
605 CMD_FLAGS(sc) |= SNIC_IO_REQ_NULL;
606 spin_unlock_irqrestore(io_lock, flags);
607
608 SNIC_HOST_ERR(snic->shost,
609 "Icmnd_cmpl:Host Req Not Found(null), Hdr Status %s, Tag 0x%x, sc 0x%p flags 0x%llx\n",
610 snic_io_status_to_str(hdr_stat),
611 cmnd_id, sc, CMD_FLAGS(sc));
612 return;
613 }
614
615 rqi = (struct snic_req_info *) ctx;
616 start_time = rqi->start_time;
617
618 /* firmware completed the io */
619 rqi->io_cmpl = 1;
620
621 /*
622 * if SCSI-ML has already issued abort on this command,
623 * ignore completion of the IO. The abts path will clean it up
624 */
625 if (unlikely(snic_tmreq_pending(sc))) {
626 snic_proc_tmreq_pending_state(snic, sc, hdr_stat);
627 spin_unlock_irqrestore(io_lock, flags);
628
629 snic_stats_update_io_cmpl(&snic->s_stats);
630
631 /* Expected value is SNIC_STAT_ABORTED */
632 if (likely(hdr_stat == SNIC_STAT_ABORTED))
633 return;
634
635 SNIC_SCSI_DBG(snic->shost,
636 "icmnd_cmpl:TM Req Pending(%s), Hdr Status %s sc 0x%p scsi status %x resid %d flags 0x%llx\n",
637 snic_ioreq_state_to_str(CMD_STATE(sc)),
638 snic_io_status_to_str(hdr_stat),
639 sc, sc_stat, le32_to_cpu(icmnd_cmpl->resid),
640 CMD_FLAGS(sc));
641
642 SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
643 jiffies_to_msecs(jiffies - start_time), (ulong) fwreq,
644 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
645
646 return;
647 }
648
649 if (snic_process_icmnd_cmpl_status(snic, icmnd_cmpl, hdr_stat, sc)) {
650 scsi_print_command(sc);
651 SNIC_HOST_ERR(snic->shost,
652 "icmnd_cmpl:IO Failed, sc 0x%p Tag %d Cmd %x Hdr Status %s flags 0x%llx\n",
653 sc, cmnd_id, sc->cmnd[0],
654 snic_io_status_to_str(hdr_stat), CMD_FLAGS(sc));
655 }
656
657 /* Break link with the SCSI Command */
658 CMD_SP(sc) = NULL;
659 CMD_FLAGS(sc) |= SNIC_IO_DONE;
660
661 spin_unlock_irqrestore(io_lock, flags);
662
663 /* For now, consider only successful IO. */
664 snic_calc_io_process_time(snic, rqi);
665
666 snic_release_req_buf(snic, rqi, sc);
667
668 SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
669 jiffies_to_msecs(jiffies - start_time), (ulong) fwreq,
670 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
671
672
673 if (sc->scsi_done)
674 sc->scsi_done(sc);
675
676 snic_stats_update_io_cmpl(&snic->s_stats);
677} /* end of snic_icmnd_cmpl_handler */
678
679static void
680snic_proc_dr_cmpl_locked(struct snic *snic,
681 struct snic_fw_req *fwreq,
682 u8 cmpl_stat,
683 u32 cmnd_id,
684 struct scsi_cmnd *sc)
685{
686 struct snic_req_info *rqi = (struct snic_req_info *) CMD_SP(sc);
687 u32 start_time = rqi->start_time;
688
689 CMD_LR_STATUS(sc) = cmpl_stat;
690
691 SNIC_SCSI_DBG(snic->shost, "itmf_cmpl: Cmd State = %s\n",
692 snic_ioreq_state_to_str(CMD_STATE(sc)));
693
694 if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
695 CMD_FLAGS(sc) |= SNIC_DEV_RST_ABTS_PENDING;
696
697 SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
698 jiffies_to_msecs(jiffies - start_time),
699 (ulong) fwreq, 0, SNIC_TRC_CMD_STATE_FLAGS(sc));
700
701 SNIC_SCSI_DBG(snic->shost,
702 "itmf_cmpl: Terminate Pending Dev Reset Cmpl Recvd.id %x, status %s flags 0x%llx\n",
703 (int)(cmnd_id & SNIC_TAG_MASK),
704 snic_io_status_to_str(cmpl_stat),
705 CMD_FLAGS(sc));
706
707 return;
708 }
709
710
711 if (CMD_FLAGS(sc) & SNIC_DEV_RST_TIMEDOUT) {
712 SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
713 jiffies_to_msecs(jiffies - start_time),
714 (ulong) fwreq, 0, SNIC_TRC_CMD_STATE_FLAGS(sc));
715
716 SNIC_SCSI_DBG(snic->shost,
717 "itmf_cmpl:Dev Reset Completion Received after timeout. id %d cmpl status %s flags 0x%llx\n",
718 (int)(cmnd_id & SNIC_TAG_MASK),
719 snic_io_status_to_str(cmpl_stat),
720 CMD_FLAGS(sc));
721
722 return;
723 }
724
725 CMD_STATE(sc) = SNIC_IOREQ_LR_COMPLETE;
726 CMD_FLAGS(sc) |= SNIC_DEV_RST_DONE;
727
728 SNIC_SCSI_DBG(snic->shost,
729 "itmf_cmpl:Dev Reset Cmpl Recvd id %d cmpl status %s flags 0x%llx\n",
730 (int)(cmnd_id & SNIC_TAG_MASK),
731 snic_io_status_to_str(cmpl_stat),
732 CMD_FLAGS(sc));
733
734 if (rqi->dr_done)
735 complete(rqi->dr_done);
736} /* end of snic_proc_dr_cmpl_locked */
737
738/*
739 * snic_update_abort_stats : Updates abort stats based on completion status.
740 */
741static void
742snic_update_abort_stats(struct snic *snic, u8 cmpl_stat)
743{
744 struct snic_abort_stats *abt_stats = &snic->s_stats.abts;
745
746 SNIC_SCSI_DBG(snic->shost, "Updating Abort stats.\n");
747
748 switch (cmpl_stat) {
749 case SNIC_STAT_IO_SUCCESS:
750 break;
751
752 case SNIC_STAT_TIMEOUT:
753 atomic64_inc(&abt_stats->fw_tmo);
754 break;
755
756 case SNIC_STAT_IO_NOT_FOUND:
757 atomic64_inc(&abt_stats->io_not_found);
758 break;
759
760 default:
761 atomic64_inc(&abt_stats->fail);
762 break;
763 }
764}
765
766static int
767snic_process_itmf_cmpl(struct snic *snic,
768 struct snic_fw_req *fwreq,
769 u32 cmnd_id,
770 u8 cmpl_stat,
771 struct scsi_cmnd *sc)
772{
773 struct snic_req_info *rqi = NULL;
774 u32 tm_tags = 0;
775 spinlock_t *io_lock = NULL;
776 unsigned long flags;
777 u32 start_time = 0;
778 int ret = 0;
779
780 io_lock = snic_io_lock_hash(snic, sc);
781 spin_lock_irqsave(io_lock, flags);
782 rqi = (struct snic_req_info *) CMD_SP(sc);
783 WARN_ON_ONCE(!rqi);
784
785 if (!rqi) {
786 atomic64_inc(&snic->s_stats.io.req_null);
787 spin_unlock_irqrestore(io_lock, flags);
788 CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
789 SNIC_HOST_ERR(snic->shost,
790 "itmf_cmpl: rqi is null,Hdr stat = %s Tag = 0x%x sc = 0x%p flags 0x%llx\n",
791 snic_io_status_to_str(cmpl_stat), cmnd_id, sc,
792 CMD_FLAGS(sc));
793
794 return ret;
795 }
796
797 /* Extract task management flags */
798 tm_tags = cmnd_id & ~(SNIC_TAG_MASK);
799
800 start_time = rqi->start_time;
801 cmnd_id &= (SNIC_TAG_MASK);
802
803 switch (tm_tags) {
804 case SNIC_TAG_ABORT:
805 /* Abort only issued on cmd */
806 snic_update_abort_stats(snic, cmpl_stat);
807
808 if (CMD_STATE(sc) != SNIC_IOREQ_ABTS_PENDING) {
809 /* This is a late completion. Ignore it. */
810 ret = -1;
811 spin_unlock_irqrestore(io_lock, flags);
812 break;
813 }
814
815 CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
816 CMD_ABTS_STATUS(sc) = cmpl_stat;
817 CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_DONE;
818
819 SNIC_SCSI_DBG(snic->shost,
820 "itmf_cmpl:Abort Cmpl Recvd.Tag 0x%x Status %s flags 0x%llx\n",
821 cmnd_id,
822 snic_io_status_to_str(cmpl_stat),
823 CMD_FLAGS(sc));
824
825 /*
826 * If scsi_eh thread is blocked waiting for abts complete,
827 * signal completion to it. IO will be cleaned in the thread,
828 * else clean it in this context.
829 */
830 if (rqi->abts_done) {
831 complete(rqi->abts_done);
832 spin_unlock_irqrestore(io_lock, flags);
833
834 break; /* jump out */
835 }
836
837 CMD_SP(sc) = NULL;
838 sc->result = (DID_ERROR << 16);
839 SNIC_SCSI_DBG(snic->shost,
840 "itmf_cmpl: Completing IO. sc %p flags 0x%llx\n",
841 sc, CMD_FLAGS(sc));
842
843 spin_unlock_irqrestore(io_lock, flags);
844
845 snic_release_req_buf(snic, rqi, sc);
846
847 if (sc->scsi_done) {
848 SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
849 jiffies_to_msecs(jiffies - start_time),
850 (ulong) fwreq, SNIC_TRC_CMD(sc),
851 SNIC_TRC_CMD_STATE_FLAGS(sc));
852
853 sc->scsi_done(sc);
854 }
855
856 break;
857
858 case SNIC_TAG_DEV_RST:
859 case SNIC_TAG_DEV_RST | SNIC_TAG_IOCTL_DEV_RST:
860 snic_proc_dr_cmpl_locked(snic, fwreq, cmpl_stat, cmnd_id, sc);
861 spin_unlock_irqrestore(io_lock, flags);
862 ret = 0;
863
864 break;
865
866 case SNIC_TAG_ABORT | SNIC_TAG_DEV_RST:
867 /* Abort and terminate completion of device reset req */
868
869 CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
870 CMD_ABTS_STATUS(sc) = cmpl_stat;
871 CMD_FLAGS(sc) |= SNIC_DEV_RST_DONE;
872
873 SNIC_SCSI_DBG(snic->shost,
874 "itmf_cmpl:dev reset abts cmpl recvd. id %d status %s flags 0x%llx\n",
875 cmnd_id, snic_io_status_to_str(cmpl_stat),
876 CMD_FLAGS(sc));
877
878 if (rqi->abts_done)
879 complete(rqi->abts_done);
880
881 spin_unlock_irqrestore(io_lock, flags);
882
883 break;
884
885 default:
886 spin_unlock_irqrestore(io_lock, flags);
887 SNIC_HOST_ERR(snic->shost,
888 "itmf_cmpl: Unknown TM tag bit 0x%x\n", tm_tags);
889
890 SNIC_HOST_ERR(snic->shost,
891 "itmf_cmpl:Unexpected itmf io stat %s Tag = 0x%x flags 0x%llx\n",
892 snic_ioreq_state_to_str(CMD_STATE(sc)),
893 cmnd_id,
894 CMD_FLAGS(sc));
895 ret = -1;
896 SNIC_BUG_ON(1);
897
898 break;
899 }
900
901 return ret;
902} /* end of snic_process_itmf_cmpl_status */
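/*
 * Standalone sketch (illustration only) of the tag encoding handled
 * above: the low bits of cmnd_id carry the SCSI command tag, and the
 * bits above the mask carry the task-management flags. The mask and
 * flag values are assumptions, not the driver's actual definitions
 * (those live in the snic headers).
 */
#include <stdint.h>

#define EX_TAG_MASK	0x00ffffffu	/* hypothetical SNIC_TAG_MASK  */
#define EX_TAG_ABORT	0x01000000u	/* hypothetical SNIC_TAG_ABORT */

static uint32_t ex_tag_encode(uint32_t tag, uint32_t tm_flags)
{
	return (tag & EX_TAG_MASK) | tm_flags;
}

static void ex_tag_decode(uint32_t cmnd_id, uint32_t *tag, uint32_t *tm_flags)
{
	*tm_flags = cmnd_id & ~EX_TAG_MASK;	/* mirrors tm_tags extraction */
	*tag = cmnd_id & EX_TAG_MASK;		/* mirrors cmnd_id &= mask    */
}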
903
904/*
905 * snic_itmf_cmpl_handler.
906 * Routine to handle itmf completions.
907 */
908static void
909snic_itmf_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
910{
911 struct scsi_cmnd *sc = NULL;
912 struct snic_req_info *rqi = NULL;
913 struct snic_itmf_cmpl *itmf_cmpl = NULL;
914 ulong ctx;
915 u32 cmnd_id;
916 u32 hid;
917 u8 typ;
918 u8 hdr_stat;
919
920 snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
921 SNIC_SCSI_DBG(snic->shost,
922 "Itmf_cmpl: %s: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x,ctx = %lx\n",
923 __func__, typ, hdr_stat, cmnd_id, hid, ctx);
924
925 itmf_cmpl = &fwreq->u.itmf_cmpl;
926 SNIC_SCSI_DBG(snic->shost,
927 "Itmf_cmpl: nterm %u , flags 0x%x\n",
928 le32_to_cpu(itmf_cmpl->nterminated), itmf_cmpl->flags);
929
930 /* special case, dev reset issued through ioctl */
931 if (cmnd_id & SNIC_TAG_IOCTL_DEV_RST) {
932 rqi = (struct snic_req_info *) ctx;
933 sc = rqi->sc;
934
935 goto ioctl_dev_rst;
936 }
937
938 if ((cmnd_id & SNIC_TAG_MASK) >= snic->max_tag_id) {
939 SNIC_HOST_ERR(snic->shost,
940 "Itmf_cmpl: Tag 0x%x out of Range,HdrStat %s\n",
941 cmnd_id, snic_io_status_to_str(hdr_stat));
942 SNIC_BUG_ON(1);
943
944 return;
945 }
946
947 sc = scsi_host_find_tag(snic->shost, cmnd_id & SNIC_TAG_MASK);
948 WARN_ON_ONCE(!sc);
949
950ioctl_dev_rst:
951 if (!sc) {
952 atomic64_inc(&snic->s_stats.io.sc_null);
953 SNIC_HOST_ERR(snic->shost,
954 "Itmf_cmpl: sc is NULL - Hdr Stat %s Tag 0x%x\n",
955 snic_io_status_to_str(hdr_stat), cmnd_id);
956
957 return;
958 }
959
960 snic_process_itmf_cmpl(snic, fwreq, cmnd_id, hdr_stat, sc);
961} /* end of snic_itmf_cmpl_handler */
962
963
964
965static void
966snic_hba_reset_scsi_cleanup(struct snic *snic, struct scsi_cmnd *sc)
967{
968 struct snic_stats *st = &snic->s_stats;
969 long act_ios = 0, act_fwreqs = 0;
970
971 SNIC_SCSI_DBG(snic->shost, "HBA Reset scsi cleanup.\n");
972 snic_scsi_cleanup(snic, snic_cmd_tag(sc));
973
974 /* Update stats on pending IOs */
975 act_ios = atomic64_read(&st->io.active);
976 atomic64_add(act_ios, &st->io.compl);
977 atomic64_sub(act_ios, &st->io.active);
978
979 act_fwreqs = atomic64_read(&st->fw.actv_reqs);
980 atomic64_sub(act_fwreqs, &st->fw.actv_reqs);
981}
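/*
 * Standalone sketch (illustration only) of the bulk stats transfer
 * above: on HBA reset, all IOs still counted active are folded into
 * the completed counter in one shot. C11 atomics stand in for the
 * kernel's atomic64_t.
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic int64_t ex_io_active, ex_io_compl;

static void ex_fold_active_into_compl(void)
{
	int64_t act = atomic_load(&ex_io_active);

	atomic_fetch_add(&ex_io_compl, act);
	atomic_fetch_sub(&ex_io_active, act);
}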
982
983/*
984 * snic_hba_reset_cmpl_handler :
985 *
986 * Notes :
987 * 1. Cleanup all the scsi cmds, release all snic specific cmds
988 * 2. Issue Report Targets in case of SAN targets
989 */
990static int
991snic_hba_reset_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
992{
993 ulong ctx;
994 u32 cmnd_id;
995 u32 hid;
996 u8 typ;
997 u8 hdr_stat;
998 struct scsi_cmnd *sc = NULL;
999 struct snic_req_info *rqi = NULL;
1000 spinlock_t *io_lock = NULL;
1001 unsigned long flags, gflags;
1002 int ret = 0;
1003
1004 SNIC_HOST_INFO(snic->shost,
1005 "reset_cmpl:HBA Reset Completion received.\n");
1006
1007 snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
1008 SNIC_SCSI_DBG(snic->shost,
1009 "reset_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
1010 typ, hdr_stat, cmnd_id, hid, ctx);
1011
1012 /* special case, host reset issued through ioctl */
1013 if (cmnd_id == SCSI_NO_TAG) {
1014 rqi = (struct snic_req_info *) ctx;
1015 sc = rqi->sc;
1016
1017 goto ioctl_hba_rst;
1018 }
1019
1020 if (cmnd_id >= snic->max_tag_id) {
1021 SNIC_HOST_ERR(snic->shost,
1022 "reset_cmpl: Tag 0x%x out of Range,HdrStat %s\n",
1023 cmnd_id, snic_io_status_to_str(hdr_stat));
1024 SNIC_BUG_ON(1);
1025
1026 return 1;
1027 }
1028
1029 sc = scsi_host_find_tag(snic->shost, cmnd_id);
1030ioctl_hba_rst:
1031 if (!sc) {
1032 atomic64_inc(&snic->s_stats.io.sc_null);
1033 SNIC_HOST_ERR(snic->shost,
1034 "reset_cmpl: sc is NULL - Hdr Stat %s Tag 0x%x\n",
1035 snic_io_status_to_str(hdr_stat), cmnd_id);
1036 ret = 1;
1037
1038 return ret;
1039 }
1040
1041 io_lock = snic_io_lock_hash(snic, sc);
1042 spin_lock_irqsave(io_lock, flags);
1043
1044 if (!snic->remove_wait) {
1045 spin_unlock_irqrestore(io_lock, flags);
1046 SNIC_HOST_ERR(snic->shost,
1047 "reset_cmpl:host reset completed after timout\n");
1048 ret = 1;
1049
1050 return ret;
1051 }
1052
1053 rqi = (struct snic_req_info *) CMD_SP(sc);
1054 WARN_ON_ONCE(!rqi);
1055
1056 if (!rqi) {
1057 atomic64_inc(&snic->s_stats.io.req_null);
1058 spin_unlock_irqrestore(io_lock, flags);
1059 CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
1060 SNIC_HOST_ERR(snic->shost,
1061 "reset_cmpl: rqi is null,Hdr stat %s Tag 0x%x sc 0x%p flags 0x%llx\n",
1062 snic_io_status_to_str(hdr_stat), cmnd_id, sc,
1063 CMD_FLAGS(sc));
1064
1065 ret = 1;
1066
1067 return ret;
1068 }
1069 /* stats */
1070 spin_unlock_irqrestore(io_lock, flags);
1071
1072 /* scsi cleanup */
1073 snic_hba_reset_scsi_cleanup(snic, sc);
1074
1075 SNIC_BUG_ON(snic_get_state(snic) != SNIC_OFFLINE &&
1076 snic_get_state(snic) != SNIC_FWRESET);
1077
1078 /* Careful locking between snic_lock and io lock */
1079 spin_lock_irqsave(io_lock, flags);
1080 spin_lock_irqsave(&snic->snic_lock, gflags);
1081 if (snic_get_state(snic) == SNIC_FWRESET)
1082 snic_set_state(snic, SNIC_ONLINE);
1083 spin_unlock_irqrestore(&snic->snic_lock, gflags);
1084
1085 if (snic->remove_wait)
1086 complete(snic->remove_wait);
1087
1088 spin_unlock_irqrestore(io_lock, flags);
1089 atomic64_inc(&snic->s_stats.reset.hba_reset_cmpl);
1090
1091 ret = 0;
1092 /* Rediscovery is for SAN */
1093 if (snic->config.xpt_type == SNIC_DAS)
1094 return ret;
1095
1096 SNIC_SCSI_DBG(snic->shost, "reset_cmpl: Queuing discovery work.\n");
1097 queue_work(snic_glob->event_q, &snic->disc_work);
1098
1099 return ret;
1100}
1101
1102static void
1103snic_msg_ack_handler(struct snic *snic, struct snic_fw_req *fwreq)
1104{
1105 SNIC_HOST_INFO(snic->shost, "Message Ack Received.\n");
1106
1107 SNIC_ASSERT_NOT_IMPL(1);
1108}
1109
1110static void
1111snic_aen_handler(struct snic *snic, struct snic_fw_req *fwreq)
1112{
1113 u8 typ, hdr_stat;
1114 u32 cmnd_id, hid;
1115 ulong ctx;
1116 struct snic_async_evnotify *aen = &fwreq->u.async_ev;
1117 u32 event_id = 0;
1118
1119 snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
1120 SNIC_SCSI_DBG(snic->shost,
1121 "aen: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
1122 typ, hdr_stat, cmnd_id, hid, ctx);
1123
1124 event_id = le32_to_cpu(aen->ev_id);
1125
1126 switch (event_id) {
1127 case SNIC_EV_TGT_OFFLINE:
1128 SNIC_HOST_INFO(snic->shost, "aen:TGT_OFFLINE Event Recvd.\n");
1129 break;
1130
1131 case SNIC_EV_TGT_ONLINE:
1132 SNIC_HOST_INFO(snic->shost, "aen:TGT_ONLINE Event Recvd.\n");
1133 break;
1134
1135 case SNIC_EV_LUN_OFFLINE:
1136 SNIC_HOST_INFO(snic->shost, "aen:LUN_OFFLINE Event Recvd.\n");
1137 break;
1138
1139 case SNIC_EV_LUN_ONLINE:
1140 SNIC_HOST_INFO(snic->shost, "aen:LUN_ONLINE Event Recvd.\n");
1141 break;
1142
1143 case SNIC_EV_CONF_CHG:
1144 SNIC_HOST_INFO(snic->shost, "aen:Config Change Event Recvd.\n");
1145 break;
1146
1147 case SNIC_EV_TGT_ADDED:
1148 SNIC_HOST_INFO(snic->shost, "aen:TGT_ADD Event Recvd.\n");
1149 break;
1150
1151 case SNIC_EV_TGT_DELTD:
1152 SNIC_HOST_INFO(snic->shost, "aen:TGT_DEL Event Recvd.\n");
1153 break;
1154
1155 case SNIC_EV_LUN_ADDED:
1156 SNIC_HOST_INFO(snic->shost, "aen:LUN_ADD Event Recvd.\n");
1157 break;
1158
1159 case SNIC_EV_LUN_DELTD:
1160 SNIC_HOST_INFO(snic->shost, "aen:LUN_DEL Event Recvd.\n");
1161 break;
1162
1163 case SNIC_EV_DISC_CMPL:
1164 SNIC_HOST_INFO(snic->shost, "aen:DISC_CMPL Event Recvd.\n");
1165 break;
1166
1167 default:
1168 SNIC_HOST_INFO(snic->shost, "aen:Unknown Event Recvd.\n");
1169 SNIC_BUG_ON(1);
1170 break;
1171 }
1172
1173 SNIC_ASSERT_NOT_IMPL(1);
1174} /* end of snic_aen_handler */
1175
1176/*
1177 * snic_io_cmpl_handler
1178 * Routine to process CQ entries(IO Completions) posted by fw.
1179 */
1180static int
1181snic_io_cmpl_handler(struct vnic_dev *vdev,
1182 unsigned int cq_idx,
1183 struct snic_fw_req *fwreq)
1184{
1185 struct snic *snic = svnic_dev_priv(vdev);
1186 u64 start = jiffies, cmpl_time;
1187
1188 snic_print_desc(__func__, (char *)fwreq, sizeof(*fwreq));
1189
1190 /* Update FW Stats */
1191 if ((fwreq->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL) &&
1192 (fwreq->hdr.type <= SNIC_RSP_BOOT_LUNS_CMPL))
1193 atomic64_dec(&snic->s_stats.fw.actv_reqs);
1194
1195 SNIC_BUG_ON((fwreq->hdr.type > SNIC_RSP_BOOT_LUNS_CMPL) &&
1196 (fwreq->hdr.type < SNIC_MSG_ASYNC_EVNOTIFY));
1197
1198 /* Check for snic subsys errors */
1199 switch (fwreq->hdr.status) {
1200 case SNIC_STAT_NOT_READY: /* XPT yet to initialize */
1201 SNIC_HOST_ERR(snic->shost,
1202 "sNIC SubSystem is NOT Ready.\n");
1203 break;
1204
1205 case SNIC_STAT_FATAL_ERROR: /* XPT Error */
1206 SNIC_HOST_ERR(snic->shost,
1207 "sNIC SubSystem in Unrecoverable State.\n");
1208 break;
1209 }
1210
1211 switch (fwreq->hdr.type) {
1212 case SNIC_RSP_EXCH_VER_CMPL:
1213 snic_io_exch_ver_cmpl_handler(snic, fwreq);
1214 break;
1215
1216 case SNIC_RSP_REPORT_TGTS_CMPL:
1217 snic_report_tgt_cmpl_handler(snic, fwreq);
1218 break;
1219
1220 case SNIC_RSP_ICMND_CMPL:
1221 snic_icmnd_cmpl_handler(snic, fwreq);
1222 break;
1223
1224 case SNIC_RSP_ITMF_CMPL:
1225 snic_itmf_cmpl_handler(snic, fwreq);
1226 break;
1227
1228 case SNIC_RSP_HBA_RESET_CMPL:
1229 snic_hba_reset_cmpl_handler(snic, fwreq);
1230 break;
1231
1232 case SNIC_MSG_ACK:
1233 snic_msg_ack_handler(snic, fwreq);
1234 break;
1235
1236 case SNIC_MSG_ASYNC_EVNOTIFY:
1237 snic_aen_handler(snic, fwreq);
1238 break;
1239
1240 default:
1241 SNIC_BUG_ON(1);
1242 SNIC_SCSI_DBG(snic->shost,
1243 "Unknown Firmwqre completion request type %d\n",
1244 fwreq->hdr.type);
1245 break;
1246 }
1247
1248 /* Update Stats */
1249 cmpl_time = jiffies - start;
1250 if (cmpl_time > atomic64_read(&snic->s_stats.io.max_cmpl_time))
1251 atomic64_set(&snic->s_stats.io.max_cmpl_time, cmpl_time);
1252
1253 return 0;
1254} /* end of snic_io_cmpl_handler */
1255
1256/*
1257 * snic_fwcq_cmpl_handler
1258 * Routine to process fwCQ
1259 * This CQ is independent, and not associated with wq/rq/wq_copy queues
1260 */
1261int
1262snic_fwcq_cmpl_handler(struct snic *snic, int io_cmpl_work)
1263{
1264 unsigned int num_ent = 0; /* number cq entries processed */
1265 unsigned int cq_idx;
1266 unsigned int nent_per_cq;
1267 struct snic_misc_stats *misc_stats = &snic->s_stats.misc;
1268
1269 for (cq_idx = snic->wq_count; cq_idx < snic->cq_count; cq_idx++) {
1270 nent_per_cq = vnic_cq_fw_service(&snic->cq[cq_idx],
1271 snic_io_cmpl_handler,
1272 io_cmpl_work);
1273 num_ent += nent_per_cq;
1274
1275 if (nent_per_cq > atomic64_read(&misc_stats->max_cq_ents))
1276 atomic64_set(&misc_stats->max_cq_ents, nent_per_cq);
1277 }
1278
1279 return num_ent;
1280} /* end of snic_fwcq_cmpl_handler */
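/*
 * Standalone sketch (illustration only) of the CQ index partitioning
 * above: the first wq_count completion queues belong to the work
 * queues, and the firmware CQs occupy the remaining indices
 * [wq_count, cq_count).
 */
static unsigned int ex_count_fw_cqs(unsigned int wq_count,
				    unsigned int cq_count)
{
	unsigned int cq_idx, n = 0;

	for (cq_idx = wq_count; cq_idx < cq_count; cq_idx++)
		n++;	/* each index here is a firmware CQ to service */

	return n;
}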
1281
1282/*
1283 * snic_queue_itmf_req: Common API to queue Task Management requests.
1284 * Use rqi->tm_tag for passing special tags.
1285 * @req_id : aborted request's tag, -1 for lun reset.
1286 */
1287static int
1288snic_queue_itmf_req(struct snic *snic,
1289 struct snic_host_req *tmreq,
1290 struct scsi_cmnd *sc,
1291 u32 tmf,
1292 u32 req_id)
1293{
1294 struct snic_req_info *rqi = req_to_rqi(tmreq);
1295 struct scsi_lun lun;
1296 int tm_tag = snic_cmd_tag(sc) | rqi->tm_tag;
1297 int ret = 0;
1298
1299 SNIC_BUG_ON(!rqi);
1300 SNIC_BUG_ON(!rqi->tm_tag);
1301
1302 /* fill in lun info */
1303 int_to_scsilun(sc->device->lun, &lun);
1304
1305 /* Initialize snic_host_req: itmf */
1306 snic_itmf_init(tmreq,
1307 tm_tag,
1308 snic->config.hid,
1309 (ulong) rqi,
1310 0 /* flags */,
1311 req_id, /* Command to be aborted. */
1312 rqi->tgt_id,
1313 lun.scsi_lun,
1314 tmf);
1315
1316 /*
1317 * In case of multiple aborts on the same cmd,
1318 * try_wait_for_completion() and completion_done() can be used to
1319 * check whether an abort gets queued even after a prior abort has
1320 * already completed, e.g. SNIC_BUG_ON(completion_done(&rqi->done));
1321 */
1322
1323 ret = snic_queue_wq_desc(snic, tmreq, sizeof(*tmreq));
1324 if (ret)
1325 SNIC_HOST_ERR(snic->shost,
1326 "qitmf:Queuing ITMF(%d) Req sc %p, rqi %p, req_id %d tag %d Failed, ret = %d\n",
1327 tmf, sc, rqi, req_id, snic_cmd_tag(sc), ret);
1328 else
1329 SNIC_SCSI_DBG(snic->shost,
1330 "qitmf:Queuing ITMF(%d) Req sc %p, rqi %p, req_id %d, tag %d (req_id)- Success.",
1331 tmf, sc, rqi, req_id, snic_cmd_tag(sc));
1332
1333 return ret;
1334} /* end of snic_queue_itmf_req */
1335
1336static int
1337snic_issue_tm_req(struct snic *snic,
1338 struct snic_req_info *rqi,
1339 struct scsi_cmnd *sc,
1340 int tmf)
1341{
1342 struct snic_host_req *tmreq = NULL;
1343 int req_id = 0, tag = snic_cmd_tag(sc);
1344 int ret = 0;
1345
1346 if (snic_get_state(snic) == SNIC_FWRESET)
1347 return -EBUSY;
1348
1349 atomic_inc(&snic->ios_inflight);
1350
1351 SNIC_SCSI_DBG(snic->shost,
1352 "issu_tmreq: Task mgmt req %d. rqi %p w/ tag %x\n",
1353 tmf, rqi, tag);
1354
1355
1356 if (tmf == SNIC_ITMF_LUN_RESET) {
1357 tmreq = snic_dr_req_init(snic, rqi);
1358 req_id = SCSI_NO_TAG;
1359 } else {
1360 tmreq = snic_abort_req_init(snic, rqi);
1361 req_id = tag;
1362 }
1363
1364 if (!tmreq) {
1365 ret = -ENOMEM;
1366
1367 goto tmreq_err;
1368 }
1369
1370 ret = snic_queue_itmf_req(snic, tmreq, sc, tmf, req_id);
1371 if (ret)
1372 goto tmreq_err;
1373
1374 ret = 0;
1375
1376tmreq_err:
1377 if (ret) {
1378 SNIC_HOST_ERR(snic->shost,
1379 "issu_tmreq: Queing ITMF(%d) Req, sc %p rqi %p req_id %d tag %x fails err = %d\n",
1380 tmf, sc, rqi, req_id, tag, ret);
1381 } else {
1382 SNIC_SCSI_DBG(snic->shost,
1383 "issu_tmreq: Queuing ITMF(%d) Req, sc %p, rqi %p, req_id %d tag %x - Success.\n",
1384 tmf, sc, rqi, req_id, tag);
1385 }
1386
1387 atomic_dec(&snic->ios_inflight);
1388
1389 return ret;
1390}
1391
1392/*
1393 * snic_queue_abort_req : Queues abort req to WQ
1394 */
1395static int
1396snic_queue_abort_req(struct snic *snic,
1397 struct snic_req_info *rqi,
1398 struct scsi_cmnd *sc,
1399 int tmf)
1400{
1401 SNIC_SCSI_DBG(snic->shost, "q_abtreq: sc %p, rqi %p, tag %x, tmf %d\n",
1402 sc, rqi, snic_cmd_tag(sc), tmf);
1403
1404 /* Add special tag for abort */
1405 rqi->tm_tag |= SNIC_TAG_ABORT;
1406
1407 return snic_issue_tm_req(snic, rqi, sc, tmf);
1408}
1409
1410/*
1411 * snic_abort_finish : called by snic_abort_cmd on queuing abort successfully.
1412 */
1413static int
1414snic_abort_finish(struct snic *snic, struct scsi_cmnd *sc)
1415{
1416 struct snic_req_info *rqi = NULL;
1417 spinlock_t *io_lock = NULL;
1418 unsigned long flags;
1419 int ret = 0, tag = snic_cmd_tag(sc);
1420
1421 io_lock = snic_io_lock_hash(snic, sc);
1422 spin_lock_irqsave(io_lock, flags);
1423 rqi = (struct snic_req_info *) CMD_SP(sc);
1424 if (!rqi) {
1425 atomic64_inc(&snic->s_stats.io.req_null);
1426 CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
1427
1428 SNIC_SCSI_DBG(snic->shost,
1429 "abt_fini:req info is null tag 0x%x, sc 0x%p flags 0x%llx\n",
1430 tag, sc, CMD_FLAGS(sc));
1431 ret = FAILED;
1432
1433 goto abort_fail;
1434 }
1435
1436 rqi->abts_done = NULL;
1437
1438 ret = FAILED;
1439
1440 /* Check the abort status. */
1441 switch (CMD_ABTS_STATUS(sc)) {
1442 case SNIC_INVALID_CODE:
1443 /* Firmware didn't complete abort req, timedout */
1444 CMD_FLAGS(sc) |= SNIC_IO_ABTS_TIMEDOUT;
1445 atomic64_inc(&snic->s_stats.abts.drv_tmo);
1446 SNIC_SCSI_DBG(snic->shost,
1447 "abt_fini:sc %p Tag %x Driver Timeout.flags 0x%llx\n",
1448 sc, snic_cmd_tag(sc), CMD_FLAGS(sc));
1449 /* do not release snic request in timedout case */
1450 rqi = NULL;
1451
1452 goto abort_fail;
1453
1454 case SNIC_STAT_IO_SUCCESS:
1455 case SNIC_STAT_IO_NOT_FOUND:
1456 ret = SUCCESS;
1457 break;
1458
1459 default:
1460 /* Firmware completed abort with error */
1461 ret = FAILED;
1462 break;
1463 }
1464
1465 CMD_SP(sc) = NULL;
1466 SNIC_HOST_INFO(snic->shost,
1467 "abt_fini: Tag %x, Cmpl Status %s flags 0x%llx\n",
1468 tag, snic_io_status_to_str(CMD_ABTS_STATUS(sc)),
1469 CMD_FLAGS(sc));
1470
1471abort_fail:
1472 spin_unlock_irqrestore(io_lock, flags);
1473 if (rqi)
1474 snic_release_req_buf(snic, rqi, sc);
1475
1476 return ret;
1477} /* end of snic_abort_finish */
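/*
 * Standalone sketch (illustration only, hypothetical names) of the
 * status mapping above: a still-invalid code means the firmware never
 * answered (driver timeout), IO_SUCCESS and IO_NOT_FOUND both count as
 * a successful abort, and anything else fails.
 */
enum ex_fw_stat { EX_INVALID_CODE, EX_IO_SUCCESS, EX_IO_NOT_FOUND, EX_IO_ERR };
enum ex_eh_ret { EX_EH_SUCCESS, EX_EH_FAILED, EX_EH_TIMEDOUT };

static enum ex_eh_ret ex_map_abts_status(enum ex_fw_stat st)
{
	switch (st) {
	case EX_INVALID_CODE:
		return EX_EH_TIMEDOUT;	/* fw never completed the abort */
	case EX_IO_SUCCESS:
	case EX_IO_NOT_FOUND:
		return EX_EH_SUCCESS;	/* the IO is gone either way */
	default:
		return EX_EH_FAILED;
	}
}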
1478
1479/*
1480 * snic_send_abort_and_wait : Issues Abort, and Waits
1481 */
1482static int
1483snic_send_abort_and_wait(struct snic *snic, struct scsi_cmnd *sc)
1484{
1485 struct snic_req_info *rqi = NULL;
1486 enum snic_ioreq_state sv_state;
1487 struct snic_tgt *tgt = NULL;
1488 spinlock_t *io_lock = NULL;
1489 DECLARE_COMPLETION_ONSTACK(tm_done);
1490 unsigned long flags;
1491 int ret = 0, tmf = 0, tag = snic_cmd_tag(sc);
1492
1493 tgt = starget_to_tgt(scsi_target(sc->device));
1494 if ((snic_tgt_chkready(tgt) != 0) && (tgt->tdata.typ == SNIC_TGT_SAN))
1495 tmf = SNIC_ITMF_ABTS_TASK_TERM;
1496 else
1497 tmf = SNIC_ITMF_ABTS_TASK;
1498
1499 /* stats */
1500
1501 io_lock = snic_io_lock_hash(snic, sc);
1502
1503 /*
1504 * Avoid a race between SCSI issuing the abort and the device
1505 * completing the command.
1506 *
1507 * If the command is already completed by fw_cmpl code,
1508 * we just return SUCCESS from here. This means that the abort
1509 * succeeded. In the SCSI ML, since the timeout for command has
1510 * happened, the completion won't actually complete the command
1511 * and it will be considered an aborted command.
1512 *
1513 * The CMD_SP will not be cleared except while holding io_lock
1514 */
1515 spin_lock_irqsave(io_lock, flags);
1516 rqi = (struct snic_req_info *) CMD_SP(sc);
1517 if (!rqi) {
1518 spin_unlock_irqrestore(io_lock, flags);
1519
1520 SNIC_HOST_ERR(snic->shost,
1521 "abt_cmd: rqi is null. Tag %d flags 0x%llx\n",
1522 tag, CMD_FLAGS(sc));
1523
1524 ret = SUCCESS;
1525
1526 goto send_abts_end;
1527 }
1528
1529 rqi->abts_done = &tm_done;
1530 if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
1531 spin_unlock_irqrestore(io_lock, flags);
1532
1533 ret = 0;
1534 goto abts_pending;
1535 }
1536 SNIC_BUG_ON(!rqi->abts_done);
1537
1538 /* Save Command State, should be restored on failed to Queue. */
1539 sv_state = CMD_STATE(sc);
1540
1541 /*
1542 * Command is still pending; we need to abort it.
1543 * If the fw completes the command after this point,
1544 * the completion won't reach the mid-layer, since the abort
1545 * has already started.
1546 */
1547 CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
1548 CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;
1549
1550 SNIC_SCSI_DBG(snic->shost, "send_abt_cmd: TAG 0x%x\n", tag);
1551
1552 spin_unlock_irqrestore(io_lock, flags);
1553
1554 /* Now Queue the abort command to firmware */
1555 ret = snic_queue_abort_req(snic, rqi, sc, tmf);
1556 if (ret) {
1557 SNIC_HOST_ERR(snic->shost,
1558 "send_abt_cmd: IO w/ Tag 0x%x fail w/ err %d flags 0x%llx\n",
1559 tag, ret, CMD_FLAGS(sc));
1560
1561 spin_lock_irqsave(io_lock, flags);
1562 /* Restore Command's previous state */
1563 CMD_STATE(sc) = sv_state;
1564 rqi = (struct snic_req_info *) CMD_SP(sc);
1565 if (rqi)
1566 rqi->abts_done = NULL;
1567 spin_unlock_irqrestore(io_lock, flags);
1568 ret = FAILED;
1569
1570 goto send_abts_end;
1571 }
1572
1573 spin_lock_irqsave(io_lock, flags);
1574 if (tmf == SNIC_ITMF_ABTS_TASK) {
1575 CMD_FLAGS(sc) |= SNIC_IO_ABTS_ISSUED;
1576 atomic64_inc(&snic->s_stats.abts.num);
1577 } else {
1578 /* term stats */
1579 CMD_FLAGS(sc) |= SNIC_IO_TERM_ISSUED;
1580 }
1581 spin_unlock_irqrestore(io_lock, flags);
1582
1583 SNIC_SCSI_DBG(snic->shost,
1584 "send_abt_cmd: sc %p Tag %x flags 0x%llx\n",
1585 sc, tag, CMD_FLAGS(sc));
1586
1587
1588 ret = 0;
1589
1590abts_pending:
1591 /*
1592 * Queued an abort IO, wait for its completion.
1593 * Once the fw completes the abort command, it will
1594 * wakeup this thread.
1595 */
1596 wait_for_completion_timeout(&tm_done, SNIC_ABTS_TIMEOUT);
1597
1598send_abts_end:
1599 return ret;
1600} /* end of snic_send_abort_and_wait */
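/*
 * User-space analogue (illustration only, assuming POSIX threads) of
 * the abort handshake above: the waiting thread publishes a completion
 * slot under a lock, queues the request, then waits with a timeout;
 * the completing side signals the slot, mirroring rqi->abts_done and
 * wait_for_completion_timeout().
 */
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct ex_completion {
	pthread_mutex_t mtx;
	pthread_cond_t cond;
	bool done;
};

#define EX_COMPLETION_INIT \
	{ PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false }

static void ex_complete(struct ex_completion *c)
{
	pthread_mutex_lock(&c->mtx);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->mtx);
}

/* returns false on timeout, like an expired SNIC_ABTS_TIMEOUT */
static bool ex_wait_timeout(struct ex_completion *c, int secs)
{
	struct timespec ts;
	bool done;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += secs;

	pthread_mutex_lock(&c->mtx);
	while (!c->done &&
	       pthread_cond_timedwait(&c->cond, &c->mtx, &ts) == 0)
		;
	done = c->done;
	pthread_mutex_unlock(&c->mtx);

	return done;
}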
1601
1602/*
1603 * This function is exported to SCSI for sending abort cmnds.
1604 * A SCSI IO is represented by a snic_ioreq in the driver.
1605 * The snic_ioreq is linked to the SCSI Cmd, thus linking it with the ULP's IO.
1606 */
1607int
1608snic_abort_cmd(struct scsi_cmnd *sc)
1609{
1610 struct snic *snic = shost_priv(sc->device->host);
1611 int ret = SUCCESS, tag = snic_cmd_tag(sc);
1612 u32 start_time = jiffies;
1613
1614 SNIC_SCSI_DBG(snic->shost, "abt_cmd:sc %p :0x%x :req = %p :tag = %d\n",
1615 sc, sc->cmnd[0], sc->request, tag);
1616
1617 if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) {
1618 SNIC_HOST_ERR(snic->shost,
1619 "abt_cmd: tag %x Parent Devs are not rdy\n",
1620 tag);
1621 ret = FAST_IO_FAIL;
1622
1623 goto abort_end;
1624 }
1625
1626
1627 ret = snic_send_abort_and_wait(snic, sc);
1628 if (ret)
1629 goto abort_end;
1630
1631 ret = snic_abort_finish(snic, sc);
1632
1633abort_end:
1634 SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
1635 jiffies_to_msecs(jiffies - start_time), 0,
1636 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
1637
1638 SNIC_SCSI_DBG(snic->shost,
1639 "abts: Abort Req Status = %s\n",
1640 (ret == SUCCESS) ? "SUCCESS" :
1641 ((ret == FAST_IO_FAIL) ? "FAST_IO_FAIL" : "FAILED"));
1642
1643 return ret;
1644}
1645
1646
1647
1648static int
1649snic_is_abts_pending(struct snic *snic, struct scsi_cmnd *lr_sc)
1650{
1651 struct snic_req_info *rqi = NULL;
1652 struct scsi_cmnd *sc = NULL;
1653 struct scsi_device *lr_sdev = NULL;
1654 spinlock_t *io_lock = NULL;
1655 u32 tag;
1656 unsigned long flags;
1657
1658 if (lr_sc)
1659 lr_sdev = lr_sc->device;
1660
1661 /* walk through the tag map, and check if IOs are still pending in fw */
1662 for (tag = 0; tag < snic->max_tag_id; tag++) {
1663 io_lock = snic_io_lock_tag(snic, tag);
1664
1665 spin_lock_irqsave(io_lock, flags);
1666 sc = scsi_host_find_tag(snic->shost, tag);
1667
1668 if (!sc || (lr_sc && (sc->device != lr_sdev || sc == lr_sc))) {
1669 spin_unlock_irqrestore(io_lock, flags);
1670
1671 continue;
1672 }
1673
1674 rqi = (struct snic_req_info *) CMD_SP(sc);
1675 if (!rqi) {
1676 spin_unlock_irqrestore(io_lock, flags);
1677
1678 continue;
1679 }
1680
1681 /*
1682 * Found IO that is still pending w/ firmware and belongs to
1683 * the LUN that is under reset, if lr_sc != NULL
1684 */
1685 SNIC_SCSI_DBG(snic->shost, "Found IO in %s on LUN\n",
1686 snic_ioreq_state_to_str(CMD_STATE(sc)));
1687
1688 if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
1689 spin_unlock_irqrestore(io_lock, flags);
1690
1691 return 1;
1692 }
1693
1694 spin_unlock_irqrestore(io_lock, flags);
1695 }
1696
1697 return 0;
1698} /* end of snic_is_abts_pending */
1699
1700static int
1701snic_dr_clean_single_req(struct snic *snic,
1702 u32 tag,
1703 struct scsi_device *lr_sdev)
1704{
1705 struct snic_req_info *rqi = NULL;
1706 struct snic_tgt *tgt = NULL;
1707 struct scsi_cmnd *sc = NULL;
1708 spinlock_t *io_lock = NULL;
1709 u32 sv_state = 0, tmf = 0;
1710 DECLARE_COMPLETION_ONSTACK(tm_done);
1711 unsigned long flags;
1712 int ret = 0;
1713
1714 io_lock = snic_io_lock_tag(snic, tag);
1715 spin_lock_irqsave(io_lock, flags);
1716 sc = scsi_host_find_tag(snic->shost, tag);
1717
1718 /* Ignore Cmds that don't belong to the LUN Reset device */
1719 if (!sc || sc->device != lr_sdev)
1720 goto skip_clean;
1721
1722 rqi = (struct snic_req_info *) CMD_SP(sc);
1723
1724 if (!rqi)
1725 goto skip_clean;
1726
1727
1728 if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
1729 goto skip_clean;
1730
1731
1732 if ((CMD_FLAGS(sc) & SNIC_DEVICE_RESET) &&
1733 (!(CMD_FLAGS(sc) & SNIC_DEV_RST_ISSUED))) {
1734
1735 SNIC_SCSI_DBG(snic->shost,
1736 "clean_single_req: devrst is not pending sc 0x%p\n",
1737 sc);
1738
1739 goto skip_clean;
1740 }
1741
1742 SNIC_SCSI_DBG(snic->shost,
1743 "clean_single_req: Found IO in %s on lun\n",
1744 snic_ioreq_state_to_str(CMD_STATE(sc)));
1745
1746 /* Save Command State */
1747 sv_state = CMD_STATE(sc);
1748
1749 /*
1750 * Any pending IO issued prior to reset is expected to be
1751 * in abts pending state, if not we need to set SNIC_IOREQ_ABTS_PENDING
1752 * to indicate the IO is abort pending.
1753 * When IO is completed, the IO will be handed over and handled
1754 * in this function.
1755 */
1756
1757 CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
1758 SNIC_BUG_ON(rqi->abts_done);
1759
1760 if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) {
1761 rqi->tm_tag = SNIC_TAG_DEV_RST;
1762
1763 SNIC_SCSI_DBG(snic->shost,
1764 "clean_single_req:devrst sc 0x%p\n", sc);
1765 }
1766
1767 CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;
1768 rqi->abts_done = &tm_done;
1769 spin_unlock_irqrestore(io_lock, flags);
1770
1771 tgt = starget_to_tgt(scsi_target(sc->device));
1772 if ((snic_tgt_chkready(tgt) != 0) && (tgt->tdata.typ == SNIC_TGT_SAN))
1773 tmf = SNIC_ITMF_ABTS_TASK_TERM;
1774 else
1775 tmf = SNIC_ITMF_ABTS_TASK;
1776
1777 /* Now queue the abort command to firmware */
1778 ret = snic_queue_abort_req(snic, rqi, sc, tmf);
1779 if (ret) {
1780 SNIC_HOST_ERR(snic->shost,
1781 "clean_single_req_err:sc %p, tag %d abt failed. tm_tag %d flags 0x%llx\n",
1782 sc, tag, rqi->tm_tag, CMD_FLAGS(sc));
1783
1784 spin_lock_irqsave(io_lock, flags);
1785 rqi = (struct snic_req_info *) CMD_SP(sc);
1786 if (rqi)
1787 rqi->abts_done = NULL;
1788
1789 /* Restore Command State */
1790 if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
1791 CMD_STATE(sc) = sv_state;
1792
1793 ret = 1;
1794 goto skip_clean;
1795 }
1796
1797 spin_lock_irqsave(io_lock, flags);
1798 if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET)
1799 CMD_FLAGS(sc) |= SNIC_DEV_RST_TERM_ISSUED;
1800
1801 CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_ISSUED;
1802 spin_unlock_irqrestore(io_lock, flags);
1803
1804 wait_for_completion_timeout(&tm_done, SNIC_ABTS_TIMEOUT);
1805
1806 /* Recheck cmd state to check if it is now aborted. */
1807 spin_lock_irqsave(io_lock, flags);
1808 rqi = (struct snic_req_info *) CMD_SP(sc);
1809 if (!rqi) {
1810 CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
1811 goto skip_clean;
1812 }
1813 rqi->abts_done = NULL;
1814
1815 /* if abort is still pending w/ fw, fail */
1816 if (CMD_ABTS_STATUS(sc) == SNIC_INVALID_CODE) {
1817 SNIC_HOST_ERR(snic->shost,
1818 "clean_single_req_err:sc %p tag %d abt still pending w/ fw, tm_tag %d flags 0x%llx\n",
1819 sc, tag, rqi->tm_tag, CMD_FLAGS(sc));
1820
1821 CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_DONE;
1822 ret = 1;
1823
1824 goto skip_clean;
1825 }
1826
1827 CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
1828 CMD_SP(sc) = NULL;
1829 spin_unlock_irqrestore(io_lock, flags);
1830
1831 snic_release_req_buf(snic, rqi, sc);
1832
1833 ret = 0;
1834
1835 return ret;
1836
1837skip_clean:
1838 spin_unlock_irqrestore(io_lock, flags);
1839
1840 return ret;
1841} /* end of snic_dr_clean_single_req */
1842
1843static int
1844snic_dr_clean_pending_req(struct snic *snic, struct scsi_cmnd *lr_sc)
1845{
1846 struct scsi_device *lr_sdev = lr_sc->device;
1847 u32 tag = 0;
1848 int ret = FAILED;
1849
1850 for (tag = 0; tag < snic->max_tag_id; tag++) {
1851 if (tag == snic_cmd_tag(lr_sc))
1852 continue;
1853
1854 ret = snic_dr_clean_single_req(snic, tag, lr_sdev);
1855 if (ret) {
1856 SNIC_HOST_ERR(snic->shost, "clean_err:tag = %d\n", tag);
1857
1858 goto clean_err;
1859 }
1860 }
1861
1862 msleep(100); /* schedule_timeout() without setting the task state returns at once */
1863
1864 /* Walk through all the cmds and check abts status. */
1865 if (snic_is_abts_pending(snic, lr_sc)) {
1866 ret = FAILED;
1867
1868 goto clean_err;
1869 }
1870
1871 ret = 0;
1872 SNIC_SCSI_DBG(snic->shost, "clean_pending_req: Success.\n");
1873
1874 return ret;
1875
1876clean_err:
1877 ret = FAILED;
1878 SNIC_HOST_ERR(snic->shost,
1879 "Failed to Clean Pending IOs on %s device.\n",
1880 dev_name(&lr_sdev->sdev_gendev));
1881
1882 return ret;
1883
1884} /* end of snic_dr_clean_pending_req */
1885
1886/*
1887 * snic_dr_finish : Called by snic_device_reset
1888 */
1889static int
1890snic_dr_finish(struct snic *snic, struct scsi_cmnd *sc)
1891{
1892 struct snic_req_info *rqi = NULL;
1893 spinlock_t *io_lock = NULL;
1894 unsigned long flags;
1895 int lr_res = 0;
1896 int ret = FAILED;
1897
1898 io_lock = snic_io_lock_hash(snic, sc);
1899 spin_lock_irqsave(io_lock, flags);
1900 rqi = (struct snic_req_info *) CMD_SP(sc);
1901 if (!rqi) {
1902 spin_unlock_irqrestore(io_lock, flags);
1903 SNIC_SCSI_DBG(snic->shost,
1904 "dr_fini: rqi is null tag 0x%x sc 0x%p flags 0x%llx\n",
1905 snic_cmd_tag(sc), sc, CMD_FLAGS(sc));
1906
1907 ret = FAILED;
1908 goto dr_fini_end;
1909 }
1910
1911 rqi->dr_done = NULL;
1912
1913 lr_res = CMD_LR_STATUS(sc);
1914
1915 switch (lr_res) {
1916 case SNIC_INVALID_CODE:
1917 /* stats */
1918 SNIC_SCSI_DBG(snic->shost,
1919 "dr_fini: Tag %x Dev Reset Timedout. flags 0x%llx\n",
1920 snic_cmd_tag(sc), CMD_FLAGS(sc));
1921
1922 CMD_FLAGS(sc) |= SNIC_DEV_RST_TIMEDOUT;
1923 ret = FAILED;
1924
1925 goto dr_failed;
1926
1927 case SNIC_STAT_IO_SUCCESS:
1928 SNIC_SCSI_DBG(snic->shost,
1929 "dr_fini: Tag %x Dev Reset cmpl\n",
1930 snic_cmd_tag(sc));
1931 ret = 0;
1932 break;
1933
1934 default:
1935 SNIC_HOST_ERR(snic->shost,
1936 "dr_fini:Device Reset completed& failed.Tag = %x lr_status %s flags 0x%llx\n",
1937 snic_cmd_tag(sc),
1938 snic_io_status_to_str(lr_res), CMD_FLAGS(sc));
1939 ret = FAILED;
1940 goto dr_failed;
1941 }
1942 spin_unlock_irqrestore(io_lock, flags);
1943
1944 /*
1945 * Cleanup any IOs on this LUN that have still not completed.
1946 * If any of these fail, then LUN Reset fails.
1947 * Cleanup cleans all commands on this LUN except
1948 * the lun reset command. If all cmds get cleaned, the LUN Reset
1949 * succeeds.
1950 */
1951
1952 ret = snic_dr_clean_pending_req(snic, sc);
1953 if (ret) {
1954 spin_lock_irqsave(io_lock, flags);
1955 SNIC_SCSI_DBG(snic->shost,
1956 "dr_fini: Device Reset Failed since could not abort all IOs. Tag = %x.\n",
1957 snic_cmd_tag(sc));
1958 rqi = (struct snic_req_info *) CMD_SP(sc);
1959
1960 goto dr_failed;
1961 } else {
1962 /* Cleanup LUN Reset Command */
1963 spin_lock_irqsave(io_lock, flags);
1964 rqi = (struct snic_req_info *) CMD_SP(sc);
1965 if (rqi)
1966 ret = SUCCESS; /* Completed Successfully */
1967 else
1968 ret = FAILED;
1969 }
1970
1971dr_failed:
1972 SNIC_BUG_ON(!spin_is_locked(io_lock));
1973 if (rqi)
1974 CMD_SP(sc) = NULL;
1975 spin_unlock_irqrestore(io_lock, flags);
1976
1977 if (rqi)
1978 snic_release_req_buf(snic, rqi, sc);
1979
1980dr_fini_end:
1981 return ret;
1982} /* end of snic_dr_finish */
1983
1984static int
1985snic_queue_dr_req(struct snic *snic,
1986 struct snic_req_info *rqi,
1987 struct scsi_cmnd *sc)
1988{
1989 /* Add special tag for device reset */
1990 rqi->tm_tag |= SNIC_TAG_DEV_RST;
1991
1992 return snic_issue_tm_req(snic, rqi, sc, SNIC_ITMF_LUN_RESET);
1993}
1994
1995static int
1996snic_send_dr_and_wait(struct snic *snic, struct scsi_cmnd *sc)
1997{
1998 struct snic_req_info *rqi = NULL;
1999 enum snic_ioreq_state sv_state;
2000 spinlock_t *io_lock = NULL;
2001 unsigned long flags;
2002 DECLARE_COMPLETION_ONSTACK(tm_done);
2003 int ret = FAILED, tag = snic_cmd_tag(sc);
2004
2005 io_lock = snic_io_lock_hash(snic, sc);
2006 spin_lock_irqsave(io_lock, flags);
2007 CMD_FLAGS(sc) |= SNIC_DEVICE_RESET;
2008 rqi = (struct snic_req_info *) CMD_SP(sc);
2009 if (!rqi) {
2010 SNIC_HOST_ERR(snic->shost,
2011 "send_dr: rqi is null, Tag 0x%x flags 0x%llx\n",
2012 tag, CMD_FLAGS(sc));
2013 spin_unlock_irqrestore(io_lock, flags);
2014
2015 ret = FAILED;
2016 goto send_dr_end;
2017 }
2018
2019 /* Save Command state to restore in case Queuing failed. */
2020 sv_state = CMD_STATE(sc);
2021
2022 CMD_STATE(sc) = SNIC_IOREQ_LR_PENDING;
2023 CMD_LR_STATUS(sc) = SNIC_INVALID_CODE;
2024
2025 SNIC_SCSI_DBG(snic->shost, "dr: TAG = %x\n", tag);
2026
2027 rqi->dr_done = &tm_done;
2028 SNIC_BUG_ON(!rqi->dr_done);
2029
2030 spin_unlock_irqrestore(io_lock, flags);
2031 /*
2032 * The Command state is changed to IOREQ_PENDING;
2033 * in this case, if the command is completed, the icmnd_cmpl will
2034 * mark the cmd as completed.
2035 * This logic still makes the LUN Reset inevitable.
2036 */
2037
2038 ret = snic_queue_dr_req(snic, rqi, sc);
2039 if (ret) {
2040 SNIC_HOST_ERR(snic->shost,
2041 "send_dr: IO w/ Tag 0x%x Failed err = %d. flags 0x%llx\n",
2042 tag, ret, CMD_FLAGS(sc));
2043
2044 spin_lock_irqsave(io_lock, flags);
2045 /* Restore State */
2046 CMD_STATE(sc) = sv_state;
2047 rqi = (struct snic_req_info *) CMD_SP(sc);
2048 if (rqi)
2049 rqi->dr_done = NULL;
2050 /* rqi is freed in caller. */
2051 spin_unlock_irqrestore(io_lock, flags);
2052 ret = FAILED;
2053
2054 goto send_dr_end;
2055 }
2056
2057 spin_lock_irqsave(io_lock, flags);
2058 CMD_FLAGS(sc) |= SNIC_DEV_RST_ISSUED;
2059 spin_unlock_irqrestore(io_lock, flags);
2060
2061 ret = 0;
2062
2063 wait_for_completion_timeout(&tm_done, SNIC_LUN_RESET_TIMEOUT);
2064
2065send_dr_end:
2066 return ret;
2067}
2068
2069/*
2070 * auxiliary function to check whether the lun reset op is supported;
2071 * returns 0 if it is not supported
2072 */
2073static int
2074snic_dev_reset_supported(struct scsi_device *sdev)
2075{
2076 struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));
2077
2078 if (tgt->tdata.typ == SNIC_TGT_DAS)
2079 return 0;
2080
2081 return 1;
2082}
2083
2084static void
2085snic_unlink_and_release_req(struct snic *snic, struct scsi_cmnd *sc, int flag)
2086{
2087 struct snic_req_info *rqi = NULL;
2088 spinlock_t *io_lock = NULL;
2089 unsigned long flags;
2090 u32 start_time = jiffies;
2091
2092 io_lock = snic_io_lock_hash(snic, sc);
2093 spin_lock_irqsave(io_lock, flags);
2094 rqi = (struct snic_req_info *) CMD_SP(sc);
2095 if (rqi) {
2096 start_time = rqi->start_time;
2097 CMD_SP(sc) = NULL;
2098 }
2099
2100 CMD_FLAGS(sc) |= flag;
2101 spin_unlock_irqrestore(io_lock, flags);
2102
2103 if (rqi)
2104 snic_release_req_buf(snic, rqi, sc);
2105
2106 SNIC_TRC(snic->shost->host_no, snic_cmd_tag(sc), (ulong) sc,
2107 jiffies_to_msecs(jiffies - start_time), (ulong) rqi,
2108 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
2109}
2110
2111/*
2112 * SCSI Eh thread issues a LUN Reset when one or more commands on a LUN
2113 * fail to get aborted. It calls the driver's eh_device_reset with a SCSI
2114 * command on the LUN.
2115 */
2116int
2117snic_device_reset(struct scsi_cmnd *sc)
2118{
2119 struct Scsi_Host *shost = sc->device->host;
2120 struct snic *snic = shost_priv(shost);
2121 struct snic_req_info *rqi = NULL;
2122 int tag = snic_cmd_tag(sc);
2123 int start_time = jiffies;
2124 int ret = FAILED;
2125 int dr_supp = 0;
2126
2127 SNIC_SCSI_DBG(shost, "dev_reset:sc %p :0x%x :req = %p :tag = %d\n",
2128 sc, sc->cmnd[0], sc->request,
2129 snic_cmd_tag(sc));
2130 dr_supp = snic_dev_reset_supported(sc->device);
2131 if (!dr_supp) {
2132 /* device reset op is not supported */
2133 SNIC_HOST_INFO(shost, "LUN Reset Op not supported.\n");
2134 snic_unlink_and_release_req(snic, sc, SNIC_DEV_RST_NOTSUP);
2135
2136 goto dev_rst_end;
2137 }
2138
2139 if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) {
2140 snic_unlink_and_release_req(snic, sc, 0);
2141 SNIC_HOST_ERR(shost, "Devrst: Parent Devs are not online.\n");
2142
2143 goto dev_rst_end;
2144 }
2145
2146 /* There is no tag when lun reset is issued through ioctl. */
2147 if (unlikely(tag <= SNIC_NO_TAG)) {
2148 SNIC_HOST_INFO(snic->shost,
2149 "Devrst: LUN Reset Recvd thru IOCTL.\n");
2150
2151 rqi = snic_req_init(snic, 0);
2152 if (!rqi)
2153 goto dev_rst_end;
2154
2155 memset(scsi_cmd_priv(sc), 0,
2156 sizeof(struct snic_internal_io_state));
2157 CMD_SP(sc) = (char *)rqi;
2158 CMD_FLAGS(sc) = SNIC_NO_FLAGS;
2159
2160 /* Add special tag for dr coming from user space */
2161 rqi->tm_tag = SNIC_TAG_IOCTL_DEV_RST;
2162 rqi->sc = sc;
2163 }
2164
2165 ret = snic_send_dr_and_wait(snic, sc);
2166 if (ret) {
2167 SNIC_HOST_ERR(snic->shost,
2168 "Devrst: IO w/ Tag %x Failed w/ err = %d\n",
2169 tag, ret);
2170
2171 snic_unlink_and_release_req(snic, sc, 0);
2172
2173 goto dev_rst_end;
2174 }
2175
2176 ret = snic_dr_finish(snic, sc);
2177
2178dev_rst_end:
2179 SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
2180 jiffies_to_msecs(jiffies - start_time),
2181 0, SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
2182
2183 SNIC_SCSI_DBG(snic->shost,
2184 "Devrst: Returning from Device Reset : %s\n",
2185 (ret == SUCCESS) ? "SUCCESS" : "FAILED");
2186
2187 return ret;
2188} /* end of snic_device_reset */
2189
2190/*
2191 * SCSI Error handling calls driver's eh_host_reset if all prior
2192 * error handling levels return FAILED.
2193 *
2194 * Host Reset is the highest level of error recovery. If this fails, then
2195 * host is offlined by SCSI.
2196 */
2197/*
2198 * snic_issue_hba_reset : Queues FW Reset Request.
2199 */
2200static int
2201snic_issue_hba_reset(struct snic *snic, struct scsi_cmnd *sc)
2202{
2203 struct snic_req_info *rqi = NULL;
2204 struct snic_host_req *req = NULL;
2205 spinlock_t *io_lock = NULL;
2206 DECLARE_COMPLETION_ONSTACK(wait);
2207 unsigned long flags;
2208 int ret = -ENOMEM;
2209
2210 rqi = snic_req_init(snic, 0);
2211 if (!rqi) {
2212 ret = -ENOMEM;
2213
2214 goto hba_rst_end;
2215 }
2216
2217 if (snic_cmd_tag(sc) == SCSI_NO_TAG) {
2218 memset(scsi_cmd_priv(sc), 0,
2219 sizeof(struct snic_internal_io_state));
2220 SNIC_HOST_INFO(snic->shost, "issu_hr:Host reset thru ioctl.\n");
2221 rqi->sc = sc;
2222 }
2223
2224 req = rqi_to_req(rqi);
2225
2226 io_lock = snic_io_lock_hash(snic, sc);
2227 spin_lock_irqsave(io_lock, flags);
2228 SNIC_BUG_ON(CMD_SP(sc) != NULL);
2229 CMD_STATE(sc) = SNIC_IOREQ_PENDING;
2230 CMD_SP(sc) = (char *) rqi;
2231 CMD_FLAGS(sc) |= SNIC_IO_INITIALIZED;
2232 snic->remove_wait = &wait;
2233 spin_unlock_irqrestore(io_lock, flags);
2234
2235 /* Initialize Request */
2236 snic_io_hdr_enc(&req->hdr, SNIC_REQ_HBA_RESET, 0, snic_cmd_tag(sc),
2237 snic->config.hid, 0, (ulong) rqi);
2238
2239 req->u.reset.flags = 0;
2240
2241 ret = snic_queue_wq_desc(snic, req, sizeof(*req));
2242 if (ret) {
2243 SNIC_HOST_ERR(snic->shost,
2244 "issu_hr:Queuing HBA Reset Failed. w err %d\n",
2245 ret);
2246
2247 goto hba_rst_err;
2248 }
2249
2250 spin_lock_irqsave(io_lock, flags);
2251 CMD_FLAGS(sc) |= SNIC_HOST_RESET_ISSUED;
2252 spin_unlock_irqrestore(io_lock, flags);
2253 atomic64_inc(&snic->s_stats.reset.hba_resets);
2254 SNIC_HOST_INFO(snic->shost, "Queued HBA Reset Successfully.\n");
2255
2256 wait_for_completion_timeout(snic->remove_wait,
2257 SNIC_HOST_RESET_TIMEOUT);
2258
2259 if (snic_get_state(snic) == SNIC_FWRESET) {
2260 SNIC_HOST_ERR(snic->shost, "reset_cmpl: Reset Timedout.\n");
2261 ret = -ETIMEDOUT;
2262
2263 goto hba_rst_err;
2264 }
2265
2266 spin_lock_irqsave(io_lock, flags);
2267 snic->remove_wait = NULL;
2268 rqi = (struct snic_req_info *) CMD_SP(sc);
2269 CMD_SP(sc) = NULL;
2270 spin_unlock_irqrestore(io_lock, flags);
2271
2272 if (rqi)
2273 snic_req_free(snic, rqi);
2274
2275 ret = 0;
2276
2277 return ret;
2278
2279hba_rst_err:
2280 spin_lock_irqsave(io_lock, flags);
2281 snic->remove_wait = NULL;
2282 rqi = (struct snic_req_info *) CMD_SP(sc);
2283 CMD_SP(sc) = NULL;
2284 spin_unlock_irqrestore(io_lock, flags);
2285
2286 if (rqi)
2287 snic_req_free(snic, rqi);
2288
2289hba_rst_end:
2290 SNIC_HOST_ERR(snic->shost,
2291 "reset:HBA Reset Failed w/ err = %d.\n",
2292 ret);
2293
2294 return ret;
2295} /* end of snic_issue_hba_reset */
2296
2297int
2298snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc)
2299{
2300 struct snic *snic = shost_priv(shost);
2301 enum snic_state sv_state;
2302 unsigned long flags;
2303 int ret = FAILED;
2304
2305 /* Set snic state as SNIC_FWRESET*/
2306 sv_state = snic_get_state(snic);
2307
2308 spin_lock_irqsave(&snic->snic_lock, flags);
2309 if (snic_get_state(snic) == SNIC_FWRESET) {
2310 spin_unlock_irqrestore(&snic->snic_lock, flags);
2311 SNIC_HOST_INFO(shost, "reset:prev reset is in progres\n");
2312
2313 msleep(SNIC_HOST_RESET_TIMEOUT);
2314 ret = SUCCESS;
2315
2316 goto reset_end;
2317 }
2318
2319 snic_set_state(snic, SNIC_FWRESET);
2320 spin_unlock_irqrestore(&snic->snic_lock, flags);
2321
2322
2323 /* Wait for all the IOs that have entered Qcmd */
2324 while (atomic_read(&snic->ios_inflight))
2325 msleep(1); /* schedule_timeout() in TASK_RUNNING would busy-spin */
2326
2327 ret = snic_issue_hba_reset(snic, sc);
2328 if (ret) {
2329 SNIC_HOST_ERR(shost,
2330 "reset:Host Reset Failed w/ err %d.\n",
2331 ret);
2332 spin_lock_irqsave(&snic->snic_lock, flags);
2333 snic_set_state(snic, sv_state);
2334 spin_unlock_irqrestore(&snic->snic_lock, flags);
2335 atomic64_inc(&snic->s_stats.reset.hba_reset_fail);
2336 ret = FAILED;
2337
2338 goto reset_end;
2339 }
2340
2341 ret = SUCCESS;
2342
2343reset_end:
2344 return ret;
2345} /* end of snic_reset */
2346
2347/*
2348 * SCSI Error handling calls driver's eh_host_reset if all prior
2349 * error handling levels return FAILED.
2350 *
2351 * Host Reset is the highest level of error recovery. If this fails, then
2352 * host is offlined by SCSI.
2353 */
2354int
2355snic_host_reset(struct scsi_cmnd *sc)
2356{
2357 struct Scsi_Host *shost = sc->device->host;
2358 u32 start_time = jiffies;
2359 int ret = FAILED;
2360
2361 SNIC_SCSI_DBG(shost,
2362 "host reset:sc %p sc_cmd 0x%x req %p tag %d flags 0x%llx\n",
2363 sc, sc->cmnd[0], sc->request,
2364 snic_cmd_tag(sc), CMD_FLAGS(sc));
2365
2366 ret = snic_reset(shost, sc);
2367
2368 SNIC_TRC(shost->host_no, snic_cmd_tag(sc), (ulong) sc,
2369 jiffies_to_msecs(jiffies - start_time),
2370 0, SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
2371
2372 return ret;
2373} /* end of snic_host_reset */
2374
2375/*
2376 * snic_cmpl_pending_tmreq : Caller should hold io_lock
2377 */
2378static void
2379snic_cmpl_pending_tmreq(struct snic *snic, struct scsi_cmnd *sc)
2380{
2381 struct snic_req_info *rqi = NULL;
2382
2383 SNIC_SCSI_DBG(snic->shost,
2384 "Completing Pending TM Req sc %p, state %s flags 0x%llx\n",
2385 sc, snic_io_status_to_str(CMD_STATE(sc)), CMD_FLAGS(sc));
2386
2387 rqi = (struct snic_req_info *) CMD_SP(sc);
2388 if (!rqi)
2389 return;
2390
2391 if (rqi->dr_done)
2392 complete(rqi->dr_done);
2393 else if (rqi->abts_done)
2394 complete(rqi->abts_done);
2395}
2396
2397/*
2398 * snic_scsi_cleanup: Walks through tag map and releases the reqs
2399 */
2400static void
2401snic_scsi_cleanup(struct snic *snic, int ex_tag)
2402{
2403 struct snic_req_info *rqi = NULL;
2404 struct scsi_cmnd *sc = NULL;
2405 spinlock_t *io_lock = NULL;
2406 unsigned long flags;
2407 int tag;
2408 u64 st_time = 0;
2409
2410 SNIC_SCSI_DBG(snic->shost, "sc_clean: scsi cleanup.\n");
2411
2412 for (tag = 0; tag < snic->max_tag_id; tag++) {
2413 /* Skip ex_tag */
2414 if (tag == ex_tag)
2415 continue;
2416
2417 io_lock = snic_io_lock_tag(snic, tag);
2418 spin_lock_irqsave(io_lock, flags);
2419 sc = scsi_host_find_tag(snic->shost, tag);
2420 if (!sc) {
2421 spin_unlock_irqrestore(io_lock, flags);
2422
2423 continue;
2424 }
2425
2426 if (unlikely(snic_tmreq_pending(sc))) {
2427 /*
2428 * Handles the case where FW completes a reset w/o sending
2429 * completions for outstanding IOs.
2430 */
2431 snic_cmpl_pending_tmreq(snic, sc);
2432 spin_unlock_irqrestore(io_lock, flags);
2433
2434 continue;
2435 }
2436
2437 rqi = (struct snic_req_info *) CMD_SP(sc);
2438 if (!rqi) {
2439 spin_unlock_irqrestore(io_lock, flags);
2440
2441 goto cleanup;
2442 }
2443
2444 SNIC_SCSI_DBG(snic->shost,
2445 "sc_clean: sc %p, rqi %p, tag %d flags 0x%llx\n",
2446 sc, rqi, tag, CMD_FLAGS(sc));
2447
2448 CMD_SP(sc) = NULL;
2449 CMD_FLAGS(sc) |= SNIC_SCSI_CLEANUP;
2450 spin_unlock_irqrestore(io_lock, flags);
2451 st_time = rqi->start_time;
2452
2453 SNIC_HOST_INFO(snic->shost,
2454 "sc_clean: Releasing rqi %p : flags 0x%llx\n",
2455 rqi, CMD_FLAGS(sc));
2456
2457 snic_release_req_buf(snic, rqi, sc);
2458
2459cleanup:
2460 sc->result = DID_TRANSPORT_DISRUPTED << 16;
2461 SNIC_HOST_INFO(snic->shost,
2462 "sc_clean: DID_TRANSPORT_DISRUPTED for sc %p. rqi %p duration %llu msecs\n",
2463 sc, rqi, (jiffies - st_time));
2464
2465 /* Update IO stats */
2466 snic_stats_update_io_cmpl(&snic->s_stats);
2467
2468 if (sc->scsi_done) {
2469 SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
2470 jiffies_to_msecs(jiffies - st_time), 0,
2471 SNIC_TRC_CMD(sc),
2472 SNIC_TRC_CMD_STATE_FLAGS(sc));
2473
2474 sc->scsi_done(sc);
2475 }
2476 }
2477} /* end of snic_scsi_cleanup */
2478
2479void
2480snic_shutdown_scsi_cleanup(struct snic *snic)
2481{
2482 SNIC_HOST_INFO(snic->shost, "Shutdown time SCSI Cleanup.\n");
2483
2484 snic_scsi_cleanup(snic, SCSI_NO_TAG);
2485} /* end of snic_shutdown_scsi_cleanup */
2486
2487/*
2488 * snic_internal_abort_io
2489 * called by : snic_tgt_scsi_abort_io
2490 */
2491static int
2492snic_internal_abort_io(struct snic *snic, struct scsi_cmnd *sc, int tmf)
2493{
2494 struct snic_req_info *rqi = NULL;
2495 spinlock_t *io_lock = NULL;
2496 unsigned long flags;
2497 u32 sv_state = 0;
2498 int ret = 0;
2499
2500 io_lock = snic_io_lock_hash(snic, sc);
2501 spin_lock_irqsave(io_lock, flags);
2502 rqi = (struct snic_req_info *) CMD_SP(sc);
2503 if (!rqi)
2504 goto skip_internal_abts;
2505
2506 if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
2507 goto skip_internal_abts;
2508
2509 if ((CMD_FLAGS(sc) & SNIC_DEVICE_RESET) &&
2510 (!(CMD_FLAGS(sc) & SNIC_DEV_RST_ISSUED))) {
2511
2512 SNIC_SCSI_DBG(snic->shost,
2513 "internal_abts: dev rst not pending sc 0x%p\n",
2514 sc);
2515
2516 goto skip_internal_abts;
2517 }
2518
2519
2520 if (!(CMD_FLAGS(sc) & SNIC_IO_ISSUED)) {
2521 SNIC_SCSI_DBG(snic->shost,
2522 "internal_abts: IO not yet issued sc 0x%p tag 0x%x flags 0x%llx state %d\n",
2523 sc, snic_cmd_tag(sc), CMD_FLAGS(sc), CMD_STATE(sc));
2524
2525 goto skip_internal_abts;
2526 }
2527
2528 sv_state = CMD_STATE(sc);
2529 CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
2530 CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;
2531 CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_PENDING;
2532
2533 if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) {
2534 /* stats */
2535 rqi->tm_tag = SNIC_TAG_DEV_RST;
2536 SNIC_SCSI_DBG(snic->shost, "internal_abts:dev rst sc %p\n", sc);
2537 }
2538
2539 SNIC_SCSI_DBG(snic->shost, "internal_abts: Issuing abts tag %x\n",
2540 snic_cmd_tag(sc));
2541 SNIC_BUG_ON(rqi->abts_done);
2542 spin_unlock_irqrestore(io_lock, flags);
2543
2544 ret = snic_queue_abort_req(snic, rqi, sc, tmf);
2545 if (ret) {
2546 SNIC_HOST_ERR(snic->shost,
2547 "internal_abts: Tag = %x , Failed w/ err = %d\n",
2548 snic_cmd_tag(sc), ret);
2549
2550 spin_lock_irqsave(io_lock, flags);
2551
2552 if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
2553 CMD_STATE(sc) = sv_state;
2554
2555 goto skip_internal_abts;
2556 }
2557
2558 spin_lock_irqsave(io_lock, flags);
2559 if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET)
2560 CMD_FLAGS(sc) |= SNIC_DEV_RST_TERM_ISSUED;
2561 else
2562 CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_ISSUED;
2563
2564 ret = SUCCESS;
2565
2566skip_internal_abts:
2567 SNIC_BUG_ON(!spin_is_locked(io_lock));
2568 spin_unlock_irqrestore(io_lock, flags);
2569
2570 return ret;
2571} /* end of snic_internal_abort_io */
2572
2573/*
2574 * snic_tgt_scsi_abort_io : called by snic_tgt_del
2575 */
2576int
2577snic_tgt_scsi_abort_io(struct snic_tgt *tgt)
2578{
2579 struct snic *snic = NULL;
2580 struct scsi_cmnd *sc = NULL;
2581 struct snic_tgt *sc_tgt = NULL;
2582 spinlock_t *io_lock = NULL;
2583 unsigned long flags;
2584 int ret = 0, tag, abt_cnt = 0, tmf = 0;
2585
2586 if (!tgt)
2587 return -1;
2588
2589 snic = shost_priv(snic_tgt_to_shost(tgt));
2590 SNIC_SCSI_DBG(snic->shost, "tgt_abt_io: Cleaning Pending IOs.\n");
2591
2592 if (tgt->tdata.typ == SNIC_TGT_DAS)
2593 tmf = SNIC_ITMF_ABTS_TASK;
2594 else
2595 tmf = SNIC_ITMF_ABTS_TASK_TERM;
2596
2597 for (tag = 0; tag < snic->max_tag_id; tag++) {
2598 io_lock = snic_io_lock_tag(snic, tag);
2599
2600 spin_lock_irqsave(io_lock, flags);
2601 sc = scsi_host_find_tag(snic->shost, tag);
2602 if (!sc) {
2603 spin_unlock_irqrestore(io_lock, flags);
2604
2605 continue;
2606 }
2607
2608 sc_tgt = starget_to_tgt(scsi_target(sc->device));
2609 if (sc_tgt != tgt) {
2610 spin_unlock_irqrestore(io_lock, flags);
2611
2612 continue;
2613 }
2614 spin_unlock_irqrestore(io_lock, flags);
2615
2616 ret = snic_internal_abort_io(snic, sc, tmf);
2617 if (ret < 0) {
2618 SNIC_HOST_ERR(snic->shost,
2619 "tgt_abt_io: Tag %x, Failed w err = %d\n",
2620 tag, ret);
2621
2622 continue;
2623 }
2624
2625 if (ret == SUCCESS)
2626 abt_cnt++;
2627 }
2628
2629 SNIC_SCSI_DBG(snic->shost, "tgt_abt_io: abt_cnt = %d\n", abt_cnt);
2630
2631 return 0;
2632} /* end of snic_tgt_scsi_abort_io */
diff --git a/drivers/scsi/snic/snic_stats.h b/drivers/scsi/snic/snic_stats.h
new file mode 100644
index 000000000000..11e614849a82
--- /dev/null
+++ b/drivers/scsi/snic/snic_stats.h
@@ -0,0 +1,123 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#ifndef __SNIC_STATS_H
19#define __SNIC_STATS_H
20
21struct snic_io_stats {
22 atomic64_t active; /* Active IOs */
23 atomic64_t max_active; /* Max # active IOs */
24 atomic64_t max_sgl; /* Max # SGLs for any IO */
25 atomic64_t max_time; /* Max time to process IO */
26 atomic64_t max_qtime; /* Max time to Queue the IO */
27 atomic64_t max_cmpl_time; /* Max time to complete the IO */
28 atomic64_t sgl_cnt[SNIC_MAX_SG_DESC_CNT]; /* SGL Counters */
29 atomic64_t max_io_sz; /* Max IO Size */
30 atomic64_t compl; /* IO Completions */
31 atomic64_t fail; /* IO Failures */
32 atomic64_t req_null; /* req or req info is NULL */
33 atomic64_t alloc_fail; /* Alloc Failures */
34 atomic64_t sc_null;
35 atomic64_t io_not_found; /* IO Not Found */
36 atomic64_t num_ios; /* Number of IOs */
37};
38
39struct snic_abort_stats {
40 atomic64_t num; /* Abort counter */
41 atomic64_t fail; /* Abort Failure Counter */
42 atomic64_t drv_tmo; /* Abort Driver Timeouts */
43 atomic64_t fw_tmo; /* Abort Firmware Timeouts */
44 atomic64_t io_not_found;/* Abort IO Not Found */
45};
46
47struct snic_reset_stats {
48 atomic64_t dev_resets; /* Device Reset Counter */
49 atomic64_t dev_reset_fail; /* Device Reset Failures */
50 atomic64_t dev_reset_aborts; /* Device Reset Aborts */
51 atomic64_t dev_reset_tmo; /* Device Reset Timeout */
52 atomic64_t dev_reset_terms; /* Device Reset terminate */
53 atomic64_t hba_resets; /* hba/firmware resets */
54 atomic64_t hba_reset_cmpl; /* hba/firmware reset completions */
55 atomic64_t hba_reset_fail; /* hba/firmware failures */
56 atomic64_t snic_resets; /* snic resets */
57 atomic64_t snic_reset_compl; /* snic reset completions */
58 atomic64_t snic_reset_fail; /* snic reset failures */
59};
60
61struct snic_fw_stats {
62 atomic64_t actv_reqs; /* Active Requests */
63 atomic64_t max_actv_reqs; /* Max Active Requests */
64 atomic64_t out_of_res; /* Firmware Out Of Resources */
65	atomic64_t io_errs;	/* Firmware IO Errors */
66 atomic64_t scsi_errs; /* Target hits check condition */
67};
68
69struct snic_misc_stats {
70 u64 last_isr_time;
71 u64 last_ack_time;
72 atomic64_t isr_cnt;
73 atomic64_t max_cq_ents; /* Max CQ Entries */
74 atomic64_t data_cnt_mismat; /* Data Count Mismatch */
75 atomic64_t io_tmo;
76 atomic64_t io_aborted;
77 atomic64_t sgl_inval; /* SGL Invalid */
78 atomic64_t abts_wq_alloc_fail; /* Abort Path WQ desc alloc failure */
79 atomic64_t devrst_wq_alloc_fail;/* Device Reset - WQ desc alloc fail */
80 atomic64_t wq_alloc_fail; /* IO WQ desc alloc failure */
81 atomic64_t no_icmnd_itmf_cmpls;
82 atomic64_t io_under_run;
83 atomic64_t qfull;
84 atomic64_t tgt_not_rdy;
85};
86
87struct snic_stats {
88 struct snic_io_stats io;
89 struct snic_abort_stats abts;
90 struct snic_reset_stats reset;
91 struct snic_fw_stats fw;
92 struct snic_misc_stats misc;
93 atomic64_t io_cmpl_skip;
94};
95
96int snic_stats_debugfs_init(struct snic *);
97void snic_stats_debugfs_remove(struct snic *);
98
99/* Auxiliary function to update active IO counter */
100static inline void
101snic_stats_update_active_ios(struct snic_stats *s_stats)
102{
103 struct snic_io_stats *io = &s_stats->io;
104 u32 nr_active_ios;
105
106 nr_active_ios = atomic64_inc_return(&io->active);
107 if (atomic64_read(&io->max_active) < nr_active_ios)
108 atomic64_set(&io->max_active, nr_active_ios);
109
110 atomic64_inc(&io->num_ios);
111}
112
113/* Auxiliary function to update IO completion counter */
114static inline void
115snic_stats_update_io_cmpl(struct snic_stats *s_stats)
116{
117 atomic64_dec(&s_stats->io.active);
118 if (unlikely(atomic64_read(&s_stats->io_cmpl_skip)))
119 atomic64_dec(&s_stats->io_cmpl_skip);
120 else
121 atomic64_inc(&s_stats->io.compl);
122}
123#endif /* __SNIC_STATS_H */
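A side note on snic_stats_update_active_ios() above: the max_active update is a separate atomic64_read() and atomic64_set(), so two CPUs racing through the check can publish a slightly stale maximum. For a statistics counter that is a cheap, deliberate trade-off. A minimal sketch of a lossless alternative, assuming nothing beyond the stock atomic64 API (the helper name is hypothetical; the driver keeps the simpler racy form):

static inline void snic_stats_track_max(atomic64_t *max, s64 seen)
{
	s64 old = atomic64_read(max);

	while (old < seen) {
		s64 prev = atomic64_cmpxchg(max, old, seen);

		if (prev == old)
			break;		/* we published the new maximum */
		old = prev;		/* lost a race; re-check against winner */
	}
}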
diff --git a/drivers/scsi/snic/snic_trc.c b/drivers/scsi/snic/snic_trc.c
new file mode 100644
index 000000000000..28a40a7ade38
--- /dev/null
+++ b/drivers/scsi/snic/snic_trc.c
@@ -0,0 +1,181 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#include <linux/module.h>
19#include <linux/mempool.h>
20#include <linux/errno.h>
21#include <linux/vmalloc.h>
22
23#include "snic_io.h"
24#include "snic.h"
25
26/*
27 * snic_get_trc_buf : Allocates the next trace record slot and returns it.
28 */
29struct snic_trc_data *
30snic_get_trc_buf(void)
31{
32 struct snic_trc *trc = &snic_glob->trc;
33 struct snic_trc_data *td = NULL;
34 unsigned long flags;
35
36 spin_lock_irqsave(&trc->lock, flags);
37 td = &trc->buf[trc->wr_idx];
38 trc->wr_idx++;
39
40 if (trc->wr_idx == trc->max_idx)
41 trc->wr_idx = 0;
42
43 if (trc->wr_idx != trc->rd_idx) {
44 spin_unlock_irqrestore(&trc->lock, flags);
45
46 goto end;
47 }
48
49 trc->rd_idx++;
50 if (trc->rd_idx == trc->max_idx)
51 trc->rd_idx = 0;
52
53	td->ts = 0;	/* mark record in progress; readers skip it until ts is set */
54 spin_unlock_irqrestore(&trc->lock, flags);
55
56end:
57
58 return td;
59} /* end of snic_get_trc_buf */
60
61/*
62 * snic_fmt_trc_data : Formats trace data for printing.
63 */
64static int
65snic_fmt_trc_data(struct snic_trc_data *td, char *buf, int buf_sz)
66{
67 int len = 0;
68 struct timespec tmspec;
69
70 jiffies_to_timespec(td->ts, &tmspec);
71
72 len += snprintf(buf, buf_sz,
73		       "%lu.%09lu %-25s %3d %4x %16llx %16llx %16llx %16llx %16llx\n",
74 tmspec.tv_sec,
75 tmspec.tv_nsec,
76 td->fn,
77 td->hno,
78 td->tag,
79 td->data[0], td->data[1], td->data[2], td->data[3],
80 td->data[4]);
81
82 return len;
83} /* end of snic_fmt_trc_data */
84
85/*
86 * snic_get_trc_data : Returns a formatted trace buffer.
87 */
88int
89snic_get_trc_data(char *buf, int buf_sz)
90{
91 struct snic_trc_data *td = NULL;
92 struct snic_trc *trc = &snic_glob->trc;
93 unsigned long flags;
94
95 spin_lock_irqsave(&trc->lock, flags);
96 if (trc->rd_idx == trc->wr_idx) {
97 spin_unlock_irqrestore(&trc->lock, flags);
98
99 return -1;
100 }
101 td = &trc->buf[trc->rd_idx];
102
103 if (td->ts == 0) {
104 /* write in progress. */
105 spin_unlock_irqrestore(&trc->lock, flags);
106
107 return -1;
108 }
109
110 trc->rd_idx++;
111 if (trc->rd_idx == trc->max_idx)
112 trc->rd_idx = 0;
113 spin_unlock_irqrestore(&trc->lock, flags);
114
115 return snic_fmt_trc_data(td, buf, buf_sz);
116} /* end of snic_get_trc_data */
117
118/*
119 * snic_trc_init() : Configures Trace Functionality for snic.
120 */
121int
122snic_trc_init(void)
123{
124 struct snic_trc *trc = &snic_glob->trc;
125 void *tbuf = NULL;
126 int tbuf_sz = 0, ret;
127
128 tbuf_sz = (snic_trace_max_pages * PAGE_SIZE);
129 tbuf = vmalloc(tbuf_sz);
130 if (!tbuf) {
131		SNIC_ERR("Failed to allocate trace buffer of size %d.\n", tbuf_sz);
132 SNIC_ERR("Trace Facility not enabled.\n");
133 ret = -ENOMEM;
134
135 return ret;
136 }
137
138 memset(tbuf, 0, tbuf_sz);
139 trc->buf = (struct snic_trc_data *) tbuf;
140 spin_lock_init(&trc->lock);
141
142 ret = snic_trc_debugfs_init();
143 if (ret) {
144 SNIC_ERR("Failed to create Debugfs Files.\n");
145
146 goto error;
147 }
148
149 trc->max_idx = (tbuf_sz / SNIC_TRC_ENTRY_SZ);
150 trc->rd_idx = trc->wr_idx = 0;
151 trc->enable = 1;
152	SNIC_INFO("Trace facility enabled, buffer size %lu pages.\n",
153 tbuf_sz / PAGE_SIZE);
154 ret = 0;
155
156 return ret;
157
158error:
159 snic_trc_free();
160
161 return ret;
162} /* end of snic_trc_init */
163
164/*
165 * snic_trc_free : Releases the trace buffer and disables the tracing.
166 */
167void
168snic_trc_free(void)
169{
170 struct snic_trc *trc = &snic_glob->trc;
171
172 trc->enable = 0;
173 snic_trc_debugfs_term();
174
175 if (trc->buf) {
176 vfree(trc->buf);
177 trc->buf = NULL;
178 }
179
180 SNIC_INFO("Trace Facility Disabled.\n");
181} /* end of snic_trc_free */
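snic_get_trc_buf() and snic_get_trc_data() above implement an overwrite-oldest ring: the writer always succeeds, and when wr_idx catches rd_idx the oldest record is dropped by advancing rd_idx (the slot's ts is zeroed so a concurrent reader skips the half-written record). A standalone userspace sketch of the same index discipline (illustrative only, not driver code):

#include <stdio.h>

#define MAX_IDX	4

static unsigned int wr_idx, rd_idx;

static unsigned int ring_push(void)
{
	unsigned int slot = wr_idx;

	wr_idx = (wr_idx + 1) % MAX_IDX;
	if (wr_idx == rd_idx)			/* full: drop the oldest */
		rd_idx = (rd_idx + 1) % MAX_IDX;

	return slot;
}

static int ring_pop(unsigned int *slot)
{
	if (rd_idx == wr_idx)			/* empty */
		return -1;

	*slot = rd_idx;
	rd_idx = (rd_idx + 1) % MAX_IDX;

	return 0;
}

int main(void)
{
	unsigned int s, i;

	for (i = 0; i < 6; i++)			/* overfill a 4-slot ring */
		printf("wrote slot %u\n", ring_push());
	while (!ring_pop(&s))			/* only the newest survive */
		printf("read slot %u\n", s);

	return 0;
}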
diff --git a/drivers/scsi/snic/snic_trc.h b/drivers/scsi/snic/snic_trc.h
new file mode 100644
index 000000000000..427faee5f97e
--- /dev/null
+++ b/drivers/scsi/snic/snic_trc.h
@@ -0,0 +1,121 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#ifndef __SNIC_TRC_H
19#define __SNIC_TRC_H
20
21#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
22
23extern ssize_t simple_read_from_buffer(void __user *to,
24 size_t count,
25 loff_t *ppos,
26 const void *from,
27 size_t available);
28
29extern unsigned int snic_trace_max_pages;
30
31/* Format of a single trace record */
32struct snic_trc_data {
33 u64 ts; /* Time Stamp */
34 char *fn; /* Ptr to Function Name */
35 u32 hno; /* SCSI Host ID */
36 u32 tag; /* Command Tag */
37 u64 data[5];
38} __attribute__((__packed__));
39
40#define SNIC_TRC_ENTRY_SZ 64 /* in Bytes */
41
42struct snic_trc {
43 spinlock_t lock;
44 struct snic_trc_data *buf; /* Trace Buffer */
45 u32 max_idx; /* Max Index into trace buffer */
46 u32 rd_idx;
47 u32 wr_idx;
48 u32 enable; /* Control Variable for Tracing */
49
50 struct dentry *trc_enable; /* debugfs file object */
51 struct dentry *trc_file;
52};
53
54int snic_trc_init(void);
55void snic_trc_free(void);
56int snic_trc_debugfs_init(void);
57void snic_trc_debugfs_term(void);
58struct snic_trc_data *snic_get_trc_buf(void);
59int snic_get_trc_data(char *buf, int buf_sz);
60
61int snic_debugfs_init(void);
62void snic_debugfs_term(void);
63
64static inline void
65snic_trace(char *fn, u16 hno, u32 tag, u64 d1, u64 d2, u64 d3, u64 d4, u64 d5)
66{
67 struct snic_trc_data *tr_rec = snic_get_trc_buf();
68
69 if (!tr_rec)
70 return;
71
72 tr_rec->fn = (char *)fn;
73 tr_rec->hno = hno;
74 tr_rec->tag = tag;
75 tr_rec->data[0] = d1;
76 tr_rec->data[1] = d2;
77 tr_rec->data[2] = d3;
78 tr_rec->data[3] = d4;
79 tr_rec->data[4] = d5;
80 tr_rec->ts = jiffies; /* Update time stamp at last */
81}
82
83#define SNIC_TRC(_hno, _tag, d1, d2, d3, d4, d5) \
84 do { \
85 if (unlikely(snic_glob->trc.enable)) \
86 snic_trace((char *)__func__, \
87 (u16)(_hno), \
88 (u32)(_tag), \
89 (u64)(d1), \
90 (u64)(d2), \
91 (u64)(d3), \
92 (u64)(d4), \
93 (u64)(d5)); \
94 } while (0)
95#else
96
97#define SNIC_TRC(_hno, _tag, d1, d2, d3, d4, d5) \
98 do { \
99 if (unlikely(snic_log_level & 0x2)) \
100 SNIC_DBG("SnicTrace: %s %2u %2u %llx %llx %llx %llx %llx", \
101 (char *)__func__, \
102 (u16)(_hno), \
103 (u32)(_tag), \
104 (u64)(d1), \
105 (u64)(d2), \
106 (u64)(d3), \
107 (u64)(d4), \
108 (u64)(d5)); \
109 } while (0)
110#endif /* end of CONFIG_SCSI_SNIC_DEBUG_FS */
111
112#define SNIC_TRC_CMD(sc) \
113 ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 | \
114 (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 | \
115 (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 | \
116 (u64)sc->cmnd[5])
117
118#define SNIC_TRC_CMD_STATE_FLAGS(sc) \
119 ((u64) CMD_FLAGS(sc) << 32 | CMD_STATE(sc))
120
121#endif /* end of __SNIC_TRC_H */
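SNIC_TRC_CMD() packs seven CDB bytes into one u64 trace word: the opcode lands in bits 56-63, cmnd[7] and cmnd[8] (the transfer length of a 10-byte CDB) in bits 40-47 and 32-39, and the LBA bytes cmnd[2..5] in bits 24-31 down to 0-7; bits 48-55 stay zero. A hypothetical decoder for reading raw trace words back, not part of the driver, could look like:

static inline void snic_trc_cmd_unpack(u64 v, u8 cdb[7])
{
	cdb[0] = v >> 56;	/* cmnd[0], the opcode */
	cdb[1] = v >> 40;	/* cmnd[7] */
	cdb[2] = v >> 32;	/* cmnd[8] */
	cdb[3] = v >> 24;	/* cmnd[2] */
	cdb[4] = v >> 16;	/* cmnd[3] */
	cdb[5] = v >> 8;	/* cmnd[4] */
	cdb[6] = v;		/* cmnd[5] */
}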
diff --git a/drivers/scsi/snic/vnic_cq.c b/drivers/scsi/snic/vnic_cq.c
new file mode 100644
index 000000000000..4c8e64e4fba6
--- /dev/null
+++ b/drivers/scsi/snic/vnic_cq.c
@@ -0,0 +1,86 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#include <linux/errno.h>
19#include <linux/types.h>
20#include <linux/pci.h>
21#include "vnic_dev.h"
22#include "vnic_cq.h"
23
24void svnic_cq_free(struct vnic_cq *cq)
25{
26 svnic_dev_free_desc_ring(cq->vdev, &cq->ring);
27
28 cq->ctrl = NULL;
29}
30
31int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq,
32 unsigned int index, unsigned int desc_count, unsigned int desc_size)
33{
34 int err;
35
36 cq->index = index;
37 cq->vdev = vdev;
38
39 cq->ctrl = svnic_dev_get_res(vdev, RES_TYPE_CQ, index);
40 if (!cq->ctrl) {
41 pr_err("Failed to hook CQ[%d] resource\n", index);
42
43 return -EINVAL;
44 }
45
46 err = svnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
47 if (err)
48 return err;
49
50 return 0;
51}
52
53void svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
54 unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
55 unsigned int cq_tail_color, unsigned int interrupt_enable,
56 unsigned int cq_entry_enable, unsigned int cq_message_enable,
57 unsigned int interrupt_offset, u64 cq_message_addr)
58{
59 u64 paddr;
60
61 paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
62 writeq(paddr, &cq->ctrl->ring_base);
63 iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
64 iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
65 iowrite32(color_enable, &cq->ctrl->color_enable);
66 iowrite32(cq_head, &cq->ctrl->cq_head);
67 iowrite32(cq_tail, &cq->ctrl->cq_tail);
68 iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
69 iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
70 iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
71 iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
72 iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
73 writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
74}
75
76void svnic_cq_clean(struct vnic_cq *cq)
77{
78 cq->to_clean = 0;
79 cq->last_color = 0;
80
81 iowrite32(0, &cq->ctrl->cq_head);
82 iowrite32(0, &cq->ctrl->cq_tail);
83 iowrite32(1, &cq->ctrl->cq_tail_color);
84
85 svnic_dev_clear_desc_ring(&cq->ring);
86}
diff --git a/drivers/scsi/snic/vnic_cq.h b/drivers/scsi/snic/vnic_cq.h
new file mode 100644
index 000000000000..6e651c3e16f7
--- /dev/null
+++ b/drivers/scsi/snic/vnic_cq.h
@@ -0,0 +1,110 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#ifndef _VNIC_CQ_H_
19#define _VNIC_CQ_H_
20
21#include "cq_desc.h"
22#include "vnic_dev.h"
23
24/* Completion queue control */
25struct vnic_cq_ctrl {
26 u64 ring_base; /* 0x00 */
27 u32 ring_size; /* 0x08 */
28 u32 pad0;
29 u32 flow_control_enable; /* 0x10 */
30 u32 pad1;
31 u32 color_enable; /* 0x18 */
32 u32 pad2;
33 u32 cq_head; /* 0x20 */
34 u32 pad3;
35 u32 cq_tail; /* 0x28 */
36 u32 pad4;
37 u32 cq_tail_color; /* 0x30 */
38 u32 pad5;
39 u32 interrupt_enable; /* 0x38 */
40 u32 pad6;
41 u32 cq_entry_enable; /* 0x40 */
42 u32 pad7;
43 u32 cq_message_enable; /* 0x48 */
44 u32 pad8;
45 u32 interrupt_offset; /* 0x50 */
46 u32 pad9;
47 u64 cq_message_addr; /* 0x58 */
48 u32 pad10;
49};
50
51struct vnic_cq {
52 unsigned int index;
53 struct vnic_dev *vdev;
54 struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */
55 struct vnic_dev_ring ring;
56 unsigned int to_clean;
57 unsigned int last_color;
58};
59
60static inline unsigned int svnic_cq_service(struct vnic_cq *cq,
61 unsigned int work_to_do,
62 int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
63 u8 type, u16 q_number, u16 completed_index, void *opaque),
64 void *opaque)
65{
66 struct cq_desc *cq_desc;
67 unsigned int work_done = 0;
68 u16 q_number, completed_index;
69 u8 type, color;
70
71 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
72 cq->ring.desc_size * cq->to_clean);
73 cq_desc_dec(cq_desc, &type, &color,
74 &q_number, &completed_index);
75
76 while (color != cq->last_color) {
77
78 if ((*q_service)(cq->vdev, cq_desc, type,
79 q_number, completed_index, opaque))
80 break;
81
82 cq->to_clean++;
83 if (cq->to_clean == cq->ring.desc_count) {
84 cq->to_clean = 0;
85 cq->last_color = cq->last_color ? 0 : 1;
86 }
87
88 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
89 cq->ring.desc_size * cq->to_clean);
90 cq_desc_dec(cq_desc, &type, &color,
91 &q_number, &completed_index);
92
93 work_done++;
94 if (work_done >= work_to_do)
95 break;
96 }
97
98 return work_done;
99}
100
101void svnic_cq_free(struct vnic_cq *cq);
102int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq,
103 unsigned int index, unsigned int desc_count, unsigned int desc_size);
104void svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
105 unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
106 unsigned int cq_tail_color, unsigned int interrupt_enable,
107 unsigned int cq_entry_enable, unsigned int message_enable,
108 unsigned int interrupt_offset, u64 message_addr);
109void svnic_cq_clean(struct vnic_cq *cq);
110#endif /* _VNIC_CQ_H_ */
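svnic_cq_service() relies on the usual color protocol: hardware writes each completion with a color bit that flips every time it wraps the ring, and the driver flips its own last_color on every wrap of to_clean, so "color != last_color" is true exactly for descriptors the driver has not yet consumed and the fast path needs no head/tail register read. A standalone simulation of the invariant (illustrative, not driver code):

#include <stdio.h>

#define RING	4

static int ring_color[RING];		/* zeroed, as after svnic_cq_clean() */

int main(void)
{
	unsigned int prod = 0, cons = 0, i;
	int prod_color = 1, last_color = 0;

	for (i = 0; i < 6; i++) {
		ring_color[prod] = prod_color;		/* hw posts a completion */
		if (++prod == RING) {
			prod = 0;
			prod_color ^= 1;		/* hw flips color on wrap */
		}

		while (ring_color[cons] != last_color) {	/* driver side */
			printf("completion in slot %u\n", cons);
			if (++cons == RING) {
				cons = 0;
				last_color ^= 1;	/* driver flips on wrap */
			}
		}
	}

	return 0;
}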
diff --git a/drivers/scsi/snic/vnic_cq_fw.h b/drivers/scsi/snic/vnic_cq_fw.h
new file mode 100644
index 000000000000..c2d1bbd44bd1
--- /dev/null
+++ b/drivers/scsi/snic/vnic_cq_fw.h
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#ifndef _VNIC_CQ_FW_H_
19#define _VNIC_CQ_FW_H_
20
21#include "snic_fwint.h"
22
23static inline unsigned int
24vnic_cq_fw_service(struct vnic_cq *cq,
25 int (*q_service)(struct vnic_dev *vdev,
26 unsigned int index,
27 struct snic_fw_req *desc),
28 unsigned int work_to_do)
29
30{
31 struct snic_fw_req *desc;
32 unsigned int work_done = 0;
33 u8 color;
34
35 desc = (struct snic_fw_req *)((u8 *)cq->ring.descs +
36 cq->ring.desc_size * cq->to_clean);
37 snic_color_dec(desc, &color);
38
39 while (color != cq->last_color) {
40
41 if ((*q_service)(cq->vdev, cq->index, desc))
42 break;
43
44 cq->to_clean++;
45 if (cq->to_clean == cq->ring.desc_count) {
46 cq->to_clean = 0;
47 cq->last_color = cq->last_color ? 0 : 1;
48 }
49
50 desc = (struct snic_fw_req *)((u8 *)cq->ring.descs +
51 cq->ring.desc_size * cq->to_clean);
52 snic_color_dec(desc, &color);
53
54 work_done++;
55 if (work_done >= work_to_do)
56 break;
57 }
58
59 return work_done;
60}
61
62#endif /* _VNIC_CQ_FW_H_ */
diff --git a/drivers/scsi/snic/vnic_dev.c b/drivers/scsi/snic/vnic_dev.c
new file mode 100644
index 000000000000..e0b5549bc9fb
--- /dev/null
+++ b/drivers/scsi/snic/vnic_dev.c
@@ -0,0 +1,748 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#include <linux/kernel.h>
19#include <linux/errno.h>
20#include <linux/types.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include <linux/if_ether.h>
24#include <linux/slab.h>
25#include "vnic_resource.h"
26#include "vnic_devcmd.h"
27#include "vnic_dev.h"
28#include "vnic_stats.h"
29#include "vnic_wq.h"
30
31#define VNIC_DVCMD_TMO 10000 /* Devcmd Timeout value */
32#define VNIC_NOTIFY_INTR_MASK 0x0000ffff00000000ULL
33
34struct devcmd2_controller {
35 struct vnic_wq_ctrl __iomem *wq_ctrl;
36 struct vnic_dev_ring results_ring;
37 struct vnic_wq wq;
38 struct vnic_devcmd2 *cmd_ring;
39 struct devcmd2_result *result;
40 u16 next_result;
41 u16 result_size;
42 int color;
43};
44
45struct vnic_res {
46 void __iomem *vaddr;
47 unsigned int count;
48};
49
50struct vnic_dev {
51 void *priv;
52 struct pci_dev *pdev;
53 struct vnic_res res[RES_TYPE_MAX];
54 enum vnic_dev_intr_mode intr_mode;
55 struct vnic_devcmd __iomem *devcmd;
56 struct vnic_devcmd_notify *notify;
57 struct vnic_devcmd_notify notify_copy;
58 dma_addr_t notify_pa;
59 u32 *linkstatus;
60 dma_addr_t linkstatus_pa;
61 struct vnic_stats *stats;
62 dma_addr_t stats_pa;
63 struct vnic_devcmd_fw_info *fw_info;
64 dma_addr_t fw_info_pa;
65 u64 args[VNIC_DEVCMD_NARGS];
66 struct devcmd2_controller *devcmd2;
67
68 int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
69 int wait);
70};
71
72#define VNIC_MAX_RES_HDR_SIZE \
73 (sizeof(struct vnic_resource_header) + \
74 sizeof(struct vnic_resource) * RES_TYPE_MAX)
75#define VNIC_RES_STRIDE 128
76
77void *svnic_dev_priv(struct vnic_dev *vdev)
78{
79 return vdev->priv;
80}
81
82static int vnic_dev_discover_res(struct vnic_dev *vdev,
83 struct vnic_dev_bar *bar, unsigned int num_bars)
84{
85 struct vnic_resource_header __iomem *rh;
86 struct vnic_resource __iomem *r;
87 u8 type;
88
89 if (num_bars == 0)
90 return -EINVAL;
91
92 if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
93 pr_err("vNIC BAR0 res hdr length error\n");
94
95 return -EINVAL;
96 }
97
98 rh = bar->vaddr;
99 if (!rh) {
100 pr_err("vNIC BAR0 res hdr not mem-mapped\n");
101
102 return -EINVAL;
103 }
104
105 if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
106 ioread32(&rh->version) != VNIC_RES_VERSION) {
107 pr_err("vNIC BAR0 res magic/version error exp (%lx/%lx) curr (%x/%x)\n",
108 VNIC_RES_MAGIC, VNIC_RES_VERSION,
109 ioread32(&rh->magic), ioread32(&rh->version));
110
111 return -EINVAL;
112 }
113
114 r = (struct vnic_resource __iomem *)(rh + 1);
115
116 while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
117
118 u8 bar_num = ioread8(&r->bar);
119 u32 bar_offset = ioread32(&r->bar_offset);
120 u32 count = ioread32(&r->count);
121 u32 len;
122
123 r++;
124
125 if (bar_num >= num_bars)
126 continue;
127
128 if (!bar[bar_num].len || !bar[bar_num].vaddr)
129 continue;
130
131 switch (type) {
132 case RES_TYPE_WQ:
133 case RES_TYPE_RQ:
134 case RES_TYPE_CQ:
135 case RES_TYPE_INTR_CTRL:
136 /* each count is stride bytes long */
137 len = count * VNIC_RES_STRIDE;
138 if (len + bar_offset > bar->len) {
139 pr_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
140 type, bar_offset,
141 len,
142 bar->len);
143
144 return -EINVAL;
145 }
146 break;
147
148 case RES_TYPE_INTR_PBA_LEGACY:
149 case RES_TYPE_DEVCMD:
150 case RES_TYPE_DEVCMD2:
151 len = count;
152 break;
153
154 default:
155 continue;
156 }
157
158 vdev->res[type].count = count;
159 vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
160 }
161
162 return 0;
163}
164
165unsigned int svnic_dev_get_res_count(struct vnic_dev *vdev,
166 enum vnic_res_type type)
167{
168 return vdev->res[type].count;
169}
170
171void __iomem *svnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
172 unsigned int index)
173{
174 if (!vdev->res[type].vaddr)
175 return NULL;
176
177 switch (type) {
178 case RES_TYPE_WQ:
179 case RES_TYPE_RQ:
180 case RES_TYPE_CQ:
181 case RES_TYPE_INTR_CTRL:
182 return (char __iomem *)vdev->res[type].vaddr +
183 index * VNIC_RES_STRIDE;
184
185 default:
186 return (char __iomem *)vdev->res[type].vaddr;
187 }
188}
189
190unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
191 unsigned int desc_count,
192 unsigned int desc_size)
193{
194 /* The base address of the desc rings must be 512 byte aligned.
195 * Descriptor count is aligned to groups of 32 descriptors. A
196 * count of 0 means the maximum 4096 descriptors. Descriptor
197 * size is aligned to 16 bytes.
198 */
199
200 unsigned int count_align = 32;
201 unsigned int desc_align = 16;
202
203 ring->base_align = 512;
204
205 if (desc_count == 0)
206 desc_count = 4096;
207
208 ring->desc_count = ALIGN(desc_count, count_align);
209
210 ring->desc_size = ALIGN(desc_size, desc_align);
211
212 ring->size = ring->desc_count * ring->desc_size;
213 ring->size_unaligned = ring->size + ring->base_align;
214
215 return ring->size_unaligned;
216}
217
218void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
219{
220 memset(ring->descs, 0, ring->size);
221}
222
223int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
224 unsigned int desc_count, unsigned int desc_size)
225{
226 svnic_dev_desc_ring_size(ring, desc_count, desc_size);
227
228 ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
229 ring->size_unaligned,
230 &ring->base_addr_unaligned);
231
232 if (!ring->descs_unaligned) {
233 pr_err("Failed to allocate ring (size=%d), aborting\n",
234 (int)ring->size);
235
236 return -ENOMEM;
237 }
238
239 ring->base_addr = ALIGN(ring->base_addr_unaligned,
240 ring->base_align);
241 ring->descs = (u8 *)ring->descs_unaligned +
242 (ring->base_addr - ring->base_addr_unaligned);
243
244 svnic_dev_clear_desc_ring(ring);
245
246 ring->desc_avail = ring->desc_count - 1;
247
248 return 0;
249}
250
251void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
252{
253 if (ring->descs) {
254 pci_free_consistent(vdev->pdev,
255 ring->size_unaligned,
256 ring->descs_unaligned,
257 ring->base_addr_unaligned);
258 ring->descs = NULL;
259 }
260}
261
262static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
263 int wait)
264{
265 struct devcmd2_controller *dc2c = vdev->devcmd2;
266 struct devcmd2_result *result = dc2c->result + dc2c->next_result;
267 unsigned int i;
268 int delay;
269 int err;
270 u32 posted;
271 u32 new_posted;
272
273 posted = ioread32(&dc2c->wq_ctrl->posted_index);
274
275 if (posted == 0xFFFFFFFF) { /* check for hardware gone */
276 /* Hardware surprise removal: return error */
277 return -ENODEV;
278 }
279
280 new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
281 dc2c->cmd_ring[posted].cmd = cmd;
282 dc2c->cmd_ring[posted].flags = 0;
283
284 if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
285 dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
286
287 if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
288 for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
289 dc2c->cmd_ring[posted].args[i] = vdev->args[i];
290 }
291 /* Adding write memory barrier prevents compiler and/or CPU
292 * reordering, thus avoiding descriptor posting before
293 * descriptor is initialized. Otherwise, hardware can read
294 * stale descriptor fields.
295 */
296 wmb();
297 iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);
298
299 if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
300 return 0;
301
302 for (delay = 0; delay < wait; delay++) {
303 udelay(100);
304 if (result->color == dc2c->color) {
305 dc2c->next_result++;
306 if (dc2c->next_result == dc2c->result_size) {
307 dc2c->next_result = 0;
308 dc2c->color = dc2c->color ? 0 : 1;
309 }
310 if (result->error) {
311 err = (int) result->error;
312 if (err != ERR_ECMDUNKNOWN ||
313 cmd != CMD_CAPABILITY)
314 pr_err("Error %d devcmd %d\n",
315 err, _CMD_N(cmd));
316
317 return err;
318 }
319 if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
320 /*
321 * Adding the rmb() prevents the compiler
322 * and/or CPU from reordering the reads which
323 * would potentially result in reading stale
324 * values.
325 */
326 rmb();
327 for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
328 vdev->args[i] = result->results[i];
329 }
330
331 return 0;
332 }
333 }
334
335 pr_err("Timed out devcmd %d\n", _CMD_N(cmd));
336
337 return -ETIMEDOUT;
338}
339
340static int svnic_dev_init_devcmd2(struct vnic_dev *vdev)
341{
342 struct devcmd2_controller *dc2c = NULL;
343 unsigned int fetch_idx;
344 int ret;
345 void __iomem *p;
346
347 if (vdev->devcmd2)
348 return 0;
349
350 p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
351 if (!p)
352 return -ENODEV;
353
354 dc2c = kzalloc(sizeof(*dc2c), GFP_ATOMIC);
355 if (!dc2c)
356 return -ENOMEM;
357
358 vdev->devcmd2 = dc2c;
359
360 dc2c->color = 1;
361 dc2c->result_size = DEVCMD2_RING_SIZE;
362
363 ret = vnic_wq_devcmd2_alloc(vdev,
364 &dc2c->wq,
365 DEVCMD2_RING_SIZE,
366 DEVCMD2_DESC_SIZE);
367 if (ret)
368 goto err_free_devcmd2;
369
370 fetch_idx = ioread32(&dc2c->wq.ctrl->fetch_index);
371 if (fetch_idx == 0xFFFFFFFF) { /* check for hardware gone */
372 /* Hardware surprise removal: reset fetch_index */
373 fetch_idx = 0;
374 }
375
376 /*
377	 * Never modify fetch_index; when setting up the WQ for
378	 * devcmd2, initialize posted_index to the same value as
379	 * fetch_index.
380 */
381 vnic_wq_init_start(&dc2c->wq, 0, fetch_idx, fetch_idx, 0, 0);
382 svnic_wq_enable(&dc2c->wq);
383 ret = svnic_dev_alloc_desc_ring(vdev,
384 &dc2c->results_ring,
385 DEVCMD2_RING_SIZE,
386 DEVCMD2_DESC_SIZE);
387 if (ret)
388 goto err_free_wq;
389
390 dc2c->result = (struct devcmd2_result *) dc2c->results_ring.descs;
391 dc2c->cmd_ring = (struct vnic_devcmd2 *) dc2c->wq.ring.descs;
392 dc2c->wq_ctrl = dc2c->wq.ctrl;
393 vdev->args[0] = (u64) dc2c->results_ring.base_addr | VNIC_PADDR_TARGET;
394 vdev->args[1] = DEVCMD2_RING_SIZE;
395
396 ret = _svnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, VNIC_DVCMD_TMO);
397 if (ret < 0)
398 goto err_free_desc_ring;
399
400 vdev->devcmd_rtn = &_svnic_dev_cmd2;
401 pr_info("DEVCMD2 Initialized.\n");
402
403 return ret;
404
405err_free_desc_ring:
406 svnic_dev_free_desc_ring(vdev, &dc2c->results_ring);
407
408err_free_wq:
409 svnic_wq_disable(&dc2c->wq);
410 svnic_wq_free(&dc2c->wq);
411
412err_free_devcmd2:
413 kfree(dc2c);
414 vdev->devcmd2 = NULL;
415
416 return ret;
417} /* end of svnic_dev_init_devcmd2 */
418
419static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
420{
421 struct devcmd2_controller *dc2c = vdev->devcmd2;
422
423 vdev->devcmd2 = NULL;
424 vdev->devcmd_rtn = NULL;
425
426 svnic_dev_free_desc_ring(vdev, &dc2c->results_ring);
427 svnic_wq_disable(&dc2c->wq);
428 svnic_wq_free(&dc2c->wq);
429 kfree(dc2c);
430}
431
432int svnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
433 u64 *a0, u64 *a1, int wait)
434{
435 int err;
436
437 memset(vdev->args, 0, sizeof(vdev->args));
438 vdev->args[0] = *a0;
439 vdev->args[1] = *a1;
440
441 err = (*vdev->devcmd_rtn)(vdev, cmd, wait);
442
443 *a0 = vdev->args[0];
444 *a1 = vdev->args[1];
445
446 return err;
447}
448
449int svnic_dev_fw_info(struct vnic_dev *vdev,
450 struct vnic_devcmd_fw_info **fw_info)
451{
452 u64 a0, a1 = 0;
453 int wait = VNIC_DVCMD_TMO;
454 int err = 0;
455
456 if (!vdev->fw_info) {
457 vdev->fw_info = pci_alloc_consistent(vdev->pdev,
458 sizeof(struct vnic_devcmd_fw_info),
459 &vdev->fw_info_pa);
460 if (!vdev->fw_info)
461 return -ENOMEM;
462
463 a0 = vdev->fw_info_pa;
464
465 /* only get fw_info once and cache it */
466 err = svnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
467 }
468
469 *fw_info = vdev->fw_info;
470
471 return err;
472}
473
474int svnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
475 unsigned int size, void *value)
476{
477 u64 a0, a1;
478 int wait = VNIC_DVCMD_TMO;
479 int err;
480
481 a0 = offset;
482 a1 = size;
483
484 err = svnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
485
486 switch (size) {
487 case 1:
488 *(u8 *)value = (u8)a0;
489 break;
490 case 2:
491 *(u16 *)value = (u16)a0;
492 break;
493 case 4:
494 *(u32 *)value = (u32)a0;
495 break;
496 case 8:
497 *(u64 *)value = a0;
498 break;
499 default:
500 BUG();
501 break;
502 }
503
504 return err;
505}
506
507int svnic_dev_stats_clear(struct vnic_dev *vdev)
508{
509 u64 a0 = 0, a1 = 0;
510 int wait = VNIC_DVCMD_TMO;
511
512 return svnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
513}
514
515int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
516{
517 u64 a0, a1;
518 int wait = VNIC_DVCMD_TMO;
519
520 if (!vdev->stats) {
521 vdev->stats = pci_alloc_consistent(vdev->pdev,
522 sizeof(struct vnic_stats), &vdev->stats_pa);
523 if (!vdev->stats)
524 return -ENOMEM;
525 }
526
527 *stats = vdev->stats;
528 a0 = vdev->stats_pa;
529 a1 = sizeof(struct vnic_stats);
530
531 return svnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
532}
533
534int svnic_dev_close(struct vnic_dev *vdev)
535{
536 u64 a0 = 0, a1 = 0;
537 int wait = VNIC_DVCMD_TMO;
538
539 return svnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
540}
541
542int svnic_dev_enable_wait(struct vnic_dev *vdev)
543{
544 u64 a0 = 0, a1 = 0;
545 int wait = VNIC_DVCMD_TMO;
546 int err = 0;
547
548 err = svnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
549 if (err == ERR_ECMDUNKNOWN)
550 return svnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
551
552 return err;
553}
554
555int svnic_dev_disable(struct vnic_dev *vdev)
556{
557 u64 a0 = 0, a1 = 0;
558 int wait = VNIC_DVCMD_TMO;
559
560 return svnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
561}
562
563int svnic_dev_open(struct vnic_dev *vdev, int arg)
564{
565 u64 a0 = (u32)arg, a1 = 0;
566 int wait = VNIC_DVCMD_TMO;
567
568 return svnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
569}
570
571int svnic_dev_open_done(struct vnic_dev *vdev, int *done)
572{
573 u64 a0 = 0, a1 = 0;
574 int wait = VNIC_DVCMD_TMO;
575 int err;
576
577 *done = 0;
578
579 err = svnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
580 if (err)
581 return err;
582
583 *done = (a0 == 0);
584
585 return 0;
586}
587
588int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
589{
590 u64 a0, a1;
591 int wait = VNIC_DVCMD_TMO;
592
593 if (!vdev->notify) {
594 vdev->notify = pci_alloc_consistent(vdev->pdev,
595 sizeof(struct vnic_devcmd_notify),
596 &vdev->notify_pa);
597 if (!vdev->notify)
598 return -ENOMEM;
599 }
600
601 a0 = vdev->notify_pa;
602 a1 = ((u64)intr << 32) & VNIC_NOTIFY_INTR_MASK;
603 a1 += sizeof(struct vnic_devcmd_notify);
604
605 return svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
606}
607
608void svnic_dev_notify_unset(struct vnic_dev *vdev)
609{
610 u64 a0, a1;
611 int wait = VNIC_DVCMD_TMO;
612
613 a0 = 0; /* paddr = 0 to unset notify buffer */
614 a1 = VNIC_NOTIFY_INTR_MASK; /* intr num = -1 to unreg for intr */
615 a1 += sizeof(struct vnic_devcmd_notify);
616
617 svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
618}
619
620static int vnic_dev_notify_ready(struct vnic_dev *vdev)
621{
622 u32 *words;
623 unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
624 unsigned int i;
625 u32 csum;
626
627 if (!vdev->notify)
628 return 0;
629
630 do {
631 csum = 0;
632 memcpy(&vdev->notify_copy, vdev->notify,
633 sizeof(struct vnic_devcmd_notify));
634 words = (u32 *)&vdev->notify_copy;
635 for (i = 1; i < nwords; i++)
636 csum += words[i];
637 } while (csum != words[0]);
638
639 return 1;
640}
641
642int svnic_dev_init(struct vnic_dev *vdev, int arg)
643{
644 u64 a0 = (u32)arg, a1 = 0;
645 int wait = VNIC_DVCMD_TMO;
646
647 return svnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
648}
649
650int svnic_dev_link_status(struct vnic_dev *vdev)
651{
652 if (vdev->linkstatus)
653 return *vdev->linkstatus;
654
655 if (!vnic_dev_notify_ready(vdev))
656 return 0;
657
658 return vdev->notify_copy.link_state;
659}
660
661u32 svnic_dev_link_down_cnt(struct vnic_dev *vdev)
662{
663 if (!vnic_dev_notify_ready(vdev))
664 return 0;
665
666 return vdev->notify_copy.link_down_cnt;
667}
668
669void svnic_dev_set_intr_mode(struct vnic_dev *vdev,
670 enum vnic_dev_intr_mode intr_mode)
671{
672 vdev->intr_mode = intr_mode;
673}
674
675enum vnic_dev_intr_mode svnic_dev_get_intr_mode(struct vnic_dev *vdev)
676{
677 return vdev->intr_mode;
678}
679
680void svnic_dev_unregister(struct vnic_dev *vdev)
681{
682 if (vdev) {
683 if (vdev->notify)
684 pci_free_consistent(vdev->pdev,
685 sizeof(struct vnic_devcmd_notify),
686 vdev->notify,
687 vdev->notify_pa);
688 if (vdev->linkstatus)
689 pci_free_consistent(vdev->pdev,
690 sizeof(u32),
691 vdev->linkstatus,
692 vdev->linkstatus_pa);
693 if (vdev->stats)
694 pci_free_consistent(vdev->pdev,
695 sizeof(struct vnic_stats),
696 vdev->stats, vdev->stats_pa);
697 if (vdev->fw_info)
698 pci_free_consistent(vdev->pdev,
699 sizeof(struct vnic_devcmd_fw_info),
700 vdev->fw_info, vdev->fw_info_pa);
701 if (vdev->devcmd2)
702 vnic_dev_deinit_devcmd2(vdev);
703 kfree(vdev);
704 }
705}
706
707struct vnic_dev *svnic_dev_alloc_discover(struct vnic_dev *vdev,
708 void *priv,
709 struct pci_dev *pdev,
710 struct vnic_dev_bar *bar,
711 unsigned int num_bars)
712{
713 if (!vdev) {
714 vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
715 if (!vdev)
716 return NULL;
717 }
718
719 vdev->priv = priv;
720 vdev->pdev = pdev;
721
722 if (vnic_dev_discover_res(vdev, bar, num_bars))
723 goto err_out;
724
725 return vdev;
726
727err_out:
728 svnic_dev_unregister(vdev);
729
730 return NULL;
731} /* end of svnic_dev_alloc_discover */
732
733/*
734 * The fallback argument is unused here; it is kept so the interface stays common with other vnic drivers.
735 */
736int svnic_dev_cmd_init(struct vnic_dev *vdev, int fallback)
737{
738 int err = -ENODEV;
739 void __iomem *p;
740
741 p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
742 if (p)
743 err = svnic_dev_init_devcmd2(vdev);
744 else
745 pr_err("DEVCMD2 resource not found.\n");
746
747 return err;
748} /* end of svnic_dev_cmd_init */
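A worked example of svnic_dev_desc_ring_size() with illustrative numbers: asking for 100 descriptors of 24 bytes rounds the count up to 128 (multiples of 32) and the descriptor size up to 32 (multiples of 16), so the ring proper is 4096 bytes and 4608 bytes are allocated, leaving slack for the 512-byte base alignment done in svnic_dev_alloc_desc_ring(). The same arithmetic in standalone form (ALIGN restated here for power-of-two boundaries):

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int count = ALIGN_UP(100, 32);	/* -> 128 */
	unsigned int dsize = ALIGN_UP(24, 16);	/* -> 32 */
	unsigned int size  = count * dsize;	/* -> 4096 */

	printf("%u descs x %u B = %u B (+512 B alignment slack)\n",
	       count, dsize, size);

	return 0;
}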
diff --git a/drivers/scsi/snic/vnic_dev.h b/drivers/scsi/snic/vnic_dev.h
new file mode 100644
index 000000000000..e65726da6504
--- /dev/null
+++ b/drivers/scsi/snic/vnic_dev.h
@@ -0,0 +1,110 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#ifndef _VNIC_DEV_H_
19#define _VNIC_DEV_H_
20
21#include "vnic_resource.h"
22#include "vnic_devcmd.h"
23
24#ifndef VNIC_PADDR_TARGET
25#define VNIC_PADDR_TARGET 0x0000000000000000ULL
26#endif
27
28#ifndef readq
29static inline u64 readq(void __iomem *reg)
30{
31 return ((u64)readl(reg + 0x4UL) << 32) | (u64)readl(reg);
32}
33
34static inline void writeq(u64 val, void __iomem *reg)
35{
36 writel(lower_32_bits(val), reg);
37 writel(upper_32_bits(val), reg + 0x4UL);
38}
39#endif
40
41enum vnic_dev_intr_mode {
42 VNIC_DEV_INTR_MODE_UNKNOWN,
43 VNIC_DEV_INTR_MODE_INTX,
44 VNIC_DEV_INTR_MODE_MSI,
45 VNIC_DEV_INTR_MODE_MSIX,
46};
47
48struct vnic_dev_bar {
49 void __iomem *vaddr;
50 dma_addr_t bus_addr;
51 unsigned long len;
52};
53
54struct vnic_dev_ring {
55 void *descs;
56 size_t size;
57 dma_addr_t base_addr;
58 size_t base_align;
59 void *descs_unaligned;
60 size_t size_unaligned;
61 dma_addr_t base_addr_unaligned;
62 unsigned int desc_size;
63 unsigned int desc_count;
64 unsigned int desc_avail;
65};
66
67struct vnic_dev;
68struct vnic_stats;
69
70void *svnic_dev_priv(struct vnic_dev *vdev);
71unsigned int svnic_dev_get_res_count(struct vnic_dev *vdev,
72 enum vnic_res_type type);
73void __iomem *svnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
74 unsigned int index);
75unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
76 unsigned int desc_count,
77 unsigned int desc_size);
78void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
79int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
80 unsigned int desc_count, unsigned int desc_size);
81void svnic_dev_free_desc_ring(struct vnic_dev *vdev,
82 struct vnic_dev_ring *ring);
83int svnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
84 u64 *a0, u64 *a1, int wait);
85int svnic_dev_fw_info(struct vnic_dev *vdev,
86 struct vnic_devcmd_fw_info **fw_info);
87int svnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
88 unsigned int size, void *value);
89int svnic_dev_stats_clear(struct vnic_dev *vdev);
90int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
91int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
92void svnic_dev_notify_unset(struct vnic_dev *vdev);
93int svnic_dev_link_status(struct vnic_dev *vdev);
94u32 svnic_dev_link_down_cnt(struct vnic_dev *vdev);
95int svnic_dev_close(struct vnic_dev *vdev);
96int svnic_dev_enable_wait(struct vnic_dev *vdev);
97int svnic_dev_disable(struct vnic_dev *vdev);
98int svnic_dev_open(struct vnic_dev *vdev, int arg);
99int svnic_dev_open_done(struct vnic_dev *vdev, int *done);
100int svnic_dev_init(struct vnic_dev *vdev, int arg);
101struct vnic_dev *svnic_dev_alloc_discover(struct vnic_dev *vdev,
102 void *priv, struct pci_dev *pdev,
103 struct vnic_dev_bar *bar,
104 unsigned int num_bars);
105void svnic_dev_set_intr_mode(struct vnic_dev *vdev,
106 enum vnic_dev_intr_mode intr_mode);
107enum vnic_dev_intr_mode svnic_dev_get_intr_mode(struct vnic_dev *vdev);
108void svnic_dev_unregister(struct vnic_dev *vdev);
109int svnic_dev_cmd_init(struct vnic_dev *vdev, int fallback);
110#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/scsi/snic/vnic_devcmd.h b/drivers/scsi/snic/vnic_devcmd.h
new file mode 100644
index 000000000000..d81b4f0ceaaa
--- /dev/null
+++ b/drivers/scsi/snic/vnic_devcmd.h
@@ -0,0 +1,270 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#ifndef _VNIC_DEVCMD_H_
19#define _VNIC_DEVCMD_H_
20
21#define _CMD_NBITS 14
22#define _CMD_VTYPEBITS 10
23#define _CMD_FLAGSBITS 6
24#define _CMD_DIRBITS 2
25
26#define _CMD_NMASK ((1 << _CMD_NBITS)-1)
27#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1)
28#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1)
29#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1)
30
31#define _CMD_NSHIFT 0
32#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS)
33#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
34#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)
35
36/*
37 * Direction bits (from host perspective).
38 */
39#define _CMD_DIR_NONE 0U
40#define _CMD_DIR_WRITE 1U
41#define _CMD_DIR_READ 2U
42#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ)
43
44/*
45 * Flag bits.
46 */
47#define _CMD_FLAGS_NONE 0U
48#define _CMD_FLAGS_NOWAIT 1U
49
50/*
51 * vNIC type bits.
52 */
53#define _CMD_VTYPE_NONE 0U
54#define _CMD_VTYPE_ENET 1U
55#define _CMD_VTYPE_FC 2U
56#define _CMD_VTYPE_SCSI 4U
57#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)
58
59/*
60 * Used to create cmds.
61 */
62#define _CMDCF(dir, flags, vtype, nr) \
63 (((dir) << _CMD_DIRSHIFT) | \
64 ((flags) << _CMD_FLAGSSHIFT) | \
65 ((vtype) << _CMD_VTYPESHIFT) | \
66 ((nr) << _CMD_NSHIFT))
67#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr)
68#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)
69
70/*
71 * Used to decode cmds.
72 */
73#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
74#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
75#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
76#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
77
78enum vnic_devcmd_cmd {
79 CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
80
81 /* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */
82 CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
83
84 /* dev-specific block member:
85 * in: (u16)a0=offset,(u8)a1=size
86 * out: a0=value */
87 CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
88
89 /* stats clear */
90 CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),
91
92 /* stats dump in mem: (u64)a0=paddr to stats area,
93 * (u16)a1=sizeof stats area */
94 CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
95
96 /* nic_cfg in (u32)a0 */
97 CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
98
99 /* set struct vnic_devcmd_notify buffer in mem:
100 * in:
101 * (u64)a0=paddr to notify (set paddr=0 to unset)
102 * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
103 * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
104 * out:
105 * (u32)a1 = effective size
106 */
107 CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),
108
109 /* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
110 CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),
111
112 /* open status:
113 * out: a0=0 open complete, a0=1 open in progress */
114 CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),
115
116 /* close vnic */
117 CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),
118
119 /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
120 CMD_INIT = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),
121
122 /* enable virtual link */
123 CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
124
125 /* enable virtual link, waiting variant. */
126 CMD_ENABLE_WAIT = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
127
128 /* disable virtual link */
129 CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
130
131 /* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */
132 CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),
133
134 /* init status:
135 * out: a0=0 init complete, a0=1 init in progress
136 * if a0=0, a1=errno */
137 CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),
138
139 /* undo initialize of virtual link */
140 CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
141
142 /* check fw capability of a cmd:
143 * in: (u32)a0=cmd
144 * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */
145 CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36),
146
147 /*
148 * Initialization for the devcmd2 interface.
149 * in: (u64) a0=host result buffer physical address
150 * in: (u16) a1=number of entries in result buffer
151 */
152 CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57)
153};
154
155/* flags for CMD_OPEN */
156#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
157
158/* flags for CMD_INIT */
159#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
160
161/* flags for CMD_PACKET_FILTER */
162#define CMD_PFILTER_DIRECTED 0x01
163#define CMD_PFILTER_MULTICAST 0x02
164#define CMD_PFILTER_BROADCAST 0x04
165#define CMD_PFILTER_PROMISCUOUS 0x08
166#define CMD_PFILTER_ALL_MULTICAST 0x10
167
168enum vnic_devcmd_status {
169 STAT_NONE = 0,
170 STAT_BUSY = 1 << 0, /* cmd in progress */
171 STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */
172};
173
174enum vnic_devcmd_error {
175 ERR_SUCCESS = 0,
176 ERR_EINVAL = 1,
177 ERR_EFAULT = 2,
178 ERR_EPERM = 3,
179 ERR_EBUSY = 4,
180 ERR_ECMDUNKNOWN = 5,
181 ERR_EBADSTATE = 6,
182 ERR_ENOMEM = 7,
183 ERR_ETIMEDOUT = 8,
184 ERR_ELINKDOWN = 9,
185};
186
187struct vnic_devcmd_fw_info {
188 char fw_version[32];
189 char fw_build[32];
190 char hw_version[32];
191 char hw_serial_number[32];
192};
193
194struct vnic_devcmd_notify {
195 u32 csum; /* checksum over following words */
196
197 u32 link_state; /* link up == 1 */
198 u32 port_speed; /* effective port speed (rate limit) */
199 u32 mtu; /* MTU */
200 u32 msglvl; /* requested driver msg lvl */
201 u32 uif; /* uplink interface */
202 u32 status; /* status bits (see VNIC_STF_*) */
203 u32 error; /* error code (see ERR_*) for first ERR */
204 u32 link_down_cnt; /* running count of link down transitions */
205};
206#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */
207
208struct vnic_devcmd_provinfo {
209 u8 oui[3];
210 u8 type;
211 u8 data[0];
212};
213
214/*
215 * Writing cmd register causes STAT_BUSY to get set in status register.
216 * When cmd completes, STAT_BUSY will be cleared.
217 *
218 * If cmd completed successfully STAT_ERROR will be clear
219 * and args registers contain cmd-specific results.
220 *
221 * If cmd error, STAT_ERROR will be set and args[0] contains error code.
222 *
223 * status register is read-only. While STAT_BUSY is set,
224 * all other register contents are read-only.
225 */
226
227/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
228#define VNIC_DEVCMD_NARGS 15
229struct vnic_devcmd {
230 u32 status; /* RO */
231 u32 cmd; /* RW */
232 u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */
233};
234
235
236/*
237 * Version 2 of the interface.
238 *
239 * Some things are carried over, notably the vnic_devcmd_cmd enum.
240 */
241
242/*
243 * Flags for vnic_devcmd2.flags
244 */
245
246#define DEVCMD2_FNORESULT 0x1 /* Don't copy result to host */
247
248#define VNIC_DEVCMD2_NARGS VNIC_DEVCMD_NARGS
249struct vnic_devcmd2 {
250 u16 pad;
251 u16 flags;
252 u32 cmd; /* same command #defines as original */
253 u64 args[VNIC_DEVCMD2_NARGS];
254};
255
256#define VNIC_DEVCMD2_NRESULTS VNIC_DEVCMD_NARGS
257struct devcmd2_result {
258 u64 results[VNIC_DEVCMD2_NRESULTS];
259 u32 pad;
260 u16 completed_index; /* into copy WQ */
261 u8 error; /* same error codes as original */
262 u8 color; /* 0 or 1 as with completion queues */
263};
264
265#define DEVCMD2_RING_SIZE 32
266#define DEVCMD2_DESC_SIZE 128
267
268#define DEVCMD2_RESULTS_SIZE_MAX ((1 << 16) - 1)
269
270#endif /* _VNIC_DEVCMD_H_ */
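To make the _CMDCF() packing concrete: CMD_DEV_SPEC expands to _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2), i.e. nr = 2 in bits 0-13, vtype = 7 in bits 14-23, flags = 0 in bits 24-29 and dir = 3 in bits 30-31, which is 0xc001c002. A standalone check (illustrative; masks restated from the #defines above):

#include <stdio.h>

int main(void)
{
	unsigned int cmd = (3U << 30) | (7U << 14) | (2U << 0);

	printf("cmd   = 0x%08x\n", cmd);		/* 0xc001c002 */
	printf("nr    = %u\n", cmd & 0x3fffU);		/* 2 */
	printf("vtype = %u\n", (cmd >> 14) & 0x3ffU);	/* 7 = ALL */
	printf("flags = %u\n", (cmd >> 24) & 0x3fU);	/* 0 */
	printf("dir   = %u\n", cmd >> 30);		/* 3 = RW */

	return 0;
}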
diff --git a/drivers/scsi/snic/vnic_intr.c b/drivers/scsi/snic/vnic_intr.c
new file mode 100644
index 000000000000..a7d54806787d
--- /dev/null
+++ b/drivers/scsi/snic/vnic_intr.c
@@ -0,0 +1,59 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#include <linux/kernel.h>
19#include <linux/errno.h>
20#include <linux/types.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include "vnic_dev.h"
24#include "vnic_intr.h"
25
26void svnic_intr_free(struct vnic_intr *intr)
27{
28 intr->ctrl = NULL;
29}
30
31int svnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
32 unsigned int index)
33{
34 intr->index = index;
35 intr->vdev = vdev;
36
37 intr->ctrl = svnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
38 if (!intr->ctrl) {
39 pr_err("Failed to hook INTR[%d].ctrl resource\n",
40 index);
41 return -EINVAL;
42 }
43
44 return 0;
45}
46
47void svnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
48 unsigned int coalescing_type, unsigned int mask_on_assertion)
49{
50 iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
51 iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
52 iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
53 iowrite32(0, &intr->ctrl->int_credits);
54}
55
56void svnic_intr_clean(struct vnic_intr *intr)
57{
58 iowrite32(0, &intr->ctrl->int_credits);
59}
diff --git a/drivers/scsi/snic/vnic_intr.h b/drivers/scsi/snic/vnic_intr.h
new file mode 100644
index 000000000000..4547f603fe5e
--- /dev/null
+++ b/drivers/scsi/snic/vnic_intr.h
@@ -0,0 +1,105 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#ifndef _VNIC_INTR_H_
19#define _VNIC_INTR_H_
20
21#include <linux/pci.h>
22#include "vnic_dev.h"
23
24#define VNIC_INTR_TIMER_MAX 0xffff
25
26#define VNIC_INTR_TIMER_TYPE_ABS 0
27#define VNIC_INTR_TIMER_TYPE_QUIET 1
28
29/* Interrupt control */
30struct vnic_intr_ctrl {
31 u32 coalescing_timer; /* 0x00 */
32 u32 pad0;
33 u32 coalescing_value; /* 0x08 */
34 u32 pad1;
35 u32 coalescing_type; /* 0x10 */
36 u32 pad2;
37 u32 mask_on_assertion; /* 0x18 */
38 u32 pad3;
39 u32 mask; /* 0x20 */
40 u32 pad4;
41 u32 int_credits; /* 0x28 */
42 u32 pad5;
43 u32 int_credit_return; /* 0x30 */
44 u32 pad6;
45};
46
47struct vnic_intr {
48 unsigned int index;
49 struct vnic_dev *vdev;
50 struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */
51};
52
53static inline void
54svnic_intr_unmask(struct vnic_intr *intr)
55{
56 iowrite32(0, &intr->ctrl->mask);
57}
58
59static inline void
60svnic_intr_mask(struct vnic_intr *intr)
61{
62 iowrite32(1, &intr->ctrl->mask);
63}
64
65static inline void
66svnic_intr_return_credits(struct vnic_intr *intr,
67 unsigned int credits,
68 int unmask,
69 int reset_timer)
70{
71#define VNIC_INTR_UNMASK_SHIFT 16
72#define VNIC_INTR_RESET_TIMER_SHIFT 17
73
74 u32 int_credit_return = (credits & 0xffff) |
75 (unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
76 (reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
77
78 iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
79}
80
81static inline unsigned int
82svnic_intr_credits(struct vnic_intr *intr)
83{
84 return ioread32(&intr->ctrl->int_credits);
85}
86
87static inline void
88svnic_intr_return_all_credits(struct vnic_intr *intr)
89{
90 unsigned int credits = svnic_intr_credits(intr);
91 int unmask = 1;
92 int reset_timer = 1;
93
94 svnic_intr_return_credits(intr, credits, unmask, reset_timer);
95}
96
97void svnic_intr_free(struct vnic_intr *);
98int svnic_intr_alloc(struct vnic_dev *, struct vnic_intr *, unsigned int);
99void svnic_intr_init(struct vnic_intr *intr,
100 unsigned int coalescing_timer,
101 unsigned int coalescing_type,
102 unsigned int mask_on_assertion);
103void svnic_intr_clean(struct vnic_intr *);
104
105#endif /* _VNIC_INTR_H_ */
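
A note on the credit-return encoding used above: the low 16 bits of int_credit_return carry the credit count, bit 16 requests an unmask, and bit 17 resets the coalescing timer, so svnic_intr_return_all_credits() can re-arm the interrupt with a single register write. A minimal userspace sketch of the same packing, with the iowrite32() replaced by a printf():

#include <stdint.h>
#include <stdio.h>

#define VNIC_INTR_UNMASK_SHIFT		16
#define VNIC_INTR_RESET_TIMER_SHIFT	17

/* Pack credits/unmask/reset_timer as svnic_intr_return_credits() does. */
static uint32_t encode_credit_return(unsigned int credits, int unmask,
				     int reset_timer)
{
	return (credits & 0xffff) |
	       (unmask ? (1u << VNIC_INTR_UNMASK_SHIFT) : 0) |
	       (reset_timer ? (1u << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
}

int main(void)
{
	/* Return 5 credits, unmask the interrupt, restart the timer. */
	printf("0x%08x\n", (unsigned)encode_credit_return(5, 1, 1));
	/* prints 0x00030005 */
	return 0;
}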
diff --git a/drivers/scsi/snic/vnic_resource.h b/drivers/scsi/snic/vnic_resource.h
new file mode 100644
index 000000000000..9713d6835db3
--- /dev/null
+++ b/drivers/scsi/snic/vnic_resource.h
@@ -0,0 +1,68 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#ifndef _VNIC_RESOURCE_H_
19#define _VNIC_RESOURCE_H_
20
21#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */
22#define VNIC_RES_VERSION 0x00000000L
23
24/* vNIC resource types */
25enum vnic_res_type {
26 RES_TYPE_EOL, /* End-of-list */
27 RES_TYPE_WQ, /* Work queues */
28 RES_TYPE_RQ, /* Receive queues */
29 RES_TYPE_CQ, /* Completion queues */
30 RES_TYPE_RSVD1,
31 RES_TYPE_NIC_CFG, /* Enet NIC config registers */
32 RES_TYPE_RSVD2,
33 RES_TYPE_RSVD3,
34 RES_TYPE_RSVD4,
35 RES_TYPE_RSVD5,
36 RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */
37 RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */
38 RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */
39 RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */
40 RES_TYPE_RSVD6,
41 RES_TYPE_RSVD7,
42 RES_TYPE_DEVCMD, /* Device command region */
43 RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */
44 RES_TYPE_SUBVNIC, /* subvnic resource type */
45 RES_TYPE_MQ_WQ, /* MQ Work queues */
46 RES_TYPE_MQ_RQ, /* MQ Receive queues */
47 RES_TYPE_MQ_CQ, /* MQ Completion queues */
48 RES_TYPE_DEPRECATED1, /* Old version of devcmd 2 */
49 RES_TYPE_DEPRECATED2, /* Old version of devcmd 2 */
50 RES_TYPE_DEVCMD2, /* Device control region */
51
52 RES_TYPE_MAX, /* Count of resource types */
53};
54
55struct vnic_resource_header {
56 u32 magic;
57 u32 version;
58};
59
60struct vnic_resource {
61 u8 type;
62 u8 bar;
63 u8 pad[2];
64 u32 bar_offset;
65 u32 count;
66};
67
68#endif /* _VNIC_RESOURCE_H_ */
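
For context, svnic_dev_get_res(), used by the interrupt and work-queue allocators above, resolves resources from a discovery map laid out exactly as these structs describe: a vnic_resource_header carrying the 'vnic' magic, followed by vnic_resource entries terminated by RES_TYPE_EOL. Below is a hedged userspace sketch of that walk over an in-memory copy of the map; the real driver reads it through the mapped PCI BAR and handles device endianness, which this sketch ignores:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VNIC_RES_MAGIC		0x766E6963UL	/* 'vnic' */
#define VNIC_RES_VERSION	0x00000000UL
#define RES_TYPE_EOL		0

struct vnic_resource_header {
	uint32_t magic;
	uint32_t version;
};

struct vnic_resource {
	uint8_t type;
	uint8_t bar;
	uint8_t pad[2];
	uint32_t bar_offset;
	uint32_t count;
};

/* Walk an in-memory resource map and print each entry until EOL. */
static int walk_resources(const uint8_t *map)
{
	struct vnic_resource_header hdr;
	struct vnic_resource res;
	const uint8_t *p = map;

	memcpy(&hdr, p, sizeof(hdr));
	if (hdr.magic != VNIC_RES_MAGIC || hdr.version != VNIC_RES_VERSION)
		return -1;
	p += sizeof(hdr);

	for (;;) {
		memcpy(&res, p, sizeof(res));
		if (res.type == RES_TYPE_EOL)
			break;
		printf("type %u: bar %u offset 0x%x count %u\n",
		       (unsigned)res.type, (unsigned)res.bar,
		       (unsigned)res.bar_offset, (unsigned)res.count);
		p += sizeof(res);
	}

	return 0;
}

int main(void)
{
	/* Synthetic map: one WQ entry, then a zeroed entry acting as EOL. */
	uint8_t map[sizeof(struct vnic_resource_header) +
		    2 * sizeof(struct vnic_resource)] = { 0 };
	struct vnic_resource_header hdr = { VNIC_RES_MAGIC, VNIC_RES_VERSION };
	struct vnic_resource wq = { .type = 1 /* RES_TYPE_WQ */,
				    .bar_offset = 0x100, .count = 1 };

	memcpy(map, &hdr, sizeof(hdr));
	memcpy(map + sizeof(hdr), &wq, sizeof(wq));

	return walk_resources(map);
}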
diff --git a/drivers/scsi/snic/vnic_snic.h b/drivers/scsi/snic/vnic_snic.h
new file mode 100644
index 000000000000..514d39f5cf00
--- /dev/null
+++ b/drivers/scsi/snic/vnic_snic.h
@@ -0,0 +1,54 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#ifndef _VNIC_SNIC_H_
19#define _VNIC_SNIC_H_
20
21#define VNIC_SNIC_WQ_DESCS_MIN 64
22#define VNIC_SNIC_WQ_DESCS_MAX 1024
23
24#define VNIC_SNIC_MAXDATAFIELDSIZE_MIN 256
25#define VNIC_SNIC_MAXDATAFIELDSIZE_MAX 2112
26
27#define VNIC_SNIC_IO_THROTTLE_COUNT_MIN 1
28#define VNIC_SNIC_IO_THROTTLE_COUNT_MAX 1024
29
30#define VNIC_SNIC_PORT_DOWN_TIMEOUT_MIN 0
31#define VNIC_SNIC_PORT_DOWN_TIMEOUT_MAX 240000
32
33#define VNIC_SNIC_PORT_DOWN_IO_RETRIES_MIN 0
34#define VNIC_SNIC_PORT_DOWN_IO_RETRIES_MAX 255
35
36#define VNIC_SNIC_LUNS_PER_TARGET_MIN 1
37#define VNIC_SNIC_LUNS_PER_TARGET_MAX 1024
38
39/* Device-specific region: scsi configuration */
40struct vnic_snic_config {
41 u32 flags;
42 u32 wq_enet_desc_count;
43 u32 io_throttle_count;
44 u32 port_down_timeout;
45 u32 port_down_io_retries;
46 u32 luns_per_tgt;
47 u16 maxdatafieldsize;
48 u16 intr_timer;
49 u8 intr_timer_type;
50 u8 _resvd2;
51 u8 xpt_type;
52 u8 hid;
53};
54#endif /* _VNIC_SNIC_H_ */
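
The MIN/MAX pairs above bound what firmware may legitimately hand the driver in vnic_snic_config. A small illustrative clamp (hypothetical, not lifted from the driver) shows the intended use:

#include <stdint.h>
#include <stdio.h>

#define VNIC_SNIC_WQ_DESCS_MIN	64
#define VNIC_SNIC_WQ_DESCS_MAX	1024

/* Clamp a firmware-provided descriptor count into the supported range. */
static uint32_t clamp_wq_descs(uint32_t fw_value)
{
	if (fw_value < VNIC_SNIC_WQ_DESCS_MIN)
		return VNIC_SNIC_WQ_DESCS_MIN;
	if (fw_value > VNIC_SNIC_WQ_DESCS_MAX)
		return VNIC_SNIC_WQ_DESCS_MAX;
	return fw_value;
}

int main(void)
{
	printf("%u %u %u\n", (unsigned)clamp_wq_descs(16),
	       (unsigned)clamp_wq_descs(256), (unsigned)clamp_wq_descs(4096));
	/* prints: 64 256 1024 */
	return 0;
}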
diff --git a/drivers/scsi/snic/vnic_stats.h b/drivers/scsi/snic/vnic_stats.h
new file mode 100644
index 000000000000..370a37c97748
--- /dev/null
+++ b/drivers/scsi/snic/vnic_stats.h
@@ -0,0 +1,68 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#ifndef _VNIC_STATS_H_
19#define _VNIC_STATS_H_
20
21/* Tx statistics */
22struct vnic_tx_stats {
23 u64 tx_frames_ok;
24 u64 tx_unicast_frames_ok;
25 u64 tx_multicast_frames_ok;
26 u64 tx_broadcast_frames_ok;
27 u64 tx_bytes_ok;
28 u64 tx_unicast_bytes_ok;
29 u64 tx_multicast_bytes_ok;
30 u64 tx_broadcast_bytes_ok;
31 u64 tx_drops;
32 u64 tx_errors;
33 u64 tx_tso;
34 u64 rsvd[16];
35};
36
37/* Rx statistics */
38struct vnic_rx_stats {
39 u64 rx_frames_ok;
40 u64 rx_frames_total;
41 u64 rx_unicast_frames_ok;
42 u64 rx_multicast_frames_ok;
43 u64 rx_broadcast_frames_ok;
44 u64 rx_bytes_ok;
45 u64 rx_unicast_bytes_ok;
46 u64 rx_multicast_bytes_ok;
47 u64 rx_broadcast_bytes_ok;
48 u64 rx_drop;
49 u64 rx_no_bufs;
50 u64 rx_errors;
51 u64 rx_rss;
52 u64 rx_crc_errors;
53 u64 rx_frames_64;
54 u64 rx_frames_127;
55 u64 rx_frames_255;
56 u64 rx_frames_511;
57 u64 rx_frames_1023;
58 u64 rx_frames_1518;
59 u64 rx_frames_to_max;
60 u64 rsvd[16];
61};
62
63struct vnic_stats {
64 struct vnic_tx_stats tx;
65 struct vnic_rx_stats rx;
66};
67
68#endif /* _VNIC_STATS_H_ */
diff --git a/drivers/scsi/snic/vnic_wq.c b/drivers/scsi/snic/vnic_wq.c
new file mode 100644
index 000000000000..1e91d432089e
--- /dev/null
+++ b/drivers/scsi/snic/vnic_wq.c
@@ -0,0 +1,237 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#include <linux/errno.h>
19#include <linux/types.h>
20#include <linux/pci.h>
21#include <linux/delay.h>
22#include <linux/slab.h>
23#include "vnic_dev.h"
24#include "vnic_wq.h"
25
26static inline int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
27 unsigned int index, enum vnic_res_type res_type)
28{
29 wq->ctrl = svnic_dev_get_res(vdev, res_type, index);
30 if (!wq->ctrl)
31 return -EINVAL;
32
33 return 0;
34}
35
36static inline int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
37 unsigned int index, unsigned int desc_count, unsigned int desc_size)
38{
39 return svnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count,
40 desc_size);
41}
42
43static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
44{
45 struct vnic_wq_buf *buf;
46 unsigned int i, j, count = wq->ring.desc_count;
47 unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
48
49 for (i = 0; i < blks; i++) {
50 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
51 if (!wq->bufs[i]) {
52 pr_err("Failed to alloc wq_bufs\n");
53
54 return -ENOMEM;
55 }
56 }
57
58 for (i = 0; i < blks; i++) {
59 buf = wq->bufs[i];
60 for (j = 0; j < VNIC_WQ_BUF_DFLT_BLK_ENTRIES; j++) {
61 buf->index = i * VNIC_WQ_BUF_DFLT_BLK_ENTRIES + j;
62 buf->desc = (u8 *)wq->ring.descs +
63 wq->ring.desc_size * buf->index;
64 if (buf->index + 1 == count) {
65 buf->next = wq->bufs[0];
66 break;
67 } else if (j + 1 == VNIC_WQ_BUF_DFLT_BLK_ENTRIES) {
68 buf->next = wq->bufs[i + 1];
69 } else {
70 buf->next = buf + 1;
71 buf++;
72 }
73 }
74 }
75
76 wq->to_use = wq->to_clean = wq->bufs[0];
77
78 return 0;
79}
80
81void svnic_wq_free(struct vnic_wq *wq)
82{
83 struct vnic_dev *vdev;
84 unsigned int i;
85
86 vdev = wq->vdev;
87
88 svnic_dev_free_desc_ring(vdev, &wq->ring);
89
90 for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
91 kfree(wq->bufs[i]);
92 wq->bufs[i] = NULL;
93 }
94
95 wq->ctrl = NULL;
96
97}
98
99int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
100 unsigned int desc_count, unsigned int desc_size)
101{
102 int err;
103
104 wq->index = 0;
105 wq->vdev = vdev;
106
107 err = vnic_wq_get_ctrl(vdev, wq, 0, RES_TYPE_DEVCMD2);
108 if (err) {
109 pr_err("Failed to get devcmd2 resource\n");
110
111 return err;
112 }
113
114 svnic_wq_disable(wq);
115
116 err = vnic_wq_alloc_ring(vdev, wq, 0, desc_count, desc_size);
117 if (err)
118 return err;
119
120 return 0;
121}
122
123int svnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
124 unsigned int index, unsigned int desc_count, unsigned int desc_size)
125{
126 int err;
127
128 wq->index = index;
129 wq->vdev = vdev;
130
131 err = vnic_wq_get_ctrl(vdev, wq, index, RES_TYPE_WQ);
132 if (err) {
133 pr_err("Failed to hook WQ[%d] resource\n", index);
134
135 return err;
136 }
137
138 svnic_wq_disable(wq);
139
140 err = vnic_wq_alloc_ring(vdev, wq, index, desc_count, desc_size);
141 if (err)
142 return err;
143
144 err = vnic_wq_alloc_bufs(wq);
145 if (err) {
146 svnic_wq_free(wq);
147
148 return err;
149 }
150
151 return 0;
152}
153
154void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
155 unsigned int fetch_index, unsigned int posted_index,
156 unsigned int error_interrupt_enable,
157 unsigned int error_interrupt_offset)
158{
159 u64 paddr;
160 unsigned int count = wq->ring.desc_count;
161
162 paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
163 writeq(paddr, &wq->ctrl->ring_base);
164 iowrite32(count, &wq->ctrl->ring_size);
165 iowrite32(fetch_index, &wq->ctrl->fetch_index);
166 iowrite32(posted_index, &wq->ctrl->posted_index);
167 iowrite32(cq_index, &wq->ctrl->cq_index);
168 iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
169 iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
170 iowrite32(0, &wq->ctrl->error_status);
171
172 wq->to_use = wq->to_clean =
173 &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
174 [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)];
175}
176
177void svnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
178 unsigned int error_interrupt_enable,
179 unsigned int error_interrupt_offset)
180{
181 vnic_wq_init_start(wq, cq_index, 0, 0, error_interrupt_enable,
182 error_interrupt_offset);
183}
184
185unsigned int svnic_wq_error_status(struct vnic_wq *wq)
186{
187 return ioread32(&wq->ctrl->error_status);
188}
189
190void svnic_wq_enable(struct vnic_wq *wq)
191{
192 iowrite32(1, &wq->ctrl->enable);
193}
194
195int svnic_wq_disable(struct vnic_wq *wq)
196{
197 unsigned int wait;
198
199 iowrite32(0, &wq->ctrl->enable);
200
201 /* Wait for HW to ACK disable request */
202 for (wait = 0; wait < 100; wait++) {
203 if (!(ioread32(&wq->ctrl->running)))
204 return 0;
205 udelay(1);
206 }
207
208 pr_err("Failed to disable WQ[%d]\n", wq->index);
209
210 return -ETIMEDOUT;
211}
212
213void svnic_wq_clean(struct vnic_wq *wq,
214 void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
215{
216 struct vnic_wq_buf *buf;
217
218 BUG_ON(ioread32(&wq->ctrl->enable));
219
220 buf = wq->to_clean;
221
222 while (svnic_wq_desc_used(wq) > 0) {
223
224 (*buf_clean)(wq, buf);
225
226 buf = wq->to_clean = buf->next;
227 wq->ring.desc_avail++;
228 }
229
230 wq->to_use = wq->to_clean = wq->bufs[0];
231
232 iowrite32(0, &wq->ctrl->fetch_index);
233 iowrite32(0, &wq->ctrl->posted_index);
234 iowrite32(0, &wq->ctrl->error_status);
235
236 svnic_dev_clear_desc_ring(&wq->ring);
237}
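
The subtlest part of this file is vnic_wq_alloc_bufs(): bookkeeping entries are allocated in blocks of VNIC_WQ_BUF_DFLT_BLK_ENTRIES (64), and the entry for the last descriptor is linked back to bufs[0] so the chain forms a ring. A standalone sketch of just the sizing arithmetic:

#include <stdio.h>

#define BLK_ENTRIES		64
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int counts[] = { 64, 100, 1024 };
	unsigned int i;

	for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
		unsigned int count = counts[i];

		/* Mirrors VNIC_WQ_BUF_BLKS_NEEDED(): one block per 64 entries. */
		printf("%u descriptors -> %u block(s); entry %u links back to bufs[0]\n",
		       count, DIV_ROUND_UP(count, BLK_ENTRIES), count - 1);
	}

	return 0;
}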
diff --git a/drivers/scsi/snic/vnic_wq.h b/drivers/scsi/snic/vnic_wq.h
new file mode 100644
index 000000000000..7cc031c7ceba
--- /dev/null
+++ b/drivers/scsi/snic/vnic_wq.h
@@ -0,0 +1,170 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#ifndef _VNIC_WQ_H_
19#define _VNIC_WQ_H_
20
21#include <linux/pci.h>
22#include "vnic_dev.h"
23#include "vnic_cq.h"
24
25/* Work queue control */
26struct vnic_wq_ctrl {
27 u64 ring_base; /* 0x00 */
28 u32 ring_size; /* 0x08 */
29 u32 pad0;
30 u32 posted_index; /* 0x10 */
31 u32 pad1;
32 u32 cq_index; /* 0x18 */
33 u32 pad2;
34 u32 enable; /* 0x20 */
35 u32 pad3;
36 u32 running; /* 0x28 */
37 u32 pad4;
38 u32 fetch_index; /* 0x30 */
39 u32 pad5;
40 u32 dca_value; /* 0x38 */
41 u32 pad6;
42 u32 error_interrupt_enable; /* 0x40 */
43 u32 pad7;
44 u32 error_interrupt_offset; /* 0x48 */
45 u32 pad8;
46 u32 error_status; /* 0x50 */
47 u32 pad9;
48};
49
50struct vnic_wq_buf {
51 struct vnic_wq_buf *next;
52 dma_addr_t dma_addr;
53 void *os_buf;
54 unsigned int len;
55 unsigned int index;
56 int sop;
57 void *desc;
58};
59
60/* Break the vnic_wq_buf allocations into blocks of 64 entries */
61#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32
62#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64
63#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \
64 ((unsigned int)(entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \
65 VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES)
66#define VNIC_WQ_BUF_BLK_SZ \
67 (VNIC_WQ_BUF_DFLT_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
68#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
69 DIV_ROUND_UP(entries, VNIC_WQ_BUF_DFLT_BLK_ENTRIES)
72#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
73
74struct vnic_wq {
75 unsigned int index;
76 struct vnic_dev *vdev;
77 struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
78 struct vnic_dev_ring ring;
79 struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
80 struct vnic_wq_buf *to_use;
81 struct vnic_wq_buf *to_clean;
82 unsigned int pkts_outstanding;
83};
84
85static inline unsigned int svnic_wq_desc_avail(struct vnic_wq *wq)
86{
87 /* how many does SW own? */
88 return wq->ring.desc_avail;
89}
90
91static inline unsigned int svnic_wq_desc_used(struct vnic_wq *wq)
92{
93 /* how many does HW own? */
94 return wq->ring.desc_count - wq->ring.desc_avail - 1;
95}
96
97static inline void *svnic_wq_next_desc(struct vnic_wq *wq)
98{
99 return wq->to_use->desc;
100}
101
102static inline void svnic_wq_post(struct vnic_wq *wq,
103 void *os_buf, dma_addr_t dma_addr,
104 unsigned int len, int sop, int eop)
105{
106 struct vnic_wq_buf *buf = wq->to_use;
107
108 buf->sop = sop;
109 buf->os_buf = eop ? os_buf : NULL;
110 buf->dma_addr = dma_addr;
111 buf->len = len;
112
113 buf = buf->next;
114 if (eop) {
115 /* A write memory barrier prevents compiler and/or CPU
116 * reordering, so the descriptor cannot be posted before it
117 * is fully initialized. Otherwise, hardware could read
118 * stale descriptor fields.
119 */
120 wmb();
121 iowrite32(buf->index, &wq->ctrl->posted_index);
122 }
123 wq->to_use = buf;
124
125 wq->ring.desc_avail--;
126}
127
128static inline void svnic_wq_service(struct vnic_wq *wq,
129 struct cq_desc *cq_desc, u16 completed_index,
130 void (*buf_service)(struct vnic_wq *wq,
131 struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
132 void *opaque)
133{
134 struct vnic_wq_buf *buf;
135
136 buf = wq->to_clean;
137 while (1) {
138
139 (*buf_service)(wq, cq_desc, buf, opaque);
140
141 wq->ring.desc_avail++;
142
143 wq->to_clean = buf->next;
144
145 if (buf->index == completed_index)
146 break;
147
148 buf = wq->to_clean;
149 }
150}
151
152void svnic_wq_free(struct vnic_wq *wq);
153int svnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
154 unsigned int index, unsigned int desc_count, unsigned int desc_size);
155int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
156 unsigned int desc_count, unsigned int desc_size);
157void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
158 unsigned int fetch_index, unsigned int posted_index,
159 unsigned int error_interrupt_enable,
160 unsigned int error_interrupt_offset);
161
162void svnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
163 unsigned int error_interrupt_enable,
164 unsigned int error_interrupt_offset);
165unsigned int svnic_wq_error_status(struct vnic_wq *wq);
166void svnic_wq_enable(struct vnic_wq *wq);
167int svnic_wq_disable(struct vnic_wq *wq);
168void svnic_wq_clean(struct vnic_wq *wq,
169 void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
170#endif /* _VNIC_WQ_H_ */
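
Note the -1 in svnic_wq_desc_used(): one descriptor is deliberately kept unused so that a full ring can be told apart from an empty one. Assuming svnic_dev_alloc_desc_ring() initializes desc_avail to desc_count - 1, as its enic/fnic counterparts do, the invariant avail + used == desc_count - 1 holds across svnic_wq_post() and svnic_wq_service(). A quick host-side check:

#include <assert.h>

/* With one slot reserved, avail + used == desc_count - 1 at all times. */
static void check_invariant(unsigned int desc_count, unsigned int desc_avail)
{
	unsigned int used = desc_count - desc_avail - 1;

	assert(desc_avail + used == desc_count - 1);
}

int main(void)
{
	check_invariant(64, 63);	/* freshly initialized, nothing posted */
	check_invariant(64, 0);		/* "full": 63 descriptors owned by HW */
	return 0;
}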
diff --git a/drivers/scsi/snic/wq_enet_desc.h b/drivers/scsi/snic/wq_enet_desc.h
new file mode 100644
index 000000000000..68f62b6d105b
--- /dev/null
+++ b/drivers/scsi/snic/wq_enet_desc.h
@@ -0,0 +1,96 @@
1/*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18#ifndef _WQ_ENET_DESC_H_
19#define _WQ_ENET_DESC_H_
20
21/* Ethernet work queue descriptor: 16B */
22struct wq_enet_desc {
23 __le64 address;
24 __le16 length;
25 __le16 mss_loopback;
26 __le16 header_length_flags;
27 __le16 vlan_tag;
28};
29
30#define WQ_ENET_ADDR_BITS 64
31#define WQ_ENET_LEN_BITS 14
32#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1)
33#define WQ_ENET_MSS_BITS 14
34#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1)
35#define WQ_ENET_MSS_SHIFT 2
36#define WQ_ENET_LOOPBACK_SHIFT 1
37#define WQ_ENET_HDRLEN_BITS 10
38#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1)
39#define WQ_ENET_FLAGS_OM_BITS 2
40#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1)
41#define WQ_ENET_FLAGS_EOP_SHIFT 12
42#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13
43#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14
44#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15
45
46#define WQ_ENET_OFFLOAD_MODE_CSUM 0
47#define WQ_ENET_OFFLOAD_MODE_RESERVED 1
48#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2
49#define WQ_ENET_OFFLOAD_MODE_TSO 3
50
51static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
52 u64 address, u16 length, u16 mss, u16 header_length,
53 u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
54 u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
55{
56 desc->address = cpu_to_le64(address);
57 desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
58 desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<
59 WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
60 desc->header_length_flags = cpu_to_le16(
61 (header_length & WQ_ENET_HDRLEN_MASK) |
62 (offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS |
63 (eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT |
64 (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |
65 (fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT |
66 (vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);
67 desc->vlan_tag = cpu_to_le16(vlan_tag);
68}
69
70static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
71 u64 *address, u16 *length, u16 *mss, u16 *header_length,
72 u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
73 u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
74{
75 *address = le64_to_cpu(desc->address);
76 *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK;
77 *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
78 WQ_ENET_MSS_MASK;
79 *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >>
80 WQ_ENET_LOOPBACK_SHIFT) & 1);
81 *header_length = le16_to_cpu(desc->header_length_flags) &
82 WQ_ENET_HDRLEN_MASK;
83 *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >>
84 WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
85 *eop = (u8)((le16_to_cpu(desc->header_length_flags) >>
86 WQ_ENET_FLAGS_EOP_SHIFT) & 1);
87 *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>
88 WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
89 *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >>
90 WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
91 *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >>
92 WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
93 *vlan_tag = le16_to_cpu(desc->vlan_tag);
94}
95
96#endif /* _WQ_ENET_DESC_H_ */
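
To make the bit layout of header_length_flags concrete: bits 9:0 hold the header length, bits 11:10 the offload mode, and bits 12 through 15 the EOP, CQ-entry, FCoE-encap and VLAN-insert flags. A host-endian sketch of the packing performed by wq_enet_desc_enc(); note the driver additionally applies cpu_to_le16(), which is the identity on little-endian hosts:

#include <stdint.h>
#include <stdio.h>

#define HDRLEN_BITS		10
#define HDRLEN_MASK		((1 << HDRLEN_BITS) - 1)
#define OM_MASK			0x3
#define EOP_SHIFT		12
#define CQ_ENTRY_SHIFT		13
#define FCOE_ENCAP_SHIFT	14
#define VLAN_INSERT_SHIFT	15

/* Pack the header_length_flags word the way wq_enet_desc_enc() does. */
static uint16_t pack_hdrlen_flags(uint16_t hdrlen, uint8_t om, uint8_t eop,
				  uint8_t cq, uint8_t fcoe, uint8_t vlan)
{
	return (hdrlen & HDRLEN_MASK) |
	       ((om & OM_MASK) << HDRLEN_BITS) |
	       ((eop & 1) << EOP_SHIFT) |
	       ((cq & 1) << CQ_ENTRY_SHIFT) |
	       ((fcoe & 1) << FCOE_ENCAP_SHIFT) |
	       ((vlan & 1) << VLAN_INSERT_SHIFT);
}

int main(void)
{
	/* 14-byte header, checksum offload mode, EOP and CQ entry set. */
	printf("0x%04x\n", (unsigned)pack_hdrlen_flags(14, 0, 1, 1, 0, 0));
	/* prints 0x300e */
	return 0;
}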
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 9a1c34205254..3f25b8fa921d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -471,6 +471,47 @@ static void st_release_request(struct st_request *streq)
471 kfree(streq); 471 kfree(streq);
472} 472}
473 473
474static void st_do_stats(struct scsi_tape *STp, struct request *req)
475{
476 ktime_t now;
477
478 now = ktime_get();
479 if (req->cmd[0] == WRITE_6) {
480 now = ktime_sub(now, STp->stats->write_time);
481 atomic64_add(ktime_to_ns(now), &STp->stats->tot_write_time);
482 atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time);
483 atomic64_inc(&STp->stats->write_cnt);
484 if (req->errors) {
485 atomic64_add(atomic_read(&STp->stats->last_write_size)
486 - STp->buffer->cmdstat.residual,
487 &STp->stats->write_byte_cnt);
488 if (STp->buffer->cmdstat.residual > 0)
489 atomic64_inc(&STp->stats->resid_cnt);
490 } else
491 atomic64_add(atomic_read(&STp->stats->last_write_size),
492 &STp->stats->write_byte_cnt);
493 } else if (req->cmd[0] == READ_6) {
494 now = ktime_sub(now, STp->stats->read_time);
495 atomic64_add(ktime_to_ns(now), &STp->stats->tot_read_time);
496 atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time);
497 atomic64_inc(&STp->stats->read_cnt);
498 if (req->errors) {
499 atomic64_add(atomic_read(&STp->stats->last_read_size)
500 - STp->buffer->cmdstat.residual,
501 &STp->stats->read_byte_cnt);
502 if (STp->buffer->cmdstat.residual > 0)
503 atomic64_inc(&STp->stats->resid_cnt);
504 } else
505 atomic64_add(atomic_read(&STp->stats->last_read_size),
506 &STp->stats->read_byte_cnt);
507 } else {
508 now = ktime_sub(now, STp->stats->other_time);
509 atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time);
510 atomic64_inc(&STp->stats->other_cnt);
511 }
512 atomic64_dec(&STp->stats->in_flight);
513}
514
474static void st_scsi_execute_end(struct request *req, int uptodate) 515static void st_scsi_execute_end(struct request *req, int uptodate)
475{ 516{
476 struct st_request *SRpnt = req->end_io_data; 517 struct st_request *SRpnt = req->end_io_data;
@@ -480,6 +521,8 @@ static void st_scsi_execute_end(struct request *req, int uptodate)
480 STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors; 521 STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
481 STp->buffer->cmdstat.residual = req->resid_len; 522 STp->buffer->cmdstat.residual = req->resid_len;
482 523
524 st_do_stats(STp, req);
525
483 tmp = SRpnt->bio; 526 tmp = SRpnt->bio;
484 if (SRpnt->waiting) 527 if (SRpnt->waiting)
485 complete(SRpnt->waiting); 528 complete(SRpnt->waiting);
@@ -496,6 +539,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
496 struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data; 539 struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
497 int err = 0; 540 int err = 0;
498 int write = (data_direction == DMA_TO_DEVICE); 541 int write = (data_direction == DMA_TO_DEVICE);
542 struct scsi_tape *STp = SRpnt->stp;
499 543
500 req = blk_get_request(SRpnt->stp->device->request_queue, write, 544 req = blk_get_request(SRpnt->stp->device->request_queue, write,
501 GFP_KERNEL); 545 GFP_KERNEL);
@@ -516,6 +560,17 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
516 } 560 }
517 } 561 }
518 562
563 atomic64_inc(&STp->stats->in_flight);
564 if (cmd[0] == WRITE_6) {
565 atomic_set(&STp->stats->last_write_size, bufflen);
566 STp->stats->write_time = ktime_get();
567 } else if (cmd[0] == READ_6) {
568 atomic_set(&STp->stats->last_read_size, bufflen);
569 STp->stats->read_time = ktime_get();
570 } else {
571 STp->stats->other_time = ktime_get();
572 }
573
519 SRpnt->bio = req->bio; 574 SRpnt->bio = req->bio;
520 req->cmd_len = COMMAND_SIZE(cmd[0]); 575 req->cmd_len = COMMAND_SIZE(cmd[0]);
521 memset(req->cmd, 0, BLK_MAX_CDB); 576 memset(req->cmd, 0, BLK_MAX_CDB);
@@ -4222,6 +4277,12 @@ static int st_probe(struct device *dev)
4222 } 4277 }
4223 tpnt->index = error; 4278 tpnt->index = error;
4224 sprintf(disk->disk_name, "st%d", tpnt->index); 4279 sprintf(disk->disk_name, "st%d", tpnt->index);
4280 tpnt->stats = kzalloc(sizeof(struct scsi_tape_stats), GFP_KERNEL);
4281 if (tpnt->stats == NULL) {
4282 sdev_printk(KERN_ERR, SDp,
4283 "st: Can't allocate statistics.\n");
4284 goto out_idr_remove;
4285 }
4225 4286
4226 dev_set_drvdata(dev, tpnt); 4287 dev_set_drvdata(dev, tpnt);
4227 4288
@@ -4241,6 +4302,8 @@ static int st_probe(struct device *dev)
4241 4302
4242out_remove_devs: 4303out_remove_devs:
4243 remove_cdevs(tpnt); 4304 remove_cdevs(tpnt);
4305 kfree(tpnt->stats);
4306out_idr_remove:
4244 spin_lock(&st_index_lock); 4307 spin_lock(&st_index_lock);
4245 idr_remove(&st_index_idr, tpnt->index); 4308 idr_remove(&st_index_idr, tpnt->index);
4246 spin_unlock(&st_index_lock); 4309 spin_unlock(&st_index_lock);
@@ -4298,6 +4361,7 @@ static void scsi_tape_release(struct kref *kref)
4298 4361
4299 disk->private_data = NULL; 4362 disk->private_data = NULL;
4300 put_disk(disk); 4363 put_disk(disk);
4364 kfree(tpnt->stats);
4301 kfree(tpnt); 4365 kfree(tpnt);
4302 return; 4366 return;
4303} 4367}
@@ -4513,6 +4577,184 @@ options_show(struct device *dev, struct device_attribute *attr, char *buf)
4513} 4577}
4514static DEVICE_ATTR_RO(options); 4578static DEVICE_ATTR_RO(options);
4515 4579
4580/* Support for tape stats */
4581
4582/**
4583 * read_cnt_show - return read count - count of reads made from tape drive
4584 * @dev: struct device
4585 * @attr: attribute structure
4586 * @buf: buffer to return formatted data in
4587 */
4588static ssize_t read_cnt_show(struct device *dev,
4589 struct device_attribute *attr, char *buf)
4590{
4591 struct st_modedef *STm = dev_get_drvdata(dev);
4592
4593 return sprintf(buf, "%lld",
4594 (long long)atomic64_read(&STm->tape->stats->read_cnt));
4595}
4596static DEVICE_ATTR_RO(read_cnt);
4597
4598/**
4599 * read_byte_cnt_show - return read byte count - tape drives
4600 * may use blocks smaller than 512 bytes; this gives the raw byte count
4601 * of data read from the tape drive.
4602 * @dev: struct device
4603 * @attr: attribute structure
4604 * @buf: buffer to return formatted data in
4605 */
4606static ssize_t read_byte_cnt_show(struct device *dev,
4607 struct device_attribute *attr, char *buf)
4608{
4609 struct st_modedef *STm = dev_get_drvdata(dev);
4610
4611 return sprintf(buf, "%lld",
4612 (long long)atomic64_read(&STm->tape->stats->read_byte_cnt));
4613}
4614static DEVICE_ATTR_RO(read_byte_cnt);
4615
4616/**
4617 * read_ns_show - return read ns - overall time spent waiting on reads, in ns.
4618 * @dev: struct device
4619 * @attr: attribute structure
4620 * @buf: buffer to return formatted data in
4621 */
4622static ssize_t read_ns_show(struct device *dev,
4623 struct device_attribute *attr, char *buf)
4624{
4625 struct st_modedef *STm = dev_get_drvdata(dev);
4626
4627 return sprintf(buf, "%lld",
4628 (long long)atomic64_read(&STm->tape->stats->tot_read_time));
4629}
4630static DEVICE_ATTR_RO(read_ns);
4631
4632/**
4633 * write_cnt_show - write count - number of user calls
4634 * to write(2) that have written data to tape.
4635 * @dev: struct device
4636 * @attr: attribute structure
4637 * @buf: buffer to return formatted data in
4638 */
4639static ssize_t write_cnt_show(struct device *dev,
4640 struct device_attribute *attr, char *buf)
4641{
4642 struct st_modedef *STm = dev_get_drvdata(dev);
4643
4644 return sprintf(buf, "%lld",
4645 (long long)atomic64_read(&STm->tape->stats->write_cnt));
4646}
4647static DEVICE_ATTR_RO(write_cnt);
4648
4649/**
4650 * write_byte_cnt_show - write byte count - raw count of
4651 * bytes written to tape.
4652 * @dev: struct device
4653 * @attr: attribute structure
4654 * @buf: buffer to return formatted data in
4655 */
4656static ssize_t write_byte_cnt_show(struct device *dev,
4657 struct device_attribute *attr, char *buf)
4658{
4659 struct st_modedef *STm = dev_get_drvdata(dev);
4660
4661 return sprintf(buf, "%lld",
4662 (long long)atomic64_read(&STm->tape->stats->write_byte_cnt));
4663}
4664static DEVICE_ATTR_RO(write_byte_cnt);
4665
4666/**
4667 * write_ns_show - write ns - number of nanoseconds waiting on write
4668 * requests to complete.
4669 * @dev: struct device
4670 * @attr: attribute structure
4671 * @buf: buffer to return formatted data in
4672 */
4673static ssize_t write_ns_show(struct device *dev,
4674 struct device_attribute *attr, char *buf)
4675{
4676 struct st_modedef *STm = dev_get_drvdata(dev);
4677
4678 return sprintf(buf, "%lld",
4679 (long long)atomic64_read(&STm->tape->stats->tot_write_time));
4680}
4681static DEVICE_ATTR_RO(write_ns);
4682
4683/**
4684 * in_flight_show - number of I/Os currently in flight -
4685 * in most cases this will be either 0 or 1. It may be higher if someone
4686 * has also issued other SCSI commands, e.g. via an ioctl.
4687 * @dev: struct device
4688 * @attr: attribute structure
4689 * @buf: buffer to return formatted data in
4690 */
4691static ssize_t in_flight_show(struct device *dev,
4692 struct device_attribute *attr, char *buf)
4693{
4694 struct st_modedef *STm = dev_get_drvdata(dev);
4695
4696 return sprintf(buf, "%lld",
4697 (long long)atomic64_read(&STm->tape->stats->in_flight));
4698}
4699static DEVICE_ATTR_RO(in_flight);
4700
4701/**
4702 * io_ns_show - io wait ns - this is the number of ns spent
4703 * waiting on all I/O to complete. This includes tape movement commands
4704 * such as rewinding and seeking to end of file or tape; it also includes
4705 * reads and writes. To determine the time spent on tape movement,
4706 * subtract the read and write ns from this value.
4707 * @dev: struct device
4708 * @attr: attribute structure
4709 * @buf: buffer to return formatted data in
4710 */
4711static ssize_t io_ns_show(struct device *dev,
4712 struct device_attribute *attr, char *buf)
4713{
4714 struct st_modedef *STm = dev_get_drvdata(dev);
4715
4716 return sprintf(buf, "%lld",
4717 (long long)atomic64_read(&STm->tape->stats->tot_io_time));
4718}
4719static DEVICE_ATTR_RO(io_ns);
4720
4721/**
4722 * other_cnt_show - other io count - this is the number of
4723 * I/O requests other than read and write requests.
4724 * Typically these are tape movement requests, including movement the
4725 * driver itself initiates. Only requests issued by the st driver count.
4726 * @dev: struct device
4727 * @attr: attribute structure
4728 * @buf: buffer to return formatted data in
4729 */
4730static ssize_t other_cnt_show(struct device *dev,
4731 struct device_attribute *attr, char *buf)
4732{
4733 struct st_modedef *STm = dev_get_drvdata(dev);
4734
4735 return sprintf(buf, "%lld",
4736 (long long)atomic64_read(&STm->tape->stats->other_cnt));
4737}
4738static DEVICE_ATTR_RO(other_cnt);
4739
4740/**
4741 * resid_cnt_show - a count of the number of times a request completed
4742 * with a residual count; this usually indicates reads larger than the
4743 * tape block size.
4744 * @dev: struct device
4745 * @attr: attribute structure
4746 * @buf: buffer to return formatted data in
4747 */
4748static ssize_t resid_cnt_show(struct device *dev,
4749 struct device_attribute *attr, char *buf)
4750{
4751 struct st_modedef *STm = dev_get_drvdata(dev);
4752
4753 return sprintf(buf, "%lld",
4754 (long long)atomic64_read(&STm->tape->stats->resid_cnt));
4755}
4756static DEVICE_ATTR_RO(resid_cnt);
4757
4516static struct attribute *st_dev_attrs[] = { 4758static struct attribute *st_dev_attrs[] = {
4517 &dev_attr_defined.attr, 4759 &dev_attr_defined.attr,
4518 &dev_attr_default_blksize.attr, 4760 &dev_attr_default_blksize.attr,
@@ -4521,7 +4763,35 @@ static struct attribute *st_dev_attrs[] = {
4521 &dev_attr_options.attr, 4763 &dev_attr_options.attr,
4522 NULL, 4764 NULL,
4523}; 4765};
4524ATTRIBUTE_GROUPS(st_dev); 4766
4767static struct attribute *st_stats_attrs[] = {
4768 &dev_attr_read_cnt.attr,
4769 &dev_attr_read_byte_cnt.attr,
4770 &dev_attr_read_ns.attr,
4771 &dev_attr_write_cnt.attr,
4772 &dev_attr_write_byte_cnt.attr,
4773 &dev_attr_write_ns.attr,
4774 &dev_attr_in_flight.attr,
4775 &dev_attr_io_ns.attr,
4776 &dev_attr_other_cnt.attr,
4777 &dev_attr_resid_cnt.attr,
4778 NULL,
4779};
4780
4781static struct attribute_group stats_group = {
4782 .name = "stats",
4783 .attrs = st_stats_attrs,
4784};
4785
4786static struct attribute_group st_group = {
4787 .attrs = st_dev_attrs,
4788};
4789
4790static const struct attribute_group *st_dev_groups[] = {
4791 &st_group,
4792 &stats_group,
4793 NULL,
4794};
4525 4795
4526/* The following functions may be useful for a larger audience. */ 4796/* The following functions may be useful for a larger audience. */
4527static int sgl_map_user_pages(struct st_buffer *STbp, 4797static int sgl_map_user_pages(struct st_buffer *STbp,
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index f3eee0f9f40c..b6486b5d8681 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -92,6 +92,27 @@ struct st_partstat {
92 int drv_file; 92 int drv_file;
93}; 93};
94 94
95/* Tape statistics */
96struct scsi_tape_stats {
97 atomic64_t read_byte_cnt; /* bytes read */
98 atomic64_t write_byte_cnt; /* bytes written */
99 atomic64_t in_flight; /* Number of I/Os in flight */
100 atomic64_t read_cnt; /* Count of read requests */
101 atomic64_t write_cnt; /* Count of write requests */
102	atomic64_t other_cnt;       /* Count of other requests, either
103					 * implicit or from a user-space
104					 * ioctl */
105 atomic64_t resid_cnt; /* Count of resid_len > 0 */
106 atomic64_t tot_read_time; /* ktime spent completing reads */
107 atomic64_t tot_write_time; /* ktime spent completing writes */
108 atomic64_t tot_io_time; /* ktime spent doing any I/O */
109 ktime_t read_time; /* holds ktime request was queued */
110 ktime_t write_time; /* holds ktime request was queued */
111 ktime_t other_time; /* holds ktime request was queued */
112 atomic_t last_read_size; /* Number of bytes issued for last read */
113 atomic_t last_write_size; /* Number of bytes issued for last write */
114};
115
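
These counters back the read-only "stats" sysfs group added to st.c above, so mean latencies can be derived in userspace. A sketch, assuming the conventional /sys/class/scsi_tape/st<N>/stats/ path (st0 below):

#include <stdio.h>

/* Read one decimal counter from a stats attribute; the path is assumed. */
static long long read_stat(const char *name)
{
	char path[128];
	long long v = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/scsi_tape/st0/stats/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%lld", &v) != 1)
		v = -1;
	fclose(f);

	return v;
}

int main(void)
{
	long long cnt = read_stat("read_cnt");
	long long ns = read_stat("read_ns");

	if (cnt > 0 && ns >= 0)
		printf("mean read latency: %lld ns\n", ns / cnt);

	return 0;
}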
95#define ST_NBR_PARTITIONS 4 116#define ST_NBR_PARTITIONS 4
96 117
97/* The tape drive descriptor */ 118/* The tape drive descriptor */
@@ -171,6 +192,7 @@ struct scsi_tape {
171#endif 192#endif
172 struct gendisk *disk; 193 struct gendisk *disk;
173 struct kref kref; 194 struct kref kref;
195 struct scsi_tape_stats *stats;
174}; 196};
175 197
176/* Bit masks for use_pf */ 198/* Bit masks for use_pf */
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index 0b7819f3e09b..5bdcbe8fa958 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -838,7 +838,6 @@ static struct scsi_host_template driver_template = {
838 .can_queue = 1, 838 .can_queue = 1,
839 .this_id = SYM53C416_SCSI_ID, 839 .this_id = SYM53C416_SCSI_ID,
840 .sg_tablesize = 32, 840 .sg_tablesize = 32,
841 .cmd_per_lun = 1,
842 .unchecked_isa_dma = 1, 841 .unchecked_isa_dma = 1,
843 .use_clustering = ENABLE_CLUSTERING, 842 .use_clustering = ENABLE_CLUSTERING,
844}; 843};
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 8a1f4b355416..e94538362536 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -73,7 +73,7 @@ config SCSI_UFSHCD_PLATFORM
73 73
74config SCSI_UFS_QCOM 74config SCSI_UFS_QCOM
75 bool "QCOM specific hooks to UFS controller platform driver" 75 bool "QCOM specific hooks to UFS controller platform driver"
76 depends on SCSI_UFSHCD_PLATFORM && ARCH_MSM 76 depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
77 select PHY_QCOM_UFS 77 select PHY_QCOM_UFS
78 help 78 help
79 This selects the QCOM specific additions to UFSHCD platform driver. 79 This selects the QCOM specific additions to UFSHCD platform driver.
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 6652a8171de6..4cdffa46d401 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -307,6 +307,7 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, bool status)
307static unsigned long 307static unsigned long
308ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate) 308ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate)
309{ 309{
310 struct ufs_qcom_host *host = hba->priv;
310 struct ufs_clk_info *clki; 311 struct ufs_clk_info *clki;
311 u32 core_clk_period_in_ns; 312 u32 core_clk_period_in_ns;
312 u32 tx_clk_cycles_per_us = 0; 313 u32 tx_clk_cycles_per_us = 0;
@@ -330,6 +331,16 @@ ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate)
330 {UFS_HS_G2, 0x49}, 331 {UFS_HS_G2, 0x49},
331 }; 332 };
332 333
334 /*
335 * The Qunipro controller does not use following registers:
336 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
337 * UFS_REG_PA_LINK_STARTUP_TIMER
338 * But UTP controller uses SYS1CLK_1US_REG register for Interrupt
339 * Aggregation logic.
340 */
341 if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
342 goto out;
343
333 if (gear == 0) { 344 if (gear == 0) {
334 dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear); 345 dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
335 goto out_error; 346 goto out_error;
@@ -683,6 +694,16 @@ out:
683 return ret; 694 return ret;
684} 695}
685 696
697static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
698{
699 struct ufs_qcom_host *host = hba->priv;
700
701 if (host->hw_ver.major == 0x1)
702 return UFSHCI_VERSION_11;
703 else
704 return UFSHCI_VERSION_20;
705}
706
686/** 707/**
687 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks 708 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
688 * @hba: host controller instance 709 * @hba: host controller instance
@@ -696,13 +717,24 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
696{ 717{
697 struct ufs_qcom_host *host = hba->priv; 718 struct ufs_qcom_host *host = hba->priv;
698 719
699 if (host->hw_ver.major == 0x1) 720 if (host->hw_ver.major == 0x01) {
700 hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS; 721 hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
722 | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
723 | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
724
725 if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
726 hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
727 }
701 728
702 if (host->hw_ver.major >= 0x2) { 729 if (host->hw_ver.major >= 0x2) {
730 hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
731 hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
732
703 if (!ufs_qcom_cap_qunipro(host)) 733 if (!ufs_qcom_cap_qunipro(host))
704 /* Legacy UniPro mode still needs the following quirks */ 734
705 hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS; 735 hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
736 | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
737 | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
706 } 738 }
707} 739}
708 740
@@ -1005,6 +1037,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
1005 .name = "qcom", 1037 .name = "qcom",
1006 .init = ufs_qcom_init, 1038 .init = ufs_qcom_init,
1007 .exit = ufs_qcom_exit, 1039 .exit = ufs_qcom_exit,
1040 .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version,
1008 .clk_scale_notify = ufs_qcom_clk_scale_notify, 1041 .clk_scale_notify = ufs_qcom_clk_scale_notify,
1009 .setup_clocks = ufs_qcom_setup_clocks, 1042 .setup_clocks = ufs_qcom_setup_clocks,
1010 .hce_enable_notify = ufs_qcom_hce_enable_notify, 1043 .hce_enable_notify = ufs_qcom_hce_enable_notify,
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 648a44675880..b0ade73f8c6a 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -188,6 +188,8 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
188static irqreturn_t ufshcd_intr(int irq, void *__hba); 188static irqreturn_t ufshcd_intr(int irq, void *__hba);
189static int ufshcd_config_pwr_mode(struct ufs_hba *hba, 189static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
190 struct ufs_pa_layer_attr *desired_pwr_mode); 190 struct ufs_pa_layer_attr *desired_pwr_mode);
191static int ufshcd_change_power_mode(struct ufs_hba *hba,
192 struct ufs_pa_layer_attr *pwr_mode);
191 193
192static inline int ufshcd_enable_irq(struct ufs_hba *hba) 194static inline int ufshcd_enable_irq(struct ufs_hba *hba)
193{ 195{
@@ -269,6 +271,11 @@ static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
269 */ 271 */
270static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) 272static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
271{ 273{
274 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION) {
275 if (hba->vops && hba->vops->get_ufs_hci_version)
276 return hba->vops->get_ufs_hci_version(hba);
277 }
278
272 return ufshcd_readl(hba, REG_UFS_VERSION); 279 return ufshcd_readl(hba, REG_UFS_VERSION);
273} 280}
274 281
@@ -481,6 +488,15 @@ ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
481} 488}
482 489
483/** 490/**
491 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
492 * @hba: per adapter instance
493 */
494static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
495{
496 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
497}
498
499/**
484 * ufshcd_enable_run_stop_reg - Enable run-stop registers, 500 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
485 * When run-stop registers are set to 1, it indicates the 501 * When run-stop registers are set to 1, it indicates the
486 * host controller that it can process the requests 502 * host controller that it can process the requests
@@ -1326,7 +1342,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1326 lrbp->sense_buffer = cmd->sense_buffer; 1342 lrbp->sense_buffer = cmd->sense_buffer;
1327 lrbp->task_tag = tag; 1343 lrbp->task_tag = tag;
1328 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); 1344 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
1329 lrbp->intr_cmd = false; 1345 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
1330 lrbp->command_type = UTP_CMD_TYPE_SCSI; 1346 lrbp->command_type = UTP_CMD_TYPE_SCSI;
1331 1347
1332 /* form UPIU before issuing the command */ 1348 /* form UPIU before issuing the command */
@@ -2147,6 +2163,31 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
2147 }; 2163 };
2148 const char *get = action[!!peer]; 2164 const char *get = action[!!peer];
2149 int ret; 2165 int ret;
2166 struct ufs_pa_layer_attr orig_pwr_info;
2167 struct ufs_pa_layer_attr temp_pwr_info;
2168 bool pwr_mode_change = false;
2169
2170 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
2171 orig_pwr_info = hba->pwr_info;
2172 temp_pwr_info = orig_pwr_info;
2173
2174 if (orig_pwr_info.pwr_tx == FAST_MODE ||
2175 orig_pwr_info.pwr_rx == FAST_MODE) {
2176 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
2177 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
2178 pwr_mode_change = true;
2179 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
2180 orig_pwr_info.pwr_rx == SLOW_MODE) {
2181 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
2182 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
2183 pwr_mode_change = true;
2184 }
2185 if (pwr_mode_change) {
2186 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
2187 if (ret)
2188 goto out;
2189 }
2190 }
2150 2191
2151 uic_cmd.command = peer ? 2192 uic_cmd.command = peer ?
2152 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET; 2193 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
@@ -2161,6 +2202,10 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
2161 2202
2162 if (mib_val) 2203 if (mib_val)
2163 *mib_val = uic_cmd.argument3; 2204 *mib_val = uic_cmd.argument3;
2205
2206 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
2207 && pwr_mode_change)
2208 ufshcd_change_power_mode(hba, &orig_pwr_info);
2164out: 2209out:
2165 return ret; 2210 return ret;
2166} 2211}
@@ -2249,6 +2294,16 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
2249 struct uic_command uic_cmd = {0}; 2294 struct uic_command uic_cmd = {0};
2250 int ret; 2295 int ret;
2251 2296
2297 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
2298 ret = ufshcd_dme_set(hba,
2299 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
2300 if (ret) {
2301 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
2302 __func__, ret);
2303 goto out;
2304 }
2305 }
2306
2252 uic_cmd.command = UIC_CMD_DME_SET; 2307 uic_cmd.command = UIC_CMD_DME_SET;
2253 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); 2308 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
2254 uic_cmd.argument3 = mode; 2309 uic_cmd.argument3 = mode;
@@ -2256,6 +2311,7 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
2256 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); 2311 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2257 ufshcd_release(hba); 2312 ufshcd_release(hba);
2258 2313
2314out:
2259 return ret; 2315 return ret;
2260} 2316}
2261 2317
@@ -2522,7 +2578,10 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
2522 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); 2578 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
2523 2579
2524 /* Configure interrupt aggregation */ 2580 /* Configure interrupt aggregation */
2525 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO); 2581 if (ufshcd_is_intr_aggr_allowed(hba))
2582 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
2583 else
2584 ufshcd_disable_intr_aggr(hba);
2526 2585
2527 /* Configure UTRL and UTMRL base address registers */ 2586 /* Configure UTRL and UTMRL base address registers */
2528 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), 2587 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
@@ -2628,6 +2687,42 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
2628 return 0; 2687 return 0;
2629} 2688}
2630 2689
2690static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
2691{
2692 int tx_lanes, i, err = 0;
2693
2694 if (!peer)
2695 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
2696 &tx_lanes);
2697 else
2698 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
2699 &tx_lanes);
2700 for (i = 0; i < tx_lanes; i++) {
2701 if (!peer)
2702 err = ufshcd_dme_set(hba,
2703 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
2704 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
2705 0);
2706 else
2707 err = ufshcd_dme_peer_set(hba,
2708 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
2709 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
2710 0);
2711 if (err) {
2712 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
2713 __func__, peer, i, err);
2714 break;
2715 }
2716 }
2717
2718 return err;
2719}
2720
2721static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
2722{
2723 return ufshcd_disable_tx_lcc(hba, true);
2724}
2725
2631/** 2726/**
2632 * ufshcd_link_startup - Initialize unipro link startup 2727 * ufshcd_link_startup - Initialize unipro link startup
2633 * @hba: per adapter instance 2728 * @hba: per adapter instance
@@ -2665,6 +2760,12 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
2665 /* failed to get the link up... retire */ 2760 /* failed to get the link up... retire */
2666 goto out; 2761 goto out;
2667 2762
2763 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
2764 ret = ufshcd_disable_device_tx_lcc(hba);
2765 if (ret)
2766 goto out;
2767 }
2768
2668 /* Include any host controller configuration via UIC commands */ 2769 /* Include any host controller configuration via UIC commands */
2669 if (hba->vops && hba->vops->link_startup_notify) { 2770 if (hba->vops && hba->vops->link_startup_notify) {
2670 ret = hba->vops->link_startup_notify(hba, POST_CHANGE); 2771 ret = hba->vops->link_startup_notify(hba, POST_CHANGE);
@@ -3073,7 +3174,8 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
3073 * false interrupt if device completes another request after resetting 3174 * false interrupt if device completes another request after resetting
3074 * aggregation and before reading the DB. 3175 * aggregation and before reading the DB.
3075 */ 3176 */
3076 ufshcd_reset_intr_aggr(hba); 3177 if (ufshcd_is_intr_aggr_allowed(hba))
3178 ufshcd_reset_intr_aggr(hba);
3077 3179
3078 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 3180 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
3079 completed_reqs = tr_doorbell ^ hba->outstanding_reqs; 3181 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index b47ff07698e8..c40a0e78a6c4 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -246,6 +246,7 @@ struct ufs_pwr_mode_info {
246 * @name: variant name 246 * @name: variant name
247 * @init: called when the driver is initialized 247 * @init: called when the driver is initialized
248 * @exit: called to cleanup everything done in init 248 * @exit: called to cleanup everything done in init
249 * @get_ufs_hci_version: called to get UFS HCI version
249 * @clk_scale_notify: notifies that clks are scaled up/down 250 * @clk_scale_notify: notifies that clks are scaled up/down
250 * @setup_clocks: called before touching any of the controller registers 251 * @setup_clocks: called before touching any of the controller registers
251 * @setup_regulators: called before accessing the host controller 252 * @setup_regulators: called before accessing the host controller
@@ -263,6 +264,7 @@ struct ufs_hba_variant_ops {
263 const char *name; 264 const char *name;
264 int (*init)(struct ufs_hba *); 265 int (*init)(struct ufs_hba *);
265 void (*exit)(struct ufs_hba *); 266 void (*exit)(struct ufs_hba *);
267 u32 (*get_ufs_hci_version)(struct ufs_hba *);
266 void (*clk_scale_notify)(struct ufs_hba *); 268 void (*clk_scale_notify)(struct ufs_hba *);
267 int (*setup_clocks)(struct ufs_hba *, bool); 269 int (*setup_clocks)(struct ufs_hba *, bool);
268 int (*setup_regulators)(struct ufs_hba *, bool); 270 int (*setup_regulators)(struct ufs_hba *, bool);
@@ -417,11 +419,45 @@ struct ufs_hba {
417 unsigned int irq; 419 unsigned int irq;
418 bool is_irq_enabled; 420 bool is_irq_enabled;
419 421
422 /* Interrupt aggregation support is broken */
423 #define UFSHCD_QUIRK_BROKEN_INTR_AGGR UFS_BIT(0)
424
420 /* 425 /*
421 * delay before each dme command is required as the unipro 426 * delay before each dme command is required as the unipro
422 * layer has shown instabilities 427 * layer has shown instabilities
423 */ 428 */
424 #define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS UFS_BIT(0) 429 #define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS UFS_BIT(1)
430
431 /*
432 * If the UFS host controller has trouble processing LCC (Line
433 * Control Command) frames coming from the device, enable this quirk.
434 * When it is enabled, the host controller driver disables LCC
435 * transmission on the UFS device by clearing the device's
436 * TX_LCC_ENABLE attribute to 0.
437 */
438 #define UFSHCD_QUIRK_BROKEN_LCC UFS_BIT(2)
439
440 /*
441 * The attribute PA_RXHSUNTERMCAP specifies whether or not the
442 * inbound link supports an unterminated line in HS mode. Setting this
443 * attribute to 1 fixes the switch to HS gear.
444 */
445 #define UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP UFS_BIT(3)
446
447 /*
448 * This quirk needs to be enabled if the host controller only allows
449 * accessing the peer dme attributes in AUTO mode (FAST AUTO or
450 * SLOW AUTO).
451 */
452 #define UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE UFS_BIT(4)
453
454 /*
455 * This quirk needs to be enabled if the host controller doesn't
456 * advertise the correct version in UFS_VER register. If this quirk
457 * is enabled, standard UFS host driver will call the vendor specific
458 * ops (get_ufs_hci_version) to get the correct version.
459 */
460 #define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION UFS_BIT(5)
425 461
426 unsigned int quirks; /* Deviations from standard UFSHCI spec. */ 462 unsigned int quirks; /* Deviations from standard UFSHCI spec. */
427 463
@@ -478,6 +514,12 @@ struct ufs_hba {
478#define UFSHCD_CAP_CLK_SCALING (1 << 2) 514#define UFSHCD_CAP_CLK_SCALING (1 << 2)
479 /* Allow auto bkops to enabled during runtime suspend */ 515 /* Allow auto bkops to enabled during runtime suspend */
480#define UFSHCD_CAP_AUTO_BKOPS_SUSPEND (1 << 3) 516#define UFSHCD_CAP_AUTO_BKOPS_SUSPEND (1 << 3)
517 /*
518 * This capability allows the host controller driver to use the
519 * UFS HCI's interrupt aggregation capability.
520 * CAUTION: Enabling this might reduce overall UFS throughput.
521 */
522#define UFSHCD_CAP_INTR_AGGR (1 << 4)
481 523
482 struct devfreq *devfreq; 524 struct devfreq *devfreq;
483 struct ufs_clk_scaling clk_scaling; 525 struct ufs_clk_scaling clk_scaling;
@@ -502,6 +544,15 @@ static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
502 return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND; 544 return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
503} 545}
504 546
547static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
548{
549 if ((hba->caps & UFSHCD_CAP_INTR_AGGR) &&
550 !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR))
551 return true;
552 else
553 return false;
554}
555
505#define ufshcd_writel(hba, val, reg) \ 556#define ufshcd_writel(hba, val, reg) \
506 writel((val), (hba)->mmio_base + (reg)) 557 writel((val), (hba)->mmio_base + (reg))
507#define ufshcd_readl(hba, reg) \ 558#define ufshcd_readl(hba, reg) \
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index d5721199e9cc..0ae0967aaed8 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -89,8 +89,9 @@ enum {
89 89
90/* Controller UFSHCI version */ 90/* Controller UFSHCI version */
91enum { 91enum {
92 UFSHCI_VERSION_10 = 0x00010000, 92 UFSHCI_VERSION_10 = 0x00010000, /* 1.0 */
93 UFSHCI_VERSION_11 = 0x00010100, 93 UFSHCI_VERSION_11 = 0x00010100, /* 1.1 */
94 UFSHCI_VERSION_20 = 0x00000200, /* 2.0 */
94}; 95};
95 96
96/* 97/*
@@ -206,6 +207,9 @@ enum {
206#define CONFIG_RESULT_CODE_MASK 0xFF 207#define CONFIG_RESULT_CODE_MASK 0xFF
207#define GENERIC_ERROR_CODE_MASK 0xFF 208#define GENERIC_ERROR_CODE_MASK 0xFF
208 209
210/* GenSelectorIndex calculation macros for M-PHY attributes */
211#define UIC_ARG_MPHY_TX_GEN_SEL_INDEX(lane) (lane)
212
209#define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\ 213#define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\
210 ((sel) & 0xFFFF)) 214 ((sel) & 0xFFFF))
211#define UIC_ARG_MIB(attr) UIC_ARG_MIB_SEL(attr, 0) 215#define UIC_ARG_MIB(attr) UIC_ARG_MIB_SEL(attr, 0)
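With the helper in place, the GenSelectorIndex for a per-lane M-PHY TX attribute is simply the lane number. An illustrative composition, assuming the ufshcd_dme_peer_set() wrapper from ufshcd.h and the TX_LCC_ENABLE attribute named in the quirk comments (lane 0 is an example value):

	/* Clear the peer's TX_LCC_ENABLE on lane 0, as a driver with
	 * UFSHCD_QUIRK_BROKEN_LCC would need to do. */
	ufshcd_dme_peer_set(hba,
			    UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					    UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
			    0);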
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index f164f24a4a55..285f77544c36 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -501,6 +501,7 @@ static void virtio_scsi_init_hdr(struct virtio_device *vdev,
501 cmd->crn = 0; 501 cmd->crn = 0;
502} 502}
503 503
504#ifdef CONFIG_BLK_DEV_INTEGRITY
504static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev, 505static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
505 struct virtio_scsi_cmd_req_pi *cmd_pi, 506 struct virtio_scsi_cmd_req_pi *cmd_pi,
506 struct scsi_cmnd *sc) 507 struct scsi_cmnd *sc)
@@ -524,6 +525,7 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
524 blk_rq_sectors(rq) * 525 blk_rq_sectors(rq) *
525 bi->tuple_size); 526 bi->tuple_size);
526} 527}
528#endif
527 529
528static int virtscsi_queuecommand(struct virtio_scsi *vscsi, 530static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
529 struct virtio_scsi_vq *req_vq, 531 struct virtio_scsi_vq *req_vq,
@@ -546,11 +548,14 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
546 548
547 BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE); 549 BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
548 550
551#ifdef CONFIG_BLK_DEV_INTEGRITY
549 if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) { 552 if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
550 virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc); 553 virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc);
551 memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len); 554 memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
552 req_size = sizeof(cmd->req.cmd_pi); 555 req_size = sizeof(cmd->req.cmd_pi);
553 } else { 556 } else
557#endif
558 {
554 virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc); 559 virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc);
555 memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len); 560 memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
556 req_size = sizeof(cmd->req.cmd); 561 req_size = sizeof(cmd->req.cmd);
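The unusual "} else / #endif / {" shape here is deliberate: with CONFIG_BLK_DEV_INTEGRITY enabled it compiles as an ordinary if/else, and with it disabled the preprocessor removes both the if-branch and the dangling else, leaving a bare brace block that always executes. A standalone toy program showing the same shape (names and output are placeholders):

	#include <stdio.h>

	int main(void)
	{
	#ifdef CONFIG_BLK_DEV_INTEGRITY
		if (1) {
			printf("protection-information path\n");
		} else
	#endif
		{
			printf("plain path\n");
		}
		return 0;
	}

Built without -DCONFIG_BLK_DEV_INTEGRITY, only the second block survives, mirroring how virtscsi_queuecommand() falls back to the non-PI header unconditionally.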
@@ -1002,6 +1007,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
1002 shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE; 1007 shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
1003 shost->nr_hw_queues = num_queues; 1008 shost->nr_hw_queues = num_queues;
1004 1009
1010#ifdef CONFIG_BLK_DEV_INTEGRITY
1005 if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) { 1011 if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
1006 host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | 1012 host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
1007 SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | 1013 SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
@@ -1010,6 +1016,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
1010 scsi_host_set_prot(shost, host_prot); 1016 scsi_host_set_prot(shost, host_prot);
1011 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); 1017 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
1012 } 1018 }
1019#endif
1013 1020
1014 err = scsi_add_host(shost, &vdev->dev); 1021 err = scsi_add_host(shost, &vdev->dev);
1015 if (err) 1022 if (err)
@@ -1090,7 +1097,9 @@ static struct virtio_device_id id_table[] = {
1090static unsigned int features[] = { 1097static unsigned int features[] = {
1091 VIRTIO_SCSI_F_HOTPLUG, 1098 VIRTIO_SCSI_F_HOTPLUG,
1092 VIRTIO_SCSI_F_CHANGE, 1099 VIRTIO_SCSI_F_CHANGE,
1100#ifdef CONFIG_BLK_DEV_INTEGRITY
1093 VIRTIO_SCSI_F_T10_PI, 1101 VIRTIO_SCSI_F_T10_PI,
1102#endif
1094}; 1103};
1095 1104
1096static struct virtio_driver virtio_scsi_driver = { 1105static struct virtio_driver virtio_scsi_driver = {
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index 289ad016d925..61346aa73178 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -882,7 +882,6 @@ static struct scsi_host_template wd719x_template = {
882 .can_queue = 255, 882 .can_queue = 255,
883 .this_id = 7, 883 .this_id = 7,
884 .sg_tablesize = WD719X_SG, 884 .sg_tablesize = WD719X_SG,
885 .cmd_per_lun = WD719X_CMD_PER_LUN,
886 .use_clustering = ENABLE_CLUSTERING, 885 .use_clustering = ENABLE_CLUSTERING,
887}; 886};
888 887
diff --git a/drivers/scsi/wd719x.h b/drivers/scsi/wd719x.h
index 185e30e4eb93..9c6dd45f95f5 100644
--- a/drivers/scsi/wd719x.h
+++ b/drivers/scsi/wd719x.h
@@ -2,8 +2,6 @@
2#define _WD719X_H_ 2#define _WD719X_H_
3 3
4#define WD719X_SG 255 /* Scatter/gather size */ 4#define WD719X_SG 255 /* Scatter/gather size */
5#define WD719X_CMD_PER_LUN 1 /* We should be able to do linked commands, but
6 * this is 1 for now to be safe. */
7 5
8struct wd719x_sglist { 6struct wd719x_sglist {
9 __le32 ptr; 7 __le32 ptr;