author	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-23 13:36:19 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-23 13:36:19 -0500
commit	f290cbacb697b7bc8fc67d3988e330bec0e502ea (patch)
tree	ef17237c0625c5265bb2739a7402c9bacd52e981
parent	42e0372c0e7ea3617a4ab28c7f83ce66cb0f868d (diff)
parent	3eff4c782857d284dc3b11c6db0cab4a263427b7 (diff)
Merge tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull late SCSI updates from James Bottomley:
 "This is mostly stuff which missed the initial pull. There's a new
  driver: qedi, and some ufs, ibmvscsis and ncr5380 updates plus some
  assorted driver fixes and also a fix for the bug where if a device
  goes into a blocked state between configuration and sysfs device add
  (which can be a long time under async probing) it would become
  permanently blocked"

* tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (30 commits)
  scsi: avoid a permanent stop of the scsi device's request queue
  scsi: mpt3sas: Recognize and act on iopriority info
  scsi: qla2xxx: Fix Target mode handling with Multiqueue changes.
  scsi: qla2xxx: Add Block Multi Queue functionality.
  scsi: qla2xxx: Add multiple queue pair functionality.
  scsi: qla2xxx: Utilize pci_alloc_irq_vectors/pci_free_irq_vectors calls.
  scsi: qla2xxx: Only allow operational MBX to proceed during RESET.
  scsi: hpsa: remove memory allocate failure message
  scsi: Update 3ware driver email addresses
  scsi: zfcp: fix rport unblock race with LUN recovery
  scsi: zfcp: do not trace pure benign residual HBA responses at default level
  scsi: zfcp: fix use-after-"free" in FC ingress path after TMF
  scsi: libcxgbi: return error if interface is not up
  scsi: cxgb4i: libcxgbi: add missing module_put()
  scsi: cxgb4i: libcxgbi: cxgb4: add T6 iSCSI completion feature
  scsi: cxgb4i: libcxgbi: add active open cmd for T6 adapters
  scsi: cxgb4i: use cxgb4_tp_smt_idx() to get smt_idx
  scsi: qedi: Add QLogic FastLinQ offload iSCSI driver framework.
  scsi: aacraid: remove wildcard for series 9 controllers
  scsi: ibmvscsi: add write memory barrier to CRQ processing
  ...
-rw-r--r-- Documentation/scsi/g_NCR5380.txt | 46
-rw-r--r-- MAINTAINERS | 8
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 13
-rw-r--r-- drivers/s390/scsi/zfcp_dbf.c | 17
-rw-r--r-- drivers/s390/scsi/zfcp_dbf.h | 41
-rw-r--r-- drivers/s390/scsi/zfcp_erp.c | 61
-rw-r--r-- drivers/s390/scsi/zfcp_ext.h | 4
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.h | 3
-rw-r--r-- drivers/s390/scsi/zfcp_reqlist.h | 30
-rw-r--r-- drivers/s390/scsi/zfcp_scsi.c | 61
-rw-r--r-- drivers/scsi/3w-9xxx.c | 9
-rw-r--r-- drivers/scsi/3w-9xxx.h | 9
-rw-r--r-- drivers/scsi/3w-sas.c | 7
-rw-r--r-- drivers/scsi/3w-sas.h | 7
-rw-r--r-- drivers/scsi/3w-xxxx.c | 7
-rw-r--r-- drivers/scsi/3w-xxxx.h | 5
-rw-r--r-- drivers/scsi/Kconfig | 1
-rw-r--r-- drivers/scsi/Makefile | 1
-rw-r--r-- drivers/scsi/NCR5380.c | 77
-rw-r--r-- drivers/scsi/NCR5380.h | 11
-rw-r--r-- drivers/scsi/aacraid/linit.c | 2
-rw-r--r-- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 320
-rw-r--r-- drivers/scsi/cxgbi/libcxgbi.c | 40
-rw-r--r-- drivers/scsi/cxgbi/libcxgbi.h | 2
-rw-r--r-- drivers/scsi/g_NCR5380.c | 153
-rw-r--r-- drivers/scsi/g_NCR5380.h | 2
-rw-r--r-- drivers/scsi/hpsa.c | 37
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvscsi.c | 7
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvscsi.h | 1
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.h | 6
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_ctl.c | 43
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 34
-rw-r--r-- drivers/scsi/qedi/Kconfig | 10
-rw-r--r-- drivers/scsi/qedi/Makefile | 5
-rw-r--r-- drivers/scsi/qedi/qedi.h | 364
-rw-r--r-- drivers/scsi/qedi/qedi_dbg.c | 143
-rw-r--r-- drivers/scsi/qedi/qedi_dbg.h | 144
-rw-r--r-- drivers/scsi/qedi/qedi_debugfs.c | 244
-rw-r--r-- drivers/scsi/qedi/qedi_fw.c | 2378
-rw-r--r-- drivers/scsi/qedi/qedi_gbl.h | 73
-rw-r--r-- drivers/scsi/qedi/qedi_hsi.h | 52
-rw-r--r-- drivers/scsi/qedi/qedi_iscsi.c | 1624
-rw-r--r-- drivers/scsi/qedi/qedi_iscsi.h | 232
-rw-r--r-- drivers/scsi/qedi/qedi_main.c | 2127
-rw-r--r-- drivers/scsi/qedi/qedi_sysfs.c | 52
-rw-r--r-- drivers/scsi/qedi/qedi_version.h | 14
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c | 36
-rw-r--r-- drivers/scsi/qla2xxx/qla_dbg.c | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_def.h | 108
-rw-r--r-- drivers/scsi/qla2xxx/qla_gbl.h | 28
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 173
-rw-r--r-- drivers/scsi/qla2xxx/qla_inline.h | 30
-rw-r--r-- drivers/scsi/qla2xxx/qla_iocb.c | 407
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 223
-rw-r--r-- drivers/scsi/qla2xxx/qla_mbx.c | 85
-rw-r--r-- drivers/scsi/qla2xxx/qla_mid.c | 116
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 475
-rw-r--r-- drivers/scsi/scsi_sysfs.c | 4
-rw-r--r-- drivers/scsi/ufs/ufs-qcom.c | 44
-rw-r--r-- drivers/scsi/ufs/ufs-qcom.h | 1
-rw-r--r-- drivers/scsi/ufs/ufs_quirks.h | 30
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 55
-rw-r--r-- drivers/scsi/ufs/ufshcd.h | 12
-rw-r--r-- drivers/scsi/ufs/ufshci.h | 7
64 files changed, 9670 insertions(+), 695 deletions(-)
diff --git a/Documentation/scsi/g_NCR5380.txt b/Documentation/scsi/g_NCR5380.txt
index e2c187947e58..37b1967a00a9 100644
--- a/Documentation/scsi/g_NCR5380.txt
+++ b/Documentation/scsi/g_NCR5380.txt
@@ -6,17 +6,15 @@ NCR53c400 extensions (c) 1994,1995,1996 Kevin Lentin
 This file documents the NCR53c400 extensions by Kevin Lentin and some
 enhancements to the NCR5380 core.
 
-This driver supports both NCR5380 and NCR53c400 cards in port or memory
-mapped modes. Currently this driver can only support one of those mapping
-modes at a time but it does support both of these chips at the same time.
-The next release of this driver will support port & memory mapped cards at
-the same time. It should be able to handle multiple different cards in the
-same machine.
+This driver supports NCR5380 and NCR53c400 and compatible cards in port or
+memory mapped modes.
 
-The drivers/scsi/Makefile has an override in it for the most common
-NCR53c400 card, the Trantor T130B in its default configuration:
- Port: 0x350
- IRQ : 5
+Use of an interrupt is recommended, if supported by the board, as this will
+allow targets to disconnect and thereby improve SCSI bus utilization.
+
+If the irq parameter is 254 or is omitted entirely, the driver will probe
+for the correct IRQ line automatically. If the irq parameter is 0 or 255
+then no IRQ will be used.
 
 The NCR53c400 does not support DMA but it does have Pseudo-DMA which is
 supported by the driver.
@@ -47,22 +45,24 @@ These old-style parameters can support only one card:
   dtc_3181e=1 to set up for a Domex Technology Corp 3181E board
   hp_c2502=1  to set up for a Hewlett Packard C2502 board
 
-e.g.
-OLD: modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1
-NEW: modprobe g_NCR5380 irq=5 base=0x350 card=0
-    for a port mapped NCR5380 board or
-
-OLD: modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1
-NEW: modprobe g_NCR5380 irq=255 base=0xc8000 card=1
-    for a memory mapped NCR53C400 board with interrupts disabled or
+E.g. Trantor T130B in its default configuration:
+modprobe g_NCR5380 irq=5 base=0x350 card=1
+or alternatively, using the old syntax,
+modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_53c400=1
 
-NEW: modprobe g_NCR5380 irq=0,7 base=0x240,0x300 card=3,4
-    for two cards: DTC3181 (in non-PnP mode) at 0x240 with no IRQ
-    and HP C2502 at 0x300 with IRQ 7
+E.g. a port mapped NCR5380 board, driver to probe for IRQ:
+modprobe g_NCR5380 base=0x350 card=0
+or alternatively,
+modprobe g_NCR5380 ncr_addr=0x350 ncr_5380=1
 
-(255 should be specified for no or DMA interrupt, 254 to autoprobe for an
- IRQ line if overridden on the command line.)
+E.g. a memory mapped NCR53C400 board with no IRQ:
+modprobe g_NCR5380 irq=255 base=0xc8000 card=1
+or alternatively,
+modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1
 
+E.g. two cards, DTC3181 (in non-PnP mode) at 0x240 with no IRQ
+and HP C2502 at 0x300 with IRQ 7:
+modprobe g_NCR5380 irq=0,7 base=0x240,0x300 card=3,4
 
 Kevin Lentin
 K.Lentin@cs.monash.edu.au
diff --git a/MAINTAINERS b/MAINTAINERS
index c7b8cf1240d9..979126a9a150 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -143,7 +143,7 @@ S:	Maintained
 F:	drivers/net/ethernet/3com/typhoon*
 
 3WARE SAS/SATA-RAID SCSI DRIVERS (3W-XXXX, 3W-9XXX, 3W-SAS)
-M:	Adam Radford <linuxraid@lsi.com>
+M:	Adam Radford <aradford@gmail.com>
 L:	linux-scsi@vger.kernel.org
 W:	http://www.lsi.com
 S:	Supported
@@ -10136,6 +10136,12 @@ F: drivers/net/ethernet/qlogic/qed/
 F:	include/linux/qed/
 F:	drivers/net/ethernet/qlogic/qede/
 
+QLOGIC QL41xxx ISCSI DRIVER
+M:	QLogic-Storage-Upstream@cavium.com
+L:	linux-scsi@vger.kernel.org
+S:	Supported
+F:	drivers/scsi/qedi/
+
 QNX4 FILESYSTEM
 M:	Anders Larsen <al@alarsen.net>
 W:	http://www.alarsen.net/linux/qnx4fs/
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index fba3b2ad382d..a267173f5997 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -76,6 +76,7 @@ enum {
 	CPL_PASS_ESTABLISH    = 0x41,
 	CPL_RX_DATA_DDP       = 0x42,
 	CPL_PASS_ACCEPT_REQ   = 0x44,
+	CPL_RX_ISCSI_CMP      = 0x45,
 	CPL_TRACE_PKT_T5      = 0x48,
 	CPL_RX_ISCSI_DDP      = 0x49,
 
@@ -934,6 +935,18 @@ struct cpl_iscsi_data {
 	__u8 status;
 };
 
+struct cpl_rx_iscsi_cmp {
+	union opcode_tid ot;
+	__be16 pdu_len_ddp;
+	__be16 len;
+	__be32 seq;
+	__be16 urg;
+	__u8 rsvd;
+	__u8 status;
+	__be32 ulp_crc;
+	__be32 ddpvld;
+};
+
 struct cpl_tx_data_iso {
 	__be32 op_to_scsi;
 	__u8 reserved1;
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 581001989937..d5bf36ec8a75 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -289,11 +289,12 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
 
 
 /**
- * zfcp_dbf_rec_run - trace event related to running recovery
+ * zfcp_dbf_rec_run_lvl - trace event related to running recovery
+ * @level: trace level to be used for event
  * @tag: identifier for event
  * @erp: erp_action running
  */
-void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
+void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
 {
 	struct zfcp_dbf *dbf = erp->adapter->dbf;
 	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
@@ -319,11 +320,21 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
 	else
 		rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);
 
-	debug_event(dbf->rec, 1, rec, sizeof(*rec));
+	debug_event(dbf->rec, level, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
 
 /**
+ * zfcp_dbf_rec_run - trace event related to running recovery
+ * @tag: identifier for event
+ * @erp: erp_action running
+ */
+void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
+{
+	zfcp_dbf_rec_run_lvl(1, tag, erp);
+}
+
+/**
  * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
  * @tag: identifier for event
  * @wka_port: well known address port
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 36d07584271d..db186d44cfaf 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -2,7 +2,7 @@
  * zfcp device driver
  * debug feature declarations
  *
- * Copyright IBM Corp. 2008, 2015
+ * Copyright IBM Corp. 2008, 2016
  */
 
 #ifndef ZFCP_DBF_H
@@ -283,6 +283,30 @@ struct zfcp_dbf {
 	struct zfcp_dbf_scsi scsi_buf;
 };
 
+/**
+ * zfcp_dbf_hba_fsf_resp_suppress - true if we should not trace by default
+ * @req: request that has been completed
+ *
+ * Returns true if FCP response with only benign residual under count.
+ */
+static inline
+bool zfcp_dbf_hba_fsf_resp_suppress(struct zfcp_fsf_req *req)
+{
+	struct fsf_qtcb *qtcb = req->qtcb;
+	u32 fsf_stat = qtcb->header.fsf_status;
+	struct fcp_resp *fcp_rsp;
+	u8 rsp_flags, fr_status;
+
+	if (qtcb->prefix.qtcb_type != FSF_IO_COMMAND)
+		return false; /* not an FCP response */
+	fcp_rsp = (struct fcp_resp *)&qtcb->bottom.io.fcp_rsp;
+	rsp_flags = fcp_rsp->fr_flags;
+	fr_status = fcp_rsp->fr_status;
+	return (fsf_stat == FSF_FCP_RSP_AVAILABLE) &&
+	       (rsp_flags == FCP_RESID_UNDER) &&
+	       (fr_status == SAM_STAT_GOOD);
+}
+
 static inline
 void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
 {
@@ -304,7 +328,9 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
 		zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
 
 	} else if (qtcb->header.fsf_status != FSF_GOOD) {
-		zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req);
+		zfcp_dbf_hba_fsf_resp("fs_ferr",
+				      zfcp_dbf_hba_fsf_resp_suppress(req)
+				      ? 5 : 1, req);
 
 	} else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
 		   (req->fsf_command == FSF_QTCB_OPEN_LUN)) {
@@ -388,4 +414,15 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
 	_zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
 }
 
+/**
+ * zfcp_dbf_scsi_nullcmnd() - trace NULLify of SCSI command in dev/tgt-reset.
+ * @scmnd: SCSI command that was NULLified.
+ * @fsf_req: request that owned @scmnd.
+ */
+static inline void zfcp_dbf_scsi_nullcmnd(struct scsi_cmnd *scmnd,
+					  struct zfcp_fsf_req *fsf_req)
+{
+	_zfcp_dbf_scsi("scfc__1", 3, scmnd, fsf_req);
+}
+
 #endif /* ZFCP_DBF_H */
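A note on the suppression mechanics above: the predicate only chooses a trace level (5 instead of 1); whether the record is actually kept is decided by the s390 debug facility's level filter when debug_event() runs. A minimal sketch of that gate, under the assumption (not shown in this patch) that an event is recorded only when its level is at or below the area's current level, with 3 as the default:

	/* Sketch only, not zfcp code: how a level-gated trace sink behaves.
	 * Assumption: events with a level above the area's current level
	 * (default 3) are dropped, so benign-residual responses traced at
	 * level 5 stay out of the log until the level is raised.
	 */
	struct dbg_area {
		int level;	/* current trace level, e.g. 3 by default */
	};

	static void dbg_event(struct dbg_area *area, int event_level,
			      const void *rec, unsigned long len)
	{
		if (event_level > area->level)
			return;	/* suppressed: the "fs_ferr" at level 5 case */
		/* otherwise (rec, len) would be copied into the trace buffer */
	}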
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index a59d678125bd..7ccfce559034 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -3,7 +3,7 @@
  *
  * Error Recovery Procedures (ERP).
  *
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -1204,6 +1204,62 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
 	}
 }
 
+/**
+ * zfcp_erp_try_rport_unblock - unblock rport if no more/new recovery
+ * @port: zfcp_port whose fc_rport we should try to unblock
+ */
+static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
+{
+	unsigned long flags;
+	struct zfcp_adapter *adapter = port->adapter;
+	int port_status;
+	struct Scsi_Host *shost = adapter->scsi_host;
+	struct scsi_device *sdev;
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	port_status = atomic_read(&port->status);
+	if ((port_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
+	    (port_status & (ZFCP_STATUS_COMMON_ERP_INUSE |
+			    ZFCP_STATUS_COMMON_ERP_FAILED)) != 0) {
+		/* new ERP of severity >= port triggered elsewhere meanwhile or
+		 * local link down (adapter erp_failed but not clear unblock)
+		 */
+		zfcp_dbf_rec_run_lvl(4, "ertru_p", &port->erp_action);
+		write_unlock_irqrestore(&adapter->erp_lock, flags);
+		return;
+	}
+	spin_lock(shost->host_lock);
+	__shost_for_each_device(sdev, shost) {
+		struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
+		int lun_status;
+
+		if (zsdev->port != port)
+			continue;
+		/* LUN under port of interest */
+		lun_status = atomic_read(&zsdev->status);
+		if ((lun_status & ZFCP_STATUS_COMMON_ERP_FAILED) != 0)
+			continue; /* unblock rport despite failed LUNs */
+		/* LUN recovery not given up yet [maybe follow-up pending] */
+		if ((lun_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
+		    (lun_status & ZFCP_STATUS_COMMON_ERP_INUSE) != 0) {
+			/* LUN blocked:
+			 * not yet unblocked [LUN recovery pending]
+			 * or meanwhile blocked [new LUN recovery triggered]
+			 */
+			zfcp_dbf_rec_run_lvl(4, "ertru_l", &zsdev->erp_action);
+			spin_unlock(shost->host_lock);
+			write_unlock_irqrestore(&adapter->erp_lock, flags);
+			return;
+		}
+	}
+	/* now port has no child or all children have completed recovery,
+	 * and no ERP of severity >= port was meanwhile triggered elsewhere
+	 */
+	zfcp_scsi_schedule_rport_register(port);
+	spin_unlock(shost->host_lock);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
 static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
 {
 	struct zfcp_adapter *adapter = act->adapter;
@@ -1214,6 +1270,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
 	case ZFCP_ERP_ACTION_REOPEN_LUN:
 		if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
 			scsi_device_put(sdev);
+		zfcp_erp_try_rport_unblock(port);
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
@@ -1224,7 +1281,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
 		 */
 		if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
 			if (result == ZFCP_ERP_SUCCEEDED)
-				zfcp_scsi_schedule_rport_register(port);
+				zfcp_erp_try_rport_unblock(port);
 		/* fall through */
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
 		put_device(&port->dev);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 968a0ab4b398..9afdbc32b23f 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
  *
  * External function declarations.
  *
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
  */
 
 #ifndef ZFCP_EXT_H
@@ -35,6 +35,8 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
 extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
 			      struct zfcp_port *, struct scsi_device *, u8, u8);
 extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
+extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
+				 struct zfcp_erp_action *erp);
 extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
 extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index be1c04b334c5..ea3c76ac0de1 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -3,7 +3,7 @@
  *
  * Interface to the FSF support functions.
  *
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
  */
 
 #ifndef FSF_H
@@ -78,6 +78,7 @@
 #define FSF_APP_TAG_CHECK_FAILURE		0x00000082
 #define FSF_REF_TAG_CHECK_FAILURE		0x00000083
 #define FSF_ADAPTER_STATUS_AVAILABLE		0x000000AD
+#define FSF_FCP_RSP_AVAILABLE			0x000000AF
 #define FSF_UNKNOWN_COMMAND			0x000000E2
 #define FSF_UNKNOWN_OP_SUBTYPE			0x000000E3
 #define FSF_INVALID_COMMAND_OPTION		0x000000E5
diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
index 7c2c6194dfca..703fce59befe 100644
--- a/drivers/s390/scsi/zfcp_reqlist.h
+++ b/drivers/s390/scsi/zfcp_reqlist.h
@@ -4,7 +4,7 @@
  * Data structure and helper functions for tracking pending FSF
  * requests.
  *
- * Copyright IBM Corp. 2009
+ * Copyright IBM Corp. 2009, 2016
  */
 
 #ifndef ZFCP_REQLIST_H
@@ -180,4 +180,32 @@ static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
 	spin_unlock_irqrestore(&rl->lock, flags);
 }
 
+/**
+ * zfcp_reqlist_apply_for_all() - apply a function to every request.
+ * @rl: the requestlist that contains the target requests.
+ * @f: the function to apply to each request; the first parameter of the
+ *     function will be the target-request; the second parameter is the same
+ *     pointer as given with the argument @data.
+ * @data: freely chosen argument; passed through to @f as second parameter.
+ *
+ * Uses :c:macro:`list_for_each_entry` to iterate over the lists in the hash-
+ * table (not a 'safe' variant, so don't modify the list).
+ *
+ * Holds @rl->lock over the entire request-iteration.
+ */
+static inline void
+zfcp_reqlist_apply_for_all(struct zfcp_reqlist *rl,
+			   void (*f)(struct zfcp_fsf_req *, void *), void *data)
+{
+	struct zfcp_fsf_req *req;
+	unsigned long flags;
+	unsigned int i;
+
+	spin_lock_irqsave(&rl->lock, flags);
+	for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+		list_for_each_entry(req, &rl->buckets[i], list)
+			f(req, data);
+	spin_unlock_irqrestore(&rl->lock, flags);
+}
+
 #endif /* ZFCP_REQLIST_H */
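The kernel-doc above fixes the new iterator's contract: @f is invoked for every pending request while rl->lock is held, so the callback must be short and non-sleeping. A minimal usage sketch against that contract (the counting callback is hypothetical and assumes the declarations from this header; the real user added by this series is zfcp_scsi_forget_cmnd() in the zfcp_scsi.c diff below):

	/* Hypothetical, non-sleeping callback: count pending FSF requests. */
	static void zfcp_count_one(struct zfcp_fsf_req *req, void *data)
	{
		unsigned int *count = data;

		(*count)++;
	}

	static unsigned int zfcp_count_pending(struct zfcp_reqlist *rl)
	{
		unsigned int count = 0;

		zfcp_reqlist_apply_for_all(rl, zfcp_count_one, &count);
		return count;
	}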
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 9069f98a1817..07ffdbb5107f 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
  *
  * Interface to Linux SCSI midlayer.
  *
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -88,9 +88,7 @@ int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
 	}
 
 	if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
-		/* This could be either
-		 * open LUN pending: this is temporary, will result in
-		 * open LUN or ERP_FAILED, so retry command
+		/* This could be
 		 * call to rport_delete pending: mimic retry from
 		 * fc_remote_port_chkready until rport is BLOCKED
 		 */
@@ -209,6 +207,57 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 	return retval;
 }
 
+struct zfcp_scsi_req_filter {
+	u8 tmf_scope;
+	u32 lun_handle;
+	u32 port_handle;
+};
+
+static void zfcp_scsi_forget_cmnd(struct zfcp_fsf_req *old_req, void *data)
+{
+	struct zfcp_scsi_req_filter *filter =
+		(struct zfcp_scsi_req_filter *)data;
+
+	/* already aborted - prevent side-effects - or not a SCSI command */
+	if (old_req->data == NULL || old_req->fsf_command != FSF_QTCB_FCP_CMND)
+		return;
+
+	/* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */
+	if (old_req->qtcb->header.port_handle != filter->port_handle)
+		return;
+
+	if (filter->tmf_scope == FCP_TMF_LUN_RESET &&
+	    old_req->qtcb->header.lun_handle != filter->lun_handle)
+		return;
+
+	zfcp_dbf_scsi_nullcmnd((struct scsi_cmnd *)old_req->data, old_req);
+	old_req->data = NULL;
+}
+
+static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
+{
+	struct zfcp_adapter *adapter = zsdev->port->adapter;
+	struct zfcp_scsi_req_filter filter = {
+		.tmf_scope = FCP_TMF_TGT_RESET,
+		.port_handle = zsdev->port->handle,
+	};
+	unsigned long flags;
+
+	if (tm_flags == FCP_TMF_LUN_RESET) {
+		filter.tmf_scope = FCP_TMF_LUN_RESET;
+		filter.lun_handle = zsdev->lun_handle;
+	}
+
+	/*
+	 * abort_lock secures against other processings - in the abort-function
+	 * and normal cmnd-handler - of (struct zfcp_fsf_req *)->data
+	 */
+	write_lock_irqsave(&adapter->abort_lock, flags);
+	zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd,
+				   &filter);
+	write_unlock_irqrestore(&adapter->abort_lock, flags);
+}
+
 static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
@@ -241,8 +290,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
 	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
 		zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
 		retval = FAILED;
-	} else
+	} else {
 		zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
+		zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
+	}
 
 	zfcp_fsf_req_free(fsf_req);
 	return retval;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index a56a7b243e91..316f87fe3299 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1,8 +1,8 @@
 /*
    3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
 
-   Written By: Adam Radford <linuxraid@lsi.com>
-   Modifications By: Tom Couch <linuxraid@lsi.com>
+   Written By: Adam Radford <aradford@gmail.com>
+   Modifications By: Tom Couch
 
    Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
    Copyright (C) 2010 LSI Corporation.
@@ -41,10 +41,7 @@
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 
    Bugs/Comments/Suggestions should be mailed to:
-   linuxraid@lsi.com
-
-   For more information, goto:
-   http://www.lsi.com
+   aradford@gmail.com
 
    Note: This version of the driver does not contain a bundled firmware
    image.
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index 0fdc83cfa0e1..b6c208cc474f 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -1,8 +1,8 @@
 /*
    3w-9xxx.h -- 3ware 9000 Storage Controller device driver for Linux.
 
-   Written By: Adam Radford <linuxraid@lsi.com>
-   Modifications By: Tom Couch <linuxraid@lsi.com>
+   Written By: Adam Radford <aradford@gmail.com>
+   Modifications By: Tom Couch
 
    Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
    Copyright (C) 2010 LSI Corporation.
@@ -41,10 +41,7 @@
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 
    Bugs/Comments/Suggestions should be mailed to:
-   linuxraid@lsi.com
-
-   For more information, goto:
-   http://www.lsi.com
+   aradford@gmail.com
 */
 
 #ifndef _3W_9XXX_H
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index f8374850f714..970d8fa6bd53 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1,7 +1,7 @@
 /*
    3w-sas.c -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
 
-   Written By: Adam Radford <linuxraid@lsi.com>
+   Written By: Adam Radford <aradford@gmail.com>
 
    Copyright (C) 2009 LSI Corporation.
 
@@ -43,10 +43,7 @@
    LSI 3ware 9750 6Gb/s SAS/SATA-RAID
 
    Bugs/Comments/Suggestions should be mailed to:
-   linuxraid@lsi.com
-
-   For more information, goto:
-   http://www.lsi.com
+   aradford@gmail.com
 
    History
    -------
diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
index fec6449c7595..05e77d84c16d 100644
--- a/drivers/scsi/3w-sas.h
+++ b/drivers/scsi/3w-sas.h
@@ -1,7 +1,7 @@
 /*
    3w-sas.h -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
 
-   Written By: Adam Radford <linuxraid@lsi.com>
+   Written By: Adam Radford <aradford@gmail.com>
 
    Copyright (C) 2009 LSI Corporation.
 
@@ -39,10 +39,7 @@
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 
    Bugs/Comments/Suggestions should be mailed to:
-   linuxraid@lsi.com
-
-   For more information, goto:
-   http://www.lsi.com
+   aradford@gmail.com
 */
 
 #ifndef _3W_SAS_H
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 25aba1613e21..aa412ab02765 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1,7 +1,7 @@
 /*
    3w-xxxx.c -- 3ware Storage Controller device driver for Linux.
 
-   Written By: Adam Radford <linuxraid@lsi.com>
+   Written By: Adam Radford <aradford@gmail.com>
    Modifications By: Joel Jacobson <linux@3ware.com>
                      Arnaldo Carvalho de Melo <acme@conectiva.com.br>
                      Brad Strand <linux@3ware.com>
@@ -47,10 +47,9 @@
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 
    Bugs/Comments/Suggestions should be mailed to:
-   linuxraid@lsi.com
 
-   For more information, goto:
-   http://www.lsi.com
+   aradford@gmail.com
+
 
    History
    -------
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index 6f65e663d393..69e80c1ed1ca 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -1,7 +1,7 @@
 /*
    3w-xxxx.h -- 3ware Storage Controller device driver for Linux.
 
-   Written By: Adam Radford <linuxraid@lsi.com>
+   Written By: Adam Radford <aradford@gmail.com>
    Modifications By: Joel Jacobson <linux@3ware.com>
                      Arnaldo Carvalho de Melo <acme@conectiva.com.br>
                      Brad Strand <linux@3ware.com>
@@ -45,7 +45,8 @@
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 
    Bugs/Comments/Suggestions should be mailed to:
-   linuxraid@lsi.com
+
+   aradford@gmail.com
 
    For more information, goto:
    http://www.lsi.com
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index dfa93347c752..a4f6b0d95515 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1233,6 +1233,7 @@ config SCSI_QLOGICPTI
 
 source "drivers/scsi/qla2xxx/Kconfig"
 source "drivers/scsi/qla4xxx/Kconfig"
+source "drivers/scsi/qedi/Kconfig"
 
 config SCSI_LPFC
 	tristate "Emulex LightPulse Fibre Channel Support"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index a2d03957cbe2..736b77414a4b 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -131,6 +131,7 @@ obj-$(CONFIG_PS3_ROM)		+= ps3rom.o
 obj-$(CONFIG_SCSI_CXGB3_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgbi/
 obj-$(CONFIG_SCSI_CXGB4_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgbi/
 obj-$(CONFIG_SCSI_BNX2_ISCSI)	+= libiscsi.o bnx2i/
+obj-$(CONFIG_QEDI)		+= libiscsi.o qedi/
 obj-$(CONFIG_BE2ISCSI)		+= libiscsi.o be2iscsi/
 obj-$(CONFIG_SCSI_ESAS2R)	+= esas2r/
 obj-$(CONFIG_SCSI_PMCRAID)	+= pmcraid.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index d849ffa378b1..4f5ca794bb71 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -97,9 +97,6 @@
  * and macros and include this file in your driver.
  *
  * These macros control options :
- * AUTOPROBE_IRQ - if defined, the NCR5380_probe_irq() function will be
- *	defined.
- *
  * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
  * for commands that return with a CHECK CONDITION status.
  *
@@ -127,9 +124,7 @@
  * NCR5380_dma_residual - residual byte count
  *
  * The generic driver is initialized by calling NCR5380_init(instance),
- * after setting the appropriate host specific fields and ID. If the
- * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
- * possible) function may be used.
+ * after setting the appropriate host specific fields and ID.
  */
 
 #ifndef NCR5380_io_delay
@@ -351,76 +346,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
 }
 #endif
 
-
-static int probe_irq;
-
-/**
- * probe_intr - helper for IRQ autoprobe
- * @irq: interrupt number
- * @dev_id: unused
- * @regs: unused
- *
- * Set a flag to indicate the IRQ in question was received. This is
- * used by the IRQ probe code.
- */
-
-static irqreturn_t probe_intr(int irq, void *dev_id)
-{
-	probe_irq = irq;
-	return IRQ_HANDLED;
-}
-
-/**
- * NCR5380_probe_irq - find the IRQ of an NCR5380
- * @instance: NCR5380 controller
- * @possible: bitmask of ISA IRQ lines
- *
- * Autoprobe for the IRQ line used by the NCR5380 by triggering an IRQ
- * and then looking to see what interrupt actually turned up.
- */
-
-static int __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
-					    int possible)
-{
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-	unsigned long timeout;
-	int trying_irqs, i, mask;
-
-	for (trying_irqs = 0, i = 1, mask = 2; i < 16; ++i, mask <<= 1)
-		if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0))
-			trying_irqs |= mask;
-
-	timeout = jiffies + msecs_to_jiffies(250);
-	probe_irq = NO_IRQ;
-
-	/*
-	 * A interrupt is triggered whenever BSY = false, SEL = true
-	 * and a bit set in the SELECT_ENABLE_REG is asserted on the
-	 * SCSI bus.
-	 *
-	 * Note that the bus is only driven when the phase control signals
-	 * (I/O, C/D, and MSG) match those in the TCR, so we must reset that
-	 * to zero.
-	 */
-
-	NCR5380_write(TARGET_COMMAND_REG, 0);
-	NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-	NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
-	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL);
-
-	while (probe_irq == NO_IRQ && time_before(jiffies, timeout))
-		schedule_timeout_uninterruptible(1);
-
-	NCR5380_write(SELECT_ENABLE_REG, 0);
-	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-
-	for (i = 1, mask = 2; i < 16; ++i, mask <<= 1)
-		if (trying_irqs & mask)
-			free_irq(i, NULL);
-
-	return probe_irq;
-}
-
 /**
  * NCR58380_info - report driver and host information
  * @instance: relevant scsi host instance
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 3c6ce5434449..51a3567a6fb2 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -199,16 +199,6 @@
 
 #define PHASE_SR_TO_TCR(phase) ((phase) >> 2)
 
-/*
- * These are "special" values for the irq and dma_channel fields of the
- * Scsi_Host structure
- */
-
-#define DMA_NONE	255
-#define IRQ_AUTO	254
-#define DMA_AUTO	254
-#define PORT_AUTO	0xffff	/* autoprobe io port for 53c400a */
-
 #ifndef NO_IRQ
 #define NO_IRQ		0
 #endif
@@ -290,7 +280,6 @@ static void NCR5380_print(struct Scsi_Host *instance);
 #define NCR5380_dprint_phase(flg, arg) do {} while (0)
 #endif
 
-static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible);
 static int NCR5380_init(struct Scsi_Host *instance, int flags);
 static int NCR5380_maybe_reset_bus(struct Scsi_Host *);
 static void NCR5380_exit(struct Scsi_Host *instance);
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index e4f3e22fcbd9..3ecbf20ca29f 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -160,7 +160,6 @@ static const struct pci_device_id aac_pci_tbl[] = {
 	{ 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */
 	{ 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */
 	{ 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */
-	{ 0x9005, 0x028f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 65 }, /* Adaptec PMC Series 9 */
 	{ 0,}
 };
 MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
@@ -239,7 +238,6 @@ static struct aac_driver_ident aac_drivers[] = {
 	{ aac_src_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
 	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
 	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
-	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */
 };
 
 /**
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 9e6f647ff1c1..9a2fdc305cf2 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -189,7 +189,6 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 			      struct l2t_entry *e)
 {
 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
-	int t4 = is_t4(lldi->adapter_type);
 	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
 	unsigned long long opt0;
 	unsigned int opt2;
@@ -232,7 +231,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 			  csk, &req->local_ip, ntohs(req->local_port),
 			  &req->peer_ip, ntohs(req->peer_port),
 			  csk->atid, csk->rss_qid);
-	} else {
+	} else if (is_t5(lldi->adapter_type)) {
 		struct cpl_t5_act_open_req *req =
 				(struct cpl_t5_act_open_req *)skb->head;
 		u32 isn = (prandom_u32() & ~7UL) - 1;
@@ -260,12 +259,45 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 			  csk, &req->local_ip, ntohs(req->local_port),
 			  &req->peer_ip, ntohs(req->peer_port),
 			  csk->atid, csk->rss_qid);
+	} else {
+		struct cpl_t6_act_open_req *req =
+				(struct cpl_t6_act_open_req *)skb->head;
+		u32 isn = (prandom_u32() & ~7UL) - 1;
+
+		INIT_TP_WR(req, 0);
+		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+							    qid_atid));
+		req->local_port = csk->saddr.sin_port;
+		req->peer_port = csk->daddr.sin_port;
+		req->local_ip = csk->saddr.sin_addr.s_addr;
+		req->peer_ip = csk->daddr.sin_addr.s_addr;
+		req->opt0 = cpu_to_be64(opt0);
+		req->params = cpu_to_be64(FILTER_TUPLE_V(
+					  cxgb4_select_ntuple(
+					  csk->cdev->ports[csk->port_id],
+					  csk->l2t)));
+		req->rsvd = cpu_to_be32(isn);
+
+		opt2 |= T5_ISS_VALID;
+		opt2 |= RX_FC_DISABLE_F;
+		opt2 |= T5_OPT_2_VALID_F;
+
+		req->opt2 = cpu_to_be32(opt2);
+		req->rsvd2 = cpu_to_be32(0);
+		req->opt3 = cpu_to_be32(0);
+
+		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+			  "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
+			  csk, &req->local_ip, ntohs(req->local_port),
+			  &req->peer_ip, ntohs(req->peer_port),
+			  csk->atid, csk->rss_qid);
 	}
 
 	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
 
 	pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
-		       (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk,
+		       (&csk->saddr), (&csk->daddr),
+		       CHELSIO_CHIP_VERSION(lldi->adapter_type), csk,
 		       csk->state, csk->flags, csk->atid, csk->rss_qid);
 
 	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
@@ -276,7 +308,6 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
 			       struct l2t_entry *e)
 {
 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
-	int t4 = is_t4(lldi->adapter_type);
 	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
 	unsigned long long opt0;
 	unsigned int opt2;
@@ -294,10 +325,9 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
 
 	opt2 = RX_CHANNEL_V(0) |
 		RSS_QUEUE_VALID_F |
-		RX_FC_DISABLE_F |
 		RSS_QUEUE_V(csk->rss_qid);
 
-	if (t4) {
+	if (is_t4(lldi->adapter_type)) {
 		struct cpl_act_open_req6 *req =
 			    (struct cpl_act_open_req6 *)skb->head;
 
@@ -322,7 +352,7 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
 		req->params = cpu_to_be32(cxgb4_select_ntuple(
 					  csk->cdev->ports[csk->port_id],
 					  csk->l2t));
-	} else {
+	} else if (is_t5(lldi->adapter_type)) {
 		struct cpl_t5_act_open_req6 *req =
 				(struct cpl_t5_act_open_req6 *)skb->head;
 
@@ -345,12 +375,41 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
 		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
 					  csk->cdev->ports[csk->port_id],
 					  csk->l2t)));
+	} else {
+		struct cpl_t6_act_open_req6 *req =
+				(struct cpl_t6_act_open_req6 *)skb->head;
+
+		INIT_TP_WR(req, 0);
+		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+							    qid_atid));
+		req->local_port = csk->saddr6.sin6_port;
+		req->peer_port = csk->daddr6.sin6_port;
+		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
+		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
+					       8);
+		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
+		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
+					      8);
+		req->opt0 = cpu_to_be64(opt0);
+
+		opt2 |= RX_FC_DISABLE_F;
+		opt2 |= T5_OPT_2_VALID_F;
+
+		req->opt2 = cpu_to_be32(opt2);
+
+		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
+					  csk->cdev->ports[csk->port_id],
+					  csk->l2t)));
+
+		req->rsvd2 = cpu_to_be32(0);
+		req->opt3 = cpu_to_be32(0);
 	}
 
 	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
 
 	pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
-		t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid,
+		CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state,
+		csk->flags, csk->atid,
 		&csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
 		&csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
 		csk->rss_qid);
@@ -742,7 +801,7 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
 		       (&csk->saddr), (&csk->daddr),
 		       atid, tid, csk, csk->state, csk->flags, rcv_isn);
 
-	module_put(THIS_MODULE);
+	module_put(cdev->owner);
 
 	cxgbi_sock_get(csk);
 	csk->tid = tid;
@@ -891,7 +950,7 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
 	if (is_neg_adv(status))
 		goto rel_skb;
 
-	module_put(THIS_MODULE);
+	module_put(cdev->owner);
 
 	if (status && status != CPL_ERR_TCAM_FULL &&
 	    status != CPL_ERR_CONN_EXIST &&
@@ -1173,6 +1232,101 @@ rel_skb:
 	__kfree_skb(skb);
 }
 
+static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk;
+	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
+	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+	struct tid_info *t = lldi->tids;
+	struct sk_buff *lskb;
+	u32 tid = GET_TID(cpl);
+	u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
+
+	csk = lookup_tid(t, tid);
+	if (unlikely(!csk)) {
+		pr_err("can't find conn. for tid %u.\n", tid);
+		goto rel_skb;
+	}
+
+	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+		  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
+		  csk, csk->state, csk->flags, csk->tid, skb,
+		  skb->len, pdu_len_ddp);
+
+	spin_lock_bh(&csk->lock);
+
+	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
+		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
+			  csk, csk->state, csk->flags, csk->tid);
+
+		if (csk->state != CTP_ABORTING)
+			goto abort_conn;
+		else
+			goto discard;
+	}
+
+	cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq);
+	cxgbi_skcb_flags(skb) = 0;
+
+	skb_reset_transport_header(skb);
+	__skb_pull(skb, sizeof(*cpl));
+	__pskb_trim(skb, ntohs(cpl->len));
+
+	if (!csk->skb_ulp_lhdr)
+		csk->skb_ulp_lhdr = skb;
+
+	lskb = csk->skb_ulp_lhdr;
+	cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
+
+	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+		  "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
+		  csk, csk->state, csk->flags, skb, lskb);
+
+	__skb_queue_tail(&csk->receive_queue, skb);
+	spin_unlock_bh(&csk->lock);
+	return;
+
+abort_conn:
+	send_abort_req(csk);
+discard:
+	spin_unlock_bh(&csk->lock);
+rel_skb:
+	__kfree_skb(skb);
+}
+
+static void
+cxgb4i_process_ddpvld(struct cxgbi_sock *csk,
+		      struct sk_buff *skb, u32 ddpvld)
+{
+	if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
+		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
+			csk, skb, ddpvld, cxgbi_skcb_flags(skb));
+		cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
+	}
+
+	if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
+		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
+			csk, skb, ddpvld, cxgbi_skcb_flags(skb));
+		cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
+	}
+
+	if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
+		log_debug(1 << CXGBI_DBG_PDU_RX,
+			  "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
+			  csk, skb, ddpvld);
+		cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
+	}
+
+	if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
+	    !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
+		log_debug(1 << CXGBI_DBG_PDU_RX,
+			  "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
+			  csk, skb, ddpvld);
+		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
+	}
+}
+
 static void do_rx_data_ddp(struct cxgbi_device *cdev,
 			   struct sk_buff *skb)
 {
@@ -1182,7 +1336,7 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
 	unsigned int tid = GET_TID(rpl);
 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
 	struct tid_info *t = lldi->tids;
-	unsigned int status = ntohl(rpl->ddpvld);
+	u32 ddpvld = be32_to_cpu(rpl->ddpvld);
 
 	csk = lookup_tid(t, tid);
 	if (unlikely(!csk)) {
@@ -1192,7 +1346,7 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
 
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
 		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
-		  csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);
+		  csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr);
 
 	spin_lock_bh(&csk->lock);
 
@@ -1220,29 +1374,8 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
 		pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
 			csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));
 
-	if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
-		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
-			csk, lskb, status, cxgbi_skcb_flags(lskb));
-		cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
-	}
-	if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
-		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
-			csk, lskb, status, cxgbi_skcb_flags(lskb));
-		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
-	}
-	if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
-		log_debug(1 << CXGBI_DBG_PDU_RX,
-			  "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
-			  csk, lskb, status);
-		cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
-	}
-	if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
-	    !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
-		log_debug(1 << CXGBI_DBG_PDU_RX,
-			  "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
-			  csk, lskb, status);
-		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
-	}
+	cxgb4i_process_ddpvld(csk, lskb, ddpvld);
+
 	log_debug(1 << CXGBI_DBG_PDU_RX,
 		  "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
 		  csk, lskb, cxgbi_skcb_flags(lskb));
@@ -1260,6 +1393,98 @@ rel_skb:
 	__kfree_skb(skb);
 }
 
+static void
+do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk;
+	struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data;
+	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+	struct tid_info *t = lldi->tids;
+	struct sk_buff *data_skb = NULL;
+	u32 tid = GET_TID(rpl);
+	u32 ddpvld = be32_to_cpu(rpl->ddpvld);
+	u32 seq = be32_to_cpu(rpl->seq);
+	u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp);
+
+	csk = lookup_tid(t, tid);
+	if (unlikely(!csk)) {
+		pr_err("can't find connection for tid %u.\n", tid);
+		goto rel_skb;
+	}
+
+	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, "
+		  "pdu_len_ddp %u, status %u.\n",
+		  csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
+		  ntohs(rpl->len), pdu_len_ddp, rpl->status);
+
+	spin_lock_bh(&csk->lock);
+
+	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
+		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
+			  csk, csk->state, csk->flags, csk->tid);
+
+		if (csk->state != CTP_ABORTING)
+			goto abort_conn;
+		else
+			goto discard;
+	}
+
+	cxgbi_skcb_tcp_seq(skb) = seq;
+	cxgbi_skcb_flags(skb) = 0;
+	cxgbi_skcb_rx_pdulen(skb) = 0;
+
+	skb_reset_transport_header(skb);
+	__skb_pull(skb, sizeof(*rpl));
+	__pskb_trim(skb, be16_to_cpu(rpl->len));
+
+	csk->rcv_nxt = seq + pdu_len_ddp;
+
+	if (csk->skb_ulp_lhdr) {
+		data_skb = skb_peek(&csk->receive_queue);
+		if (!data_skb ||
+		    !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) {
+			pr_err("Error! freelist data not found 0x%p, tid %u\n",
+			       data_skb, tid);
+
+			goto abort_conn;
+		}
+		__skb_unlink(data_skb, &csk->receive_queue);
+
+		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA);
+
+		__skb_queue_tail(&csk->receive_queue, skb);
+		__skb_queue_tail(&csk->receive_queue, data_skb);
+	} else {
+		 __skb_queue_tail(&csk->receive_queue, skb);
+	}
+
+	csk->skb_ulp_lhdr = NULL;
+
+	cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
+	cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
+	cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL);
+	cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc);
+
+	cxgb4i_process_ddpvld(csk, skb, ddpvld);
+
+	log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n",
+		  csk, skb, cxgbi_skcb_flags(skb));
+
+	cxgbi_conn_pdu_ready(csk);
+	spin_unlock_bh(&csk->lock);
+
+	return;
+
+abort_conn:
+	send_abort_req(csk);
+discard:
+	spin_unlock_bh(&csk->lock);
+rel_skb:
+	__kfree_skb(skb);
+}
+
 static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
 {
 	struct cxgbi_sock *csk;
@@ -1382,7 +1607,6 @@ static int init_act_open(struct cxgbi_sock *csk)
 	void *daddr;
 	unsigned int step;
 	unsigned int size, size6;
-	int t4 = is_t4(lldi->adapter_type);
 	unsigned int linkspeed;
 	unsigned int rcv_winf, snd_winf;
 
@@ -1428,12 +1652,15 @@ static int init_act_open(struct cxgbi_sock *csk)
 		cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
 #endif
 
-	if (t4) {
+	if (is_t4(lldi->adapter_type)) {
 		size = sizeof(struct cpl_act_open_req);
 		size6 = sizeof(struct cpl_act_open_req6);
-	} else {
+	} else if (is_t5(lldi->adapter_type)) {
 		size = sizeof(struct cpl_t5_act_open_req);
 		size6 = sizeof(struct cpl_t5_act_open_req6);
+	} else {
+		size = sizeof(struct cpl_t6_act_open_req);
+		size6 = sizeof(struct cpl_t6_act_open_req6);
 	}
 
 	if (csk->csk_family == AF_INET)
@@ -1452,8 +1679,8 @@ static int init_act_open(struct cxgbi_sock *csk)
1452 csk->mtu = dst_mtu(csk->dst); 1679 csk->mtu = dst_mtu(csk->dst);
1453 cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx); 1680 cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
1454 csk->tx_chan = cxgb4_port_chan(ndev); 1681 csk->tx_chan = cxgb4_port_chan(ndev);
1455 /* SMT two entries per row */ 1682 csk->smac_idx = cxgb4_tp_smt_idx(lldi->adapter_type,
1456 csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1; 1683 cxgb4_port_viid(ndev));
1457 step = lldi->ntxq / lldi->nchan; 1684 step = lldi->ntxq / lldi->nchan;
1458 csk->txq_idx = cxgb4_port_idx(ndev) * step; 1685 csk->txq_idx = cxgb4_port_idx(ndev) * step;
1459 step = lldi->nrxq / lldi->nchan; 1686 step = lldi->nrxq / lldi->nchan;
@@ -1486,7 +1713,11 @@ static int init_act_open(struct cxgbi_sock *csk)
1486 csk->mtu, csk->mss_idx, csk->smac_idx); 1713 csk->mtu, csk->mss_idx, csk->smac_idx);
1487 1714
1488 /* must wait for either a act_open_rpl or act_open_establish */ 1715 /* must wait for either a act_open_rpl or act_open_establish */
1489 try_module_get(THIS_MODULE); 1716 if (!try_module_get(cdev->owner)) {
1717 pr_err("%s, try_module_get failed.\n", ndev->name);
1718 goto rel_resource;
1719 }
1720
1490 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); 1721 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
1491 if (csk->csk_family == AF_INET) 1722 if (csk->csk_family == AF_INET)
1492 send_act_open_req(csk, skb, csk->l2t); 1723 send_act_open_req(csk, skb, csk->l2t);
@@ -1521,10 +1752,11 @@ static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
1521 [CPL_CLOSE_CON_RPL] = do_close_con_rpl, 1752 [CPL_CLOSE_CON_RPL] = do_close_con_rpl,
1522 [CPL_FW4_ACK] = do_fw4_ack, 1753 [CPL_FW4_ACK] = do_fw4_ack,
1523 [CPL_ISCSI_HDR] = do_rx_iscsi_hdr, 1754 [CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
1524 [CPL_ISCSI_DATA] = do_rx_iscsi_hdr, 1755 [CPL_ISCSI_DATA] = do_rx_iscsi_data,
1525 [CPL_SET_TCB_RPL] = do_set_tcb_rpl, 1756 [CPL_SET_TCB_RPL] = do_set_tcb_rpl,
1526 [CPL_RX_DATA_DDP] = do_rx_data_ddp, 1757 [CPL_RX_DATA_DDP] = do_rx_data_ddp,
1527 [CPL_RX_ISCSI_DDP] = do_rx_data_ddp, 1758 [CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
1759 [CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp,
1528 [CPL_RX_DATA] = do_rx_data, 1760 [CPL_RX_DATA] = do_rx_data,
1529}; 1761};
1530 1762
@@ -1794,10 +2026,12 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
1794 cdev->nports = lldi->nports; 2026 cdev->nports = lldi->nports;
1795 cdev->mtus = lldi->mtus; 2027 cdev->mtus = lldi->mtus;
1796 cdev->nmtus = NMTUS; 2028 cdev->nmtus = NMTUS;
1797 cdev->rx_credit_thres = cxgb4i_rx_credit_thres; 2029 cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <=
2030 CHELSIO_T5) ? cxgb4i_rx_credit_thres : 0;
1798 cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN; 2031 cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
1799 cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr); 2032 cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
1800 cdev->itp = &cxgb4i_iscsi_transport; 2033 cdev->itp = &cxgb4i_iscsi_transport;
2034 cdev->owner = THIS_MODULE;
1801 2035
1802 cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0])) 2036 cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
1803 << FW_VIID_PFN_S; 2037 << FW_VIID_PFN_S;
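
The four ddpvld checks removed from do_rx_data_ddp() above now live in a
single helper, cxgb4i_process_ddpvld(), whose definition falls outside the
context shown here. A minimal sketch, assuming it mirrors the removed
inline code (the in-tree helper also logs the CRC/pad errors):

	static void cxgb4i_process_ddpvld(struct cxgbi_sock *csk,
					  struct sk_buff *skb, u32 ddpvld)
	{
		if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT))
			cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
		if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT))
			cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
		if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT))
			cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
		if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
		    !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA))
			cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
	}
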
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 2ffe029ff2b6..9167bcd9fffe 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -642,6 +642,12 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
642 n->dev->name, ndev->name, mtu); 642 n->dev->name, ndev->name, mtu);
643 } 643 }
644 644
645 if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
646 pr_info("%s interface not up.\n", ndev->name);
647 err = -ENETDOWN;
648 goto rel_neigh;
649 }
650
645 cdev = cxgbi_device_find_by_netdev(ndev, &port); 651 cdev = cxgbi_device_find_by_netdev(ndev, &port);
646 if (!cdev) { 652 if (!cdev) {
647 pr_info("dst %pI4, %s, NOT cxgbi device.\n", 653 pr_info("dst %pI4, %s, NOT cxgbi device.\n",
@@ -736,6 +742,12 @@ static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr)
736 } 742 }
737 ndev = n->dev; 743 ndev = n->dev;
738 744
745 if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
746 pr_info("%s interface not up.\n", ndev->name);
747 err = -ENETDOWN;
748 goto rel_rt;
749 }
750
739 if (ipv6_addr_is_multicast(&daddr6->sin6_addr)) { 751 if (ipv6_addr_is_multicast(&daddr6->sin6_addr)) {
740 pr_info("multi-cast route %pI6 port %u, dev %s.\n", 752 pr_info("multi-cast route %pI6 port %u, dev %s.\n",
741 daddr6->sin6_addr.s6_addr, 753 daddr6->sin6_addr.s6_addr,
@@ -896,6 +908,7 @@ EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);
896void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb) 908void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
897{ 909{
898 struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk; 910 struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;
911 struct module *owner = csk->cdev->owner;
899 912
900 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 913 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
901 csk, (csk)->state, (csk)->flags, (csk)->tid); 914 csk, (csk)->state, (csk)->flags, (csk)->tid);
@@ -906,6 +919,8 @@ void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
906 spin_unlock_bh(&csk->lock); 919 spin_unlock_bh(&csk->lock);
907 cxgbi_sock_put(csk); 920 cxgbi_sock_put(csk);
908 __kfree_skb(skb); 921 __kfree_skb(skb);
922
923 module_put(owner);
909} 924}
910EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure); 925EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);
911 926
@@ -1574,6 +1589,25 @@ static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
1574 return -EIO; 1589 return -EIO;
1575 } 1590 }
1576 1591
1592 if (cxgbi_skcb_test_flag(skb, SKCBF_RX_ISCSI_COMPL) &&
1593 cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA_DDPD)) {
 1594 /* If the completion flag is set and the data was
 1595 * placed directly into host memory, update
 1596 * task->exp_datasn to the datasn in the completion
 1597 * iSCSI header, as the T6 adapter generates a completion
 1598 * only for the last PDU of a sequence.
1599 */
1600 itt_t itt = ((struct iscsi_data *)skb->data)->itt;
1601 struct iscsi_task *task = iscsi_itt_to_ctask(conn, itt);
1602 u32 data_sn = be32_to_cpu(((struct iscsi_data *)
1603 skb->data)->datasn);
1604 if (task && task->sc) {
1605 struct iscsi_tcp_task *tcp_task = task->dd_data;
1606
1607 tcp_task->exp_datasn = data_sn;
1608 }
1609 }
1610
1577 return read_pdu_skb(conn, skb, 0, 0); 1611 return read_pdu_skb(conn, skb, 0, 0);
1578} 1612}
1579 1613
@@ -1627,15 +1661,15 @@ static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
1627 csk->rcv_wup, cdev->rx_credit_thres, 1661 csk->rcv_wup, cdev->rx_credit_thres,
1628 csk->rcv_win); 1662 csk->rcv_win);
1629 1663
1664 if (!cdev->rx_credit_thres)
1665 return;
1666
1630 if (csk->state != CTP_ESTABLISHED) 1667 if (csk->state != CTP_ESTABLISHED)
1631 return; 1668 return;
1632 1669
1633 credits = csk->copied_seq - csk->rcv_wup; 1670 credits = csk->copied_seq - csk->rcv_wup;
1634 if (unlikely(!credits)) 1671 if (unlikely(!credits))
1635 return; 1672 return;
1636 if (unlikely(cdev->rx_credit_thres == 0))
1637 return;
1638
1639 must_send = credits + 16384 >= csk->rcv_win; 1673 must_send = credits + 16384 >= csk->rcv_win;
1640 if (must_send || credits >= cdev->rx_credit_thres) 1674 if (must_send || credits >= cdev->rx_credit_thres)
1641 csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits); 1675 csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
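
Taken together with the cxgb4i changes above, this file completes a module
reference pattern: the LLD publishes its module in cdev->owner at probe
time, init_act_open() pins it before an active-open request goes out, and
the reference is dropped when the open resolves or, as here, when ARP
resolution fails. Condensed (names as used in this patch):

	cdev->owner = THIS_MODULE;		/* t4_uld_add() */

	if (!try_module_get(cdev->owner))	/* init_act_open() */
		goto rel_resource;

	module_put(csk->cdev->owner);		/* act_open_req_arp_failure() */
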
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index e7802738f5d2..95ba99044c3e 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -207,6 +207,7 @@ enum cxgbi_skcb_flags {
207 SKCBF_RX_HDR, /* received pdu header */ 207 SKCBF_RX_HDR, /* received pdu header */
208 SKCBF_RX_DATA, /* received pdu payload */ 208 SKCBF_RX_DATA, /* received pdu payload */
209 SKCBF_RX_STATUS, /* received ddp status */ 209 SKCBF_RX_STATUS, /* received ddp status */
210 SKCBF_RX_ISCSI_COMPL, /* received iscsi completion */
210 SKCBF_RX_DATA_DDPD, /* pdu payload ddp'd */ 211 SKCBF_RX_DATA_DDPD, /* pdu payload ddp'd */
211 SKCBF_RX_HCRC_ERR, /* header digest error */ 212 SKCBF_RX_HCRC_ERR, /* header digest error */
212 SKCBF_RX_DCRC_ERR, /* data digest error */ 213 SKCBF_RX_DCRC_ERR, /* data digest error */
@@ -467,6 +468,7 @@ struct cxgbi_device {
467 struct pci_dev *pdev; 468 struct pci_dev *pdev;
468 struct dentry *debugfs_root; 469 struct dentry *debugfs_root;
469 struct iscsi_transport *itp; 470 struct iscsi_transport *itp;
471 struct module *owner;
470 472
471 unsigned int pfvf; 473 unsigned int pfvf;
472 unsigned int rx_credit_thres; 474 unsigned int rx_credit_thres;
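
The rx_credit_thres field above doubles as an on/off switch: for chips
newer than T5, t4_uld_add() now initializes it to zero, and the early
return added to csk_return_rx_credits() then suppresses host-driven RX
credit updates entirely on T6 (presumably the newer hardware returns
credits itself; the patch does not spell this out). The gating, condensed:

	cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <=
				 CHELSIO_T5) ? cxgb4i_rx_credit_thres : 0;

	if (!cdev->rx_credit_thres)	/* T6: no host credit returns */
		return;
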
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index de5147a8c959..6f9665d50d84 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -37,7 +37,7 @@
37#define MAX_CARDS 8 37#define MAX_CARDS 8
38 38
39/* old-style parameters for compatibility */ 39/* old-style parameters for compatibility */
40static int ncr_irq; 40static int ncr_irq = -1;
41static int ncr_addr; 41static int ncr_addr;
42static int ncr_5380; 42static int ncr_5380;
43static int ncr_53c400; 43static int ncr_53c400;
@@ -52,9 +52,9 @@ module_param(ncr_53c400a, int, 0);
52module_param(dtc_3181e, int, 0); 52module_param(dtc_3181e, int, 0);
53module_param(hp_c2502, int, 0); 53module_param(hp_c2502, int, 0);
54 54
55static int irq[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; 55static int irq[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
56module_param_array(irq, int, NULL, 0); 56module_param_array(irq, int, NULL, 0);
57MODULE_PARM_DESC(irq, "IRQ number(s)"); 57MODULE_PARM_DESC(irq, "IRQ number(s) (0=none, 254=auto [default])");
58 58
59static int base[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; 59static int base[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
60module_param_array(base, int, NULL, 0); 60module_param_array(base, int, NULL, 0);
@@ -67,6 +67,56 @@ MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC31
67MODULE_ALIAS("g_NCR5380_mmio"); 67MODULE_ALIAS("g_NCR5380_mmio");
68MODULE_LICENSE("GPL"); 68MODULE_LICENSE("GPL");
69 69
70static void g_NCR5380_trigger_irq(struct Scsi_Host *instance)
71{
72 struct NCR5380_hostdata *hostdata = shost_priv(instance);
73
74 /*
75 * An interrupt is triggered whenever BSY = false, SEL = true
76 * and a bit set in the SELECT_ENABLE_REG is asserted on the
77 * SCSI bus.
78 *
79 * Note that the bus is only driven when the phase control signals
80 * (I/O, C/D, and MSG) match those in the TCR.
81 */
82 NCR5380_write(TARGET_COMMAND_REG,
83 PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK));
84 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
85 NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
86 NCR5380_write(INITIATOR_COMMAND_REG,
87 ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL);
88
89 msleep(1);
90
91 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
92 NCR5380_write(SELECT_ENABLE_REG, 0);
93 NCR5380_write(TARGET_COMMAND_REG, 0);
94}
95
96/**
 97 * g_NCR5380_probe_irq - find the IRQ of an NCR5380 or equivalent
98 * @instance: SCSI host instance
99 *
100 * Autoprobe for the IRQ line used by the card by triggering an IRQ
101 * and then looking to see what interrupt actually turned up.
102 */
103
104static int g_NCR5380_probe_irq(struct Scsi_Host *instance)
105{
106 struct NCR5380_hostdata *hostdata = shost_priv(instance);
107 int irq_mask, irq;
108
109 NCR5380_read(RESET_PARITY_INTERRUPT_REG);
110 irq_mask = probe_irq_on();
111 g_NCR5380_trigger_irq(instance);
112 irq = probe_irq_off(irq_mask);
113 NCR5380_read(RESET_PARITY_INTERRUPT_REG);
114
115 if (irq <= 0)
116 return NO_IRQ;
117 return irq;
118}
119
70/* 120/*
71 * Configure I/O address of 53C400A or DTC436 by writing magic numbers 121 * Configure I/O address of 53C400A or DTC436 by writing magic numbers
72 * to ports 0x779 and 0x379. 122 * to ports 0x779 and 0x379.
@@ -81,14 +131,33 @@ static void magic_configure(int idx, u8 irq, u8 magic[])
81 outb(magic[3], 0x379); 131 outb(magic[3], 0x379);
82 outb(magic[4], 0x379); 132 outb(magic[4], 0x379);
83 133
84 /* allowed IRQs for HP C2502 */ 134 if (irq == 9)
85 if (irq != 2 && irq != 3 && irq != 4 && irq != 5 && irq != 7) 135 irq = 2;
86 irq = 0; 136
87 if (idx >= 0 && idx <= 7) 137 if (idx >= 0 && idx <= 7)
88 cfg = 0x80 | idx | (irq << 4); 138 cfg = 0x80 | idx | (irq << 4);
89 outb(cfg, 0x379); 139 outb(cfg, 0x379);
90} 140}
91 141
142static irqreturn_t legacy_empty_irq_handler(int irq, void *dev_id)
143{
144 return IRQ_HANDLED;
145}
146
147static int legacy_find_free_irq(int *irq_table)
148{
149 while (*irq_table != -1) {
150 if (!request_irq(*irq_table, legacy_empty_irq_handler,
151 IRQF_PROBE_SHARED, "Test IRQ",
152 (void *)irq_table)) {
153 free_irq(*irq_table, (void *) irq_table);
154 return *irq_table;
155 }
156 irq_table++;
157 }
158 return -1;
159}
160
92static unsigned int ncr_53c400a_ports[] = { 161static unsigned int ncr_53c400a_ports[] = {
93 0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0 162 0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
94}; 163};
@@ -101,6 +170,9 @@ static u8 ncr_53c400a_magic[] = { /* 53C400A & DTC436 */
101static u8 hp_c2502_magic[] = { /* HP C2502 */ 170static u8 hp_c2502_magic[] = { /* HP C2502 */
102 0x0f, 0x22, 0xf0, 0x20, 0x80 171 0x0f, 0x22, 0xf0, 0x20, 0x80
103}; 172};
173static int hp_c2502_irqs[] = {
174 9, 5, 7, 3, 4, -1
175};
104 176
105static int generic_NCR5380_init_one(struct scsi_host_template *tpnt, 177static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
106 struct device *pdev, int base, int irq, int board) 178 struct device *pdev, int base, int irq, int board)
@@ -248,6 +320,13 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
248 } 320 }
249 } 321 }
250 322
323 /* Check for vacant slot */
324 NCR5380_write(MODE_REG, 0);
325 if (NCR5380_read(MODE_REG) != 0) {
326 ret = -ENODEV;
327 goto out_unregister;
328 }
329
251 ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP); 330 ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP);
252 if (ret) 331 if (ret)
253 goto out_unregister; 332 goto out_unregister;
@@ -262,31 +341,59 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
262 341
263 NCR5380_maybe_reset_bus(instance); 342 NCR5380_maybe_reset_bus(instance);
264 343
265 if (irq != IRQ_AUTO)
266 instance->irq = irq;
267 else
268 instance->irq = NCR5380_probe_irq(instance, 0xffff);
269
270 /* Compatibility with documented NCR5380 kernel parameters */ 344 /* Compatibility with documented NCR5380 kernel parameters */
271 if (instance->irq == 255) 345 if (irq == 255 || irq == 0)
272 instance->irq = NO_IRQ; 346 irq = NO_IRQ;
347 else if (irq == -1)
348 irq = IRQ_AUTO;
349
350 if (board == BOARD_HP_C2502) {
351 int *irq_table = hp_c2502_irqs;
352 int board_irq = -1;
353
354 switch (irq) {
355 case NO_IRQ:
356 board_irq = 0;
357 break;
358 case IRQ_AUTO:
359 board_irq = legacy_find_free_irq(irq_table);
360 break;
361 default:
362 while (*irq_table != -1)
363 if (*irq_table++ == irq)
364 board_irq = irq;
365 }
366
367 if (board_irq <= 0) {
368 board_irq = 0;
369 irq = NO_IRQ;
370 }
371
372 magic_configure(port_idx, board_irq, magic);
373 }
374
375 if (irq == IRQ_AUTO) {
376 instance->irq = g_NCR5380_probe_irq(instance);
377 if (instance->irq == NO_IRQ)
378 shost_printk(KERN_INFO, instance, "no irq detected\n");
379 } else {
380 instance->irq = irq;
381 if (instance->irq == NO_IRQ)
382 shost_printk(KERN_INFO, instance, "no irq provided\n");
383 }
273 384
274 if (instance->irq != NO_IRQ) { 385 if (instance->irq != NO_IRQ) {
275 /* set IRQ for HP C2502 */
276 if (board == BOARD_HP_C2502)
277 magic_configure(port_idx, instance->irq, magic);
278 if (request_irq(instance->irq, generic_NCR5380_intr, 386 if (request_irq(instance->irq, generic_NCR5380_intr,
279 0, "NCR5380", instance)) { 387 0, "NCR5380", instance)) {
280 printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
 281 instance->irq = NO_IRQ; 388 shost_printk(KERN_INFO, instance,
 389 "irq %d denied\n", instance->irq);
 390 instance->irq = NO_IRQ;
391 } else {
392 shost_printk(KERN_INFO, instance,
393 "irq %d acquired\n", instance->irq);
282 } 394 }
283 } 395 }
284 396
285 if (instance->irq == NO_IRQ) {
286 printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
287 printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
288 }
289
290 ret = scsi_add_host(instance, pdev); 397 ret = scsi_add_host(instance, pdev);
291 if (ret) 398 if (ret)
292 goto out_free_irq; 399 goto out_free_irq;
@@ -597,7 +704,7 @@ static int __init generic_NCR5380_init(void)
597 int ret = 0; 704 int ret = 0;
598 705
599 /* compatibility with old-style parameters */ 706 /* compatibility with old-style parameters */
600 if (irq[0] == 0 && base[0] == 0 && card[0] == -1) { 707 if (irq[0] == -1 && base[0] == 0 && card[0] == -1) {
601 irq[0] = ncr_irq; 708 irq[0] = ncr_irq;
602 base[0] = ncr_addr; 709 base[0] = ncr_addr;
603 if (ncr_5380) 710 if (ncr_5380)
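
Two IRQ conventions coexist after this change: the user-visible parameter
(0 or 255 means no IRQ, 254 means autoprobe, and the new default of -1 also
maps to autoprobe) and the probing idiom built on the kernel's generic
helpers. A minimal sketch of that idiom, assuming <linux/interrupt.h>:

	unsigned long mask = probe_irq_on();	/* arm detection on all lines */
	g_NCR5380_trigger_irq(instance);	/* make the card raise its IRQ */
	int irq = probe_irq_off(mask);		/* <= 0: none or ambiguous */
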
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index 3ce5b65ccb00..81b22d989648 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -51,4 +51,6 @@
51#define BOARD_DTC3181E 3 51#define BOARD_DTC3181E 3
52#define BOARD_HP_C2502 4 52#define BOARD_HP_C2502 4
53 53
54#define IRQ_AUTO 254
55
54#endif /* GENERIC_NCR5380_H */ 56#endif /* GENERIC_NCR5380_H */
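
With IRQ_AUTO defined above, a module load that autoprobes the IRQ would
look like "modprobe g_NCR5380 base=0x350 card=1 irq=254" (illustrative
values), "irq=0" keeps the documented meaning of running without an IRQ,
and leaving irq unset now autoprobes as well, since the parameter defaults
to -1.
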
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 691a09316952..cbc0c5fe5a60 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1557,10 +1557,9 @@ static void hpsa_monitor_offline_device(struct ctlr_info *h,
1557 1557
1558 /* Device is not on the list, add it. */ 1558 /* Device is not on the list, add it. */
1559 device = kmalloc(sizeof(*device), GFP_KERNEL); 1559 device = kmalloc(sizeof(*device), GFP_KERNEL);
1560 if (!device) { 1560 if (!device)
1561 dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
1562 return; 1561 return;
1563 } 1562
1564 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); 1563 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1565 spin_lock_irqsave(&h->offline_device_lock, flags); 1564 spin_lock_irqsave(&h->offline_device_lock, flags);
1566 list_add_tail(&device->offline_list, &h->offline_device_list); 1565 list_add_tail(&device->offline_list, &h->offline_device_list);
@@ -2142,17 +2141,15 @@ static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
2142 2141
2143 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds, 2142 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
2144 GFP_KERNEL); 2143 GFP_KERNEL);
2145 if (!h->cmd_sg_list) { 2144 if (!h->cmd_sg_list)
2146 dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
2147 return -ENOMEM; 2145 return -ENOMEM;
2148 } 2146
2149 for (i = 0; i < h->nr_cmds; i++) { 2147 for (i = 0; i < h->nr_cmds; i++) {
2150 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) * 2148 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
2151 h->chainsize, GFP_KERNEL); 2149 h->chainsize, GFP_KERNEL);
2152 if (!h->cmd_sg_list[i]) { 2150 if (!h->cmd_sg_list[i])
2153 dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
2154 goto clean; 2151 goto clean;
2155 } 2152
2156 } 2153 }
2157 return 0; 2154 return 0;
2158 2155
@@ -3454,11 +3451,8 @@ static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
3454 struct bmic_sense_subsystem_info *ssi; 3451 struct bmic_sense_subsystem_info *ssi;
3455 3452
3456 ssi = kzalloc(sizeof(*ssi), GFP_KERNEL); 3453 ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
3457 if (ssi == NULL) { 3454 if (!ssi)
3458 dev_warn(&h->pdev->dev,
3459 "%s: out of memory\n", __func__);
3460 return; 3455 return;
3461 }
3462 3456
3463 rc = hpsa_bmic_sense_subsystem_information(h, 3457 rc = hpsa_bmic_sense_subsystem_information(h,
3464 scsi3addr, 0, ssi, sizeof(*ssi)); 3458 scsi3addr, 0, ssi, sizeof(*ssi));
@@ -4335,8 +4329,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
4335 4329
4336 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL); 4330 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
4337 if (!currentsd[i]) { 4331 if (!currentsd[i]) {
4338 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
4339 __FILE__, __LINE__);
4340 h->drv_req_rescan = 1; 4332 h->drv_req_rescan = 1;
4341 goto out; 4333 goto out;
4342 } 4334 }
@@ -8597,14 +8589,12 @@ static int hpsa_luns_changed(struct ctlr_info *h)
8597 */ 8589 */
8598 8590
8599 if (!h->lastlogicals) 8591 if (!h->lastlogicals)
8600 goto out; 8592 return rc;
8601 8593
8602 logdev = kzalloc(sizeof(*logdev), GFP_KERNEL); 8594 logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
8603 if (!logdev) { 8595 if (!logdev)
8604 dev_warn(&h->pdev->dev, 8596 return rc;
8605 "Out of memory, can't track lun changes.\n"); 8597
8606 goto out;
8607 }
8608 if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) { 8598 if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
8609 dev_warn(&h->pdev->dev, 8599 dev_warn(&h->pdev->dev,
8610 "report luns failed, can't track lun changes.\n"); 8600 "report luns failed, can't track lun changes.\n");
@@ -8998,11 +8988,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
8998 return; 8988 return;
8999 8989
9000 options = kzalloc(sizeof(*options), GFP_KERNEL); 8990 options = kzalloc(sizeof(*options), GFP_KERNEL);
9001 if (!options) { 8991 if (!options)
9002 dev_err(&h->pdev->dev,
9003 "Error: failed to disable rld caching, during alloc.\n");
9004 return; 8992 return;
9005 }
9006 8993
9007 c = cmd_alloc(h); 8994 c = cmd_alloc(h);
9008 8995
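
The message removals in this file follow the usual checkpatch guidance:
the slab and page allocators already dump a diagnostic with a backtrace
when an allocation fails, so per-call "out of memory" prints only
duplicate that information and bloat the driver.
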
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index d9534ee6ef52..50cd01165e35 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -95,6 +95,7 @@ static int fast_fail = 1;
95static int client_reserve = 1; 95static int client_reserve = 1;
96static char partition_name[97] = "UNKNOWN"; 96static char partition_name[97] = "UNKNOWN";
97static unsigned int partition_number = -1; 97static unsigned int partition_number = -1;
98static LIST_HEAD(ibmvscsi_head);
98 99
99static struct scsi_transport_template *ibmvscsi_transport_template; 100static struct scsi_transport_template *ibmvscsi_transport_template;
100 101
@@ -232,6 +233,7 @@ static void ibmvscsi_task(void *data)
232 while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) { 233 while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
233 ibmvscsi_handle_crq(crq, hostdata); 234 ibmvscsi_handle_crq(crq, hostdata);
234 crq->valid = VIOSRP_CRQ_FREE; 235 crq->valid = VIOSRP_CRQ_FREE;
236 wmb();
235 } 237 }
236 238
237 vio_enable_interrupts(vdev); 239 vio_enable_interrupts(vdev);
@@ -240,6 +242,7 @@ static void ibmvscsi_task(void *data)
240 vio_disable_interrupts(vdev); 242 vio_disable_interrupts(vdev);
241 ibmvscsi_handle_crq(crq, hostdata); 243 ibmvscsi_handle_crq(crq, hostdata);
242 crq->valid = VIOSRP_CRQ_FREE; 244 crq->valid = VIOSRP_CRQ_FREE;
245 wmb();
243 } else { 246 } else {
244 done = 1; 247 done = 1;
245 } 248 }
@@ -992,7 +995,7 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
992 if (unlikely(rsp->opcode != SRP_RSP)) { 995 if (unlikely(rsp->opcode != SRP_RSP)) {
993 if (printk_ratelimit()) 996 if (printk_ratelimit())
994 dev_warn(evt_struct->hostdata->dev, 997 dev_warn(evt_struct->hostdata->dev,
995 "bad SRP RSP type %d\n", rsp->opcode); 998 "bad SRP RSP type %#02x\n", rsp->opcode);
996 } 999 }
997 1000
998 if (cmnd) { 1001 if (cmnd) {
@@ -2270,6 +2273,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
2270 } 2273 }
2271 2274
2272 dev_set_drvdata(&vdev->dev, hostdata); 2275 dev_set_drvdata(&vdev->dev, hostdata);
2276 list_add_tail(&hostdata->host_list, &ibmvscsi_head);
2273 return 0; 2277 return 0;
2274 2278
2275 add_srp_port_failed: 2279 add_srp_port_failed:
@@ -2291,6 +2295,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
2291static int ibmvscsi_remove(struct vio_dev *vdev) 2295static int ibmvscsi_remove(struct vio_dev *vdev)
2292{ 2296{
2293 struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev); 2297 struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
2298 list_del(&hostdata->host_list);
2294 unmap_persist_bufs(hostdata); 2299 unmap_persist_bufs(hostdata);
2295 release_event_pool(&hostdata->pool, hostdata); 2300 release_event_pool(&hostdata->pool, hostdata);
2296 ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, 2301 ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
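
The two wmb() calls added above order the CRQ handoff: crq->valid is the
ownership byte shared with firmware, so the store that marks an element
VIOSRP_CRQ_FREE must be visible before the driver advances and reuses the
queue. The consumer-side pattern, condensed:

	ibmvscsi_handle_crq(crq, hostdata);	/* consume the element */
	crq->valid = VIOSRP_CRQ_FREE;		/* hand it back to firmware */
	wmb();					/* publish the handoff */
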
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index e0f6c3aeb4ee..3a7875575616 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -90,6 +90,7 @@ struct event_pool {
90 90
91/* all driver data associated with a host adapter */ 91/* all driver data associated with a host adapter */
92struct ibmvscsi_host_data { 92struct ibmvscsi_host_data {
93 struct list_head host_list;
93 atomic_t request_limit; 94 atomic_t request_limit;
94 int client_migrated; 95 int client_migrated;
95 int reset_crq; 96 int reset_crq;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 8de0eda8cd00..394fe1338d09 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -402,6 +402,9 @@ struct MPT3SAS_DEVICE {
402 u8 block; 402 u8 block;
403 u8 tlr_snoop_check; 403 u8 tlr_snoop_check;
404 u8 ignore_delay_remove; 404 u8 ignore_delay_remove;
 405 /* I/O priority command handling */
406 u8 ncq_prio_enable;
407
405}; 408};
406 409
407#define MPT3_CMD_NOT_USED 0x8000 /* free */ 410#define MPT3_CMD_NOT_USED 0x8000 /* free */
@@ -1458,4 +1461,7 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
1458 struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request, 1461 struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request,
1459 u16 smid); 1462 u16 smid);
1460 1463
1464/* NCQ Prio Handling Check */
1465bool scsih_ncq_prio_supp(struct scsi_device *sdev);
1466
1461#endif /* MPT3SAS_BASE_H_INCLUDED */ 1467#endif /* MPT3SAS_BASE_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 050bd788ad02..95f0f24bac05 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -3325,8 +3325,6 @@ static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR,
3325 3325
 3326/*********** diagnostic trigger support *** END ****************************/ 3326/*********** diagnostic trigger support *** END ****************************/
3327 3327
3328
3329
3330/*****************************************/ 3328/*****************************************/
3331 3329
3332struct device_attribute *mpt3sas_host_attrs[] = { 3330struct device_attribute *mpt3sas_host_attrs[] = {
@@ -3402,9 +3400,50 @@ _ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
3402} 3400}
3403static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL); 3401static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
3404 3402
3403/**
 3404 * _ctl_device_ncq_prio_enable_show - show the ncq_prio_enable setting
 3405 * @dev: pointer to embedded scsi device
 3406 * @buf: the buffer returned
 3407 *
 3408 * A sysfs 'read/write' sdev attribute; works only with SATA devices
3409 */
3410static ssize_t
3411_ctl_device_ncq_prio_enable_show(struct device *dev,
3412 struct device_attribute *attr, char *buf)
3413{
3414 struct scsi_device *sdev = to_scsi_device(dev);
3415 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3416
3417 return snprintf(buf, PAGE_SIZE, "%d\n",
3418 sas_device_priv_data->ncq_prio_enable);
3419}
3420
3421static ssize_t
3422_ctl_device_ncq_prio_enable_store(struct device *dev,
3423 struct device_attribute *attr,
3424 const char *buf, size_t count)
3425{
3426 struct scsi_device *sdev = to_scsi_device(dev);
3427 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
 3428 bool ncq_prio_enable = false;
3429
3430 if (kstrtobool(buf, &ncq_prio_enable))
3431 return -EINVAL;
3432
3433 if (!scsih_ncq_prio_supp(sdev))
3434 return -EINVAL;
3435
3436 sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
3437 return strlen(buf);
3438}
3439static DEVICE_ATTR(sas_ncq_prio_enable, S_IRUGO | S_IWUSR,
3440 _ctl_device_ncq_prio_enable_show,
3441 _ctl_device_ncq_prio_enable_store);
3442
3405struct device_attribute *mpt3sas_dev_attrs[] = { 3443struct device_attribute *mpt3sas_dev_attrs[] = {
3406 &dev_attr_sas_address, 3444 &dev_attr_sas_address,
3407 &dev_attr_sas_device_handle, 3445 &dev_attr_sas_device_handle,
3446 &dev_attr_sas_ncq_prio_enable,
3408 NULL, 3447 NULL,
3409}; 3448};
3410 3449
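
The new attribute is a per-device sysfs file, writable by root; on a
running system it would be toggled with something like
"echo 1 > /sys/block/sdX/device/sas_ncq_prio_enable" (path illustrative).
The store handler rejects the write with -EINVAL unless
scsih_ncq_prio_supp() reports NCQ priority support, so only SATA devices
behind the HBA can enable it.
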
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 5c8f75247d73..b5c966e319d3 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -4053,6 +4053,8 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4053 struct MPT3SAS_DEVICE *sas_device_priv_data; 4053 struct MPT3SAS_DEVICE *sas_device_priv_data;
4054 struct MPT3SAS_TARGET *sas_target_priv_data; 4054 struct MPT3SAS_TARGET *sas_target_priv_data;
4055 struct _raid_device *raid_device; 4055 struct _raid_device *raid_device;
4056 struct request *rq = scmd->request;
4057 int class;
4056 Mpi2SCSIIORequest_t *mpi_request; 4058 Mpi2SCSIIORequest_t *mpi_request;
4057 u32 mpi_control; 4059 u32 mpi_control;
4058 u16 smid; 4060 u16 smid;
@@ -4115,7 +4117,12 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4115 4117
4116 /* set tags */ 4118 /* set tags */
4117 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; 4119 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
 4118 4120 /* NCQ priority enabled: mark the command high priority in the control field */
4121 if (sas_device_priv_data->ncq_prio_enable) {
4122 class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
4123 if (class == IOPRIO_CLASS_RT)
4124 mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
4125 }
4119 /* Make sure Device is not raid volume. 4126 /* Make sure Device is not raid volume.
4120 * We do not expose raid functionality to upper layer for warpdrive. 4127 * We do not expose raid functionality to upper layer for warpdrive.
4121 */ 4128 */
@@ -9099,6 +9106,31 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
9099 return PCI_ERS_RESULT_RECOVERED; 9106 return PCI_ERS_RESULT_RECOVERED;
9100} 9107}
9101 9108
9109/**
 9110 * scsih_ncq_prio_supp - Check for NCQ command priority support
9111 * @sdev: scsi device struct
9112 *
9113 * This is called when a user indicates they would like to enable
9114 * ncq command priorities. This works only on SATA devices.
9115 */
9116bool scsih_ncq_prio_supp(struct scsi_device *sdev)
9117{
9118 unsigned char *buf;
9119 bool ncq_prio_supp = false;
9120
9121 if (!scsi_device_supports_vpd(sdev))
9122 return ncq_prio_supp;
9123
9124 buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
9125 if (!buf)
9126 return ncq_prio_supp;
9127
9128 if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
9129 ncq_prio_supp = (buf[213] >> 4) & 1;
9130
9131 kfree(buf);
9132 return ncq_prio_supp;
9133}
9102/* 9134/*
9103 * The pci device ids are defined in mpi/mpi2_cnfg.h. 9135 * The pci device ids are defined in mpi/mpi2_cnfg.h.
9104 */ 9136 */
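
scsih_qcmd() above maps the block layer's realtime I/O class onto the MPI2
command-priority bit. From user space that class is requested through the
ioprio syscalls (or "ionice -c 1"); a hedged sketch, with the ioprio
constants spelled out per include/linux/ioprio.h and a wrapper name of our
own:

	#include <sys/syscall.h>
	#include <unistd.h>

	#define IOPRIO_CLASS_RT		1
	#define IOPRIO_CLASS_SHIFT	13
	#define IOPRIO_WHO_PROCESS	1

	/* give the calling process realtime I/O priority, level 0 */
	static int set_rt_ioprio(void)
	{
		return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
			       IOPRIO_CLASS_RT << IOPRIO_CLASS_SHIFT);
	}
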
diff --git a/drivers/scsi/qedi/Kconfig b/drivers/scsi/qedi/Kconfig
new file mode 100644
index 000000000000..23ca8a274586
--- /dev/null
+++ b/drivers/scsi/qedi/Kconfig
@@ -0,0 +1,10 @@
1config QEDI
2 tristate "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver Support"
3 depends on PCI && SCSI
4 depends on QED
5 select SCSI_ISCSI_ATTRS
6 select QED_LL2
7 select QED_ISCSI
8 ---help---
9 This driver supports iSCSI offload for the QLogic FastLinQ
10 41000 Series Converged Network Adapters.
diff --git a/drivers/scsi/qedi/Makefile b/drivers/scsi/qedi/Makefile
new file mode 100644
index 000000000000..2b3e16b24299
--- /dev/null
+++ b/drivers/scsi/qedi/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_QEDI) := qedi.o
2qedi-y := qedi_main.o qedi_iscsi.o qedi_fw.o qedi_sysfs.o \
3 qedi_dbg.o
4
5qedi-$(CONFIG_DEBUG_FS) += qedi_debugfs.o
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
new file mode 100644
index 000000000000..5ca3e8c28a3f
--- /dev/null
+++ b/drivers/scsi/qedi/qedi.h
@@ -0,0 +1,364 @@
1/*
2 * QLogic iSCSI Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#ifndef _QEDI_H_
11#define _QEDI_H_
12
13#define __PREVENT_QED_HSI__
14
15#include <scsi/scsi_transport_iscsi.h>
16#include <scsi/libiscsi.h>
17#include <scsi/scsi_host.h>
18#include <linux/uio_driver.h>
19
20#include "qedi_hsi.h"
21#include <linux/qed/qed_if.h>
22#include "qedi_dbg.h"
23#include <linux/qed/qed_iscsi_if.h>
24#include <linux/qed/qed_ll2_if.h>
25#include "qedi_version.h"
26
27#define QEDI_MODULE_NAME "qedi"
28
29struct qedi_endpoint;
30
31/*
32 * PCI function probe defines
33 */
34#define QEDI_MODE_NORMAL 0
35#define QEDI_MODE_RECOVERY 1
36
37#define ISCSI_WQE_SET_PTU_INVALIDATE 1
38#define QEDI_MAX_ISCSI_TASK 4096
39#define QEDI_MAX_TASK_NUM 0x0FFF
40#define QEDI_MAX_ISCSI_CONNS_PER_HBA 1024
41#define QEDI_ISCSI_MAX_BDS_PER_CMD 256 /* Firmware max BDs is 256 */
42#define MAX_OUSTANDING_TASKS_PER_CON 1024
43
44#define QEDI_MAX_BD_LEN 0xffff
45#define QEDI_BD_SPLIT_SZ 0x1000
46#define QEDI_PAGE_SIZE 4096
47#define QEDI_FAST_SGE_COUNT 4
48/* MAX Length for cached SGL */
49#define MAX_SGLEN_FOR_CACHESGL ((1U << 16) - 1)
50
51#define MAX_NUM_MSIX_PF 8
52#define MIN_NUM_CPUS_MSIX(x) min((x)->msix_count, num_online_cpus())
53
54#define QEDI_LOCAL_PORT_MIN 60000
55#define QEDI_LOCAL_PORT_MAX 61024
56#define QEDI_LOCAL_PORT_RANGE (QEDI_LOCAL_PORT_MAX - QEDI_LOCAL_PORT_MIN)
57#define QEDI_LOCAL_PORT_INVALID 0xffff
58#define TX_RX_RING 16
59#define RX_RING (TX_RX_RING - 1)
60#define LL2_SINGLE_BUF_SIZE 0x400
61#define QEDI_PAGE_SIZE 4096
62#define QEDI_PAGE_ALIGN(addr) ALIGN(addr, QEDI_PAGE_SIZE)
63#define QEDI_PAGE_MASK (~((QEDI_PAGE_SIZE) - 1))
64
65#define QEDI_PAGE_SIZE 4096
66#define QEDI_PATH_HANDLE 0xFE0000000UL
67
68struct qedi_uio_ctrl {
69 /* meta data */
70 u32 uio_hsi_version;
71
72 /* user writes */
73 u32 host_tx_prod;
74 u32 host_rx_cons;
75 u32 host_rx_bd_cons;
76 u32 host_tx_pkt_len;
77 u32 host_rx_cons_cnt;
78
79 /* driver writes */
80 u32 hw_tx_cons;
81 u32 hw_rx_prod;
82 u32 hw_rx_bd_prod;
83 u32 hw_rx_prod_cnt;
84
85 /* other */
86 u8 mac_addr[6];
87 u8 reserve[2];
88};
89
90struct qedi_rx_bd {
91 u32 rx_pkt_index;
92 u32 rx_pkt_len;
93 u16 vlan_id;
94};
95
96#define QEDI_RX_DESC_CNT (QEDI_PAGE_SIZE / sizeof(struct qedi_rx_bd))
97#define QEDI_MAX_RX_DESC_CNT (QEDI_RX_DESC_CNT - 1)
98#define QEDI_NUM_RX_BD (QEDI_RX_DESC_CNT * 1)
99#define QEDI_MAX_RX_BD (QEDI_NUM_RX_BD - 1)
100
101#define QEDI_NEXT_RX_IDX(x) ((((x) & (QEDI_MAX_RX_DESC_CNT)) == \
102 (QEDI_MAX_RX_DESC_CNT - 1)) ? \
103 (x) + 2 : (x) + 1)
104
105struct qedi_uio_dev {
106 struct uio_info qedi_uinfo;
107 u32 uio_dev;
108 struct list_head list;
109
110 u32 ll2_ring_size;
111 void *ll2_ring;
112
113 u32 ll2_buf_size;
114 void *ll2_buf;
115
116 void *rx_pkt;
117 void *tx_pkt;
118
119 struct qedi_ctx *qedi;
120 struct pci_dev *pdev;
121 void *uctrl;
122};
123
124/* List to maintain the skb pointers */
125struct skb_work_list {
126 struct list_head list;
127 struct sk_buff *skb;
128 u16 vlan_id;
129};
130
131/* Queue sizes in number of elements */
132#define QEDI_SQ_SIZE MAX_OUSTANDING_TASKS_PER_CON
133#define QEDI_CQ_SIZE 2048
134#define QEDI_CMDQ_SIZE QEDI_MAX_ISCSI_TASK
135#define QEDI_PROTO_CQ_PROD_IDX 0
136
137struct qedi_glbl_q_params {
138 u64 hw_p_cq; /* Completion queue PBL */
139 u64 hw_p_rq; /* Request queue PBL */
140 u64 hw_p_cmdq; /* Command queue PBL */
141};
142
143struct global_queue {
144 union iscsi_cqe *cq;
145 dma_addr_t cq_dma;
146 u32 cq_mem_size;
147 u32 cq_cons_idx; /* Completion queue consumer index */
148
149 void *cq_pbl;
150 dma_addr_t cq_pbl_dma;
151 u32 cq_pbl_size;
152
153};
154
155struct qedi_fastpath {
156 struct qed_sb_info *sb_info;
157 u16 sb_id;
158#define QEDI_NAME_SIZE 16
159 char name[QEDI_NAME_SIZE];
160 struct qedi_ctx *qedi;
161};
162
163/* Used to pass fastpath information needed to process CQEs */
164struct qedi_io_work {
165 struct list_head list;
166 struct iscsi_cqe_solicited cqe;
167 u16 que_idx;
168};
169
170/**
171 * struct iscsi_cid_queue - Per adapter iscsi cid queue
172 *
173 * @cid_que_base: queue base memory
174 * @cid_que: queue memory pointer
 175 * @cid_q_prod_idx: producer index
 176 * @cid_q_cons_idx: consumer index
 177 * @cid_q_max_idx: max index, used to detect wrap-around condition
178 * @cid_free_cnt: queue size
179 * @conn_cid_tbl: iscsi cid to conn structure mapping table
180 *
181 * Per adapter iSCSI CID Queue
182 */
183struct iscsi_cid_queue {
184 void *cid_que_base;
185 u32 *cid_que;
186 u32 cid_q_prod_idx;
187 u32 cid_q_cons_idx;
188 u32 cid_q_max_idx;
189 u32 cid_free_cnt;
190 struct qedi_conn **conn_cid_tbl;
191};
192
193struct qedi_portid_tbl {
194 spinlock_t lock; /* Port id lock */
195 u16 start;
196 u16 max;
197 u16 next;
198 unsigned long *table;
199};
200
201struct qedi_itt_map {
202 __le32 itt;
203 struct qedi_cmd *p_cmd;
204};
205
206/* I/O tracing entry */
207#define QEDI_IO_TRACE_SIZE 2048
208struct qedi_io_log {
209#define QEDI_IO_TRACE_REQ 0
210#define QEDI_IO_TRACE_RSP 1
211 u8 direction;
212 u16 task_id;
213 u32 cid;
214 u32 port_id; /* Remote port fabric ID */
215 int lun;
216 u8 op; /* SCSI CDB */
217 u8 lba[4];
218 unsigned int bufflen; /* SCSI buffer length */
219 unsigned int sg_count; /* Number of SG elements */
220 u8 fast_sgs; /* number of fast sgls */
221 u8 slow_sgs; /* number of slow sgls */
222 u8 cached_sgs; /* number of cached sgls */
223 int result; /* Result passed back to mid-layer */
224 unsigned long jiffies; /* Time stamp when I/O logged */
225 int refcount; /* Reference count for task id */
226 unsigned int blk_req_cpu; /* CPU that the task is queued on by
227 * blk layer
228 */
229 unsigned int req_cpu; /* CPU that the task is queued on */
230 unsigned int intr_cpu; /* Interrupt CPU that the task is received on */
231 unsigned int blk_rsp_cpu;/* CPU that task is actually processed and
232 * returned to blk layer
233 */
234 bool cached_sge;
235 bool slow_sge;
236 bool fast_sge;
237};
238
239/* Number of entries in BDQ */
240#define QEDI_BDQ_NUM 256
241#define QEDI_BDQ_BUF_SIZE 256
242
243/* DMA coherent buffers for BDQ */
244struct qedi_bdq_buf {
245 void *buf_addr;
246 dma_addr_t buf_dma;
247};
248
249/* Main port level struct */
250struct qedi_ctx {
251 struct qedi_dbg_ctx dbg_ctx;
252 struct Scsi_Host *shost;
253 struct pci_dev *pdev;
254 struct qed_dev *cdev;
255 struct qed_dev_iscsi_info dev_info;
256 struct qed_int_info int_info;
257 struct qedi_glbl_q_params *p_cpuq;
258 struct global_queue **global_queues;
259 /* uio declaration */
260 struct qedi_uio_dev *udev;
261 struct list_head ll2_skb_list;
262 spinlock_t ll2_lock; /* Light L2 lock */
263 spinlock_t hba_lock; /* per port lock */
264 struct task_struct *ll2_recv_thread;
265 unsigned long flags;
266#define UIO_DEV_OPENED 1
267#define QEDI_IOTHREAD_WAKE 2
268#define QEDI_IN_RECOVERY 5
269#define QEDI_IN_OFFLINE 6
270
271 u8 mac[ETH_ALEN];
272 u32 src_ip[4];
273 u8 ip_type;
274
275 /* Physical address of above array */
276 dma_addr_t hw_p_cpuq;
277
278 struct qedi_bdq_buf bdq[QEDI_BDQ_NUM];
279 void *bdq_pbl;
280 dma_addr_t bdq_pbl_dma;
281 size_t bdq_pbl_mem_size;
282 void *bdq_pbl_list;
283 dma_addr_t bdq_pbl_list_dma;
284 u8 bdq_pbl_list_num_entries;
285 void __iomem *bdq_primary_prod;
286 void __iomem *bdq_secondary_prod;
287 u16 bdq_prod_idx;
288 u16 rq_num_entries;
289
290 u32 msix_count;
291 u32 max_sqes;
292 u8 num_queues;
293 u32 max_active_conns;
294
295 struct iscsi_cid_queue cid_que;
296 struct qedi_endpoint **ep_tbl;
297 struct qedi_portid_tbl lcl_port_tbl;
298
299 /* Rx fast path intr context */
300 struct qed_sb_info *sb_array;
301 struct qedi_fastpath *fp_array;
302 struct qed_iscsi_tid tasks;
303
304#define QEDI_LINK_DOWN 0
305#define QEDI_LINK_UP 1
306 atomic_t link_state;
307
308#define QEDI_RESERVE_TASK_ID 0
309#define MAX_ISCSI_TASK_ENTRIES 4096
310#define QEDI_INVALID_TASK_ID (MAX_ISCSI_TASK_ENTRIES + 1)
311 unsigned long task_idx_map[MAX_ISCSI_TASK_ENTRIES / BITS_PER_LONG];
312 struct qedi_itt_map *itt_map;
313 u16 tid_reuse_count[QEDI_MAX_ISCSI_TASK];
314 struct qed_pf_params pf_params;
315
316 struct workqueue_struct *tmf_thread;
317 struct workqueue_struct *offload_thread;
318
319 u16 ll2_mtu;
320
321 struct workqueue_struct *dpc_wq;
322
323 spinlock_t task_idx_lock; /* To protect gbl context */
324 s32 last_tidx_alloc;
325 s32 last_tidx_clear;
326
327 struct qedi_io_log io_trace_buf[QEDI_IO_TRACE_SIZE];
 328 spinlock_t io_trace_lock; /* protect trace log buf */
329 u16 io_trace_idx;
330 unsigned int intr_cpu;
331 u32 cached_sgls;
332 bool use_cached_sge;
333 u32 slow_sgls;
334 bool use_slow_sge;
335 u32 fast_sgls;
336 bool use_fast_sge;
337
338 atomic_t num_offloads;
339};
340
341struct qedi_work {
342 struct list_head list;
343 struct qedi_ctx *qedi;
344 union iscsi_cqe cqe;
345 u16 que_idx;
346 bool is_solicited;
347};
348
349struct qedi_percpu_s {
350 struct task_struct *iothread;
351 struct list_head work_list;
352 spinlock_t p_work_lock; /* Per cpu worker lock */
353};
354
355static inline void *qedi_get_task_mem(struct qed_iscsi_tid *info, u32 tid)
356{
357 return (info->blocks[tid / info->num_tids_per_block] +
358 (tid % info->num_tids_per_block) * info->size);
359}
360
361#define QEDI_U64_HI(val) ((u32)(((u64)(val)) >> 32))
362#define QEDI_U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
363
364#endif /* _QEDI_H_ */
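
qedi_get_task_mem() above resolves a TID to its firmware task context with
plain block arithmetic. A worked example, assuming num_tids_per_block = 64
and size = 1024: tid 200 lands in block 200 / 64 = 3 at byte offset
(200 % 64) * 1024 = 8192, so the returned pointer is
info->blocks[3] + 8192.
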
diff --git a/drivers/scsi/qedi/qedi_dbg.c b/drivers/scsi/qedi/qedi_dbg.c
new file mode 100644
index 000000000000..2bdedb9c39bc
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_dbg.c
@@ -0,0 +1,143 @@
1/*
2 * QLogic iSCSI Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#include "qedi_dbg.h"
11#include <linux/vmalloc.h>
12
13void
14qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
15 const char *fmt, ...)
16{
17 va_list va;
18 struct va_format vaf;
19 char nfunc[32];
20
21 memset(nfunc, 0, sizeof(nfunc));
 22 strncpy(nfunc, func, sizeof(nfunc) - 1);
23
24 va_start(va, fmt);
25
26 vaf.fmt = fmt;
27 vaf.va = &va;
28
29 if (likely(qedi) && likely(qedi->pdev))
30 pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
31 nfunc, line, qedi->host_no, &vaf);
32 else
33 pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
34
35 va_end(va);
36}
37
38void
39qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
40 const char *fmt, ...)
41{
42 va_list va;
43 struct va_format vaf;
44 char nfunc[32];
45
46 memset(nfunc, 0, sizeof(nfunc));
 47 strncpy(nfunc, func, sizeof(nfunc) - 1);
 48
 49 if (!(qedi_dbg_log & QEDI_LOG_WARN))
 50 return;
 51
 52 va_start(va, fmt);
 53
 54 vaf.fmt = fmt;
 55 vaf.va = &va;
56
57 if (likely(qedi) && likely(qedi->pdev))
58 pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
59 nfunc, line, qedi->host_no, &vaf);
60 else
61 pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
62
63 va_end(va);
64}
65
66void
67qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
68 const char *fmt, ...)
69{
70 va_list va;
71 struct va_format vaf;
72 char nfunc[32];
73
74 memset(nfunc, 0, sizeof(nfunc));
 75 strncpy(nfunc, func, sizeof(nfunc) - 1);
 76
 77 if (!(qedi_dbg_log & QEDI_LOG_NOTICE))
 78 return;
 79
 80 va_start(va, fmt);
 81
 82 vaf.fmt = fmt;
 83 vaf.va = &va;
84
85 if (likely(qedi) && likely(qedi->pdev))
86 pr_notice("[%s]:[%s:%d]:%d: %pV",
87 dev_name(&qedi->pdev->dev), nfunc, line,
88 qedi->host_no, &vaf);
89 else
90 pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
91
92 va_end(va);
93}
94
95void
96qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
97 u32 level, const char *fmt, ...)
98{
99 va_list va;
100 struct va_format vaf;
101 char nfunc[32];
102
103 memset(nfunc, 0, sizeof(nfunc));
 104 strncpy(nfunc, func, sizeof(nfunc) - 1);
 105
 106 if (!(qedi_dbg_log & level))
 107 return;
 108
 109 va_start(va, fmt);
 110
 111 vaf.fmt = fmt;
 112 vaf.va = &va;
113
114 if (likely(qedi) && likely(qedi->pdev))
115 pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
116 nfunc, line, qedi->host_no, &vaf);
117 else
118 pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
119
120 va_end(va);
121}
122
123int
124qedi_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
125{
126 int ret = 0;
127
128 for (; iter->name; iter++) {
129 ret = sysfs_create_bin_file(&shost->shost_gendev.kobj,
130 iter->attr);
131 if (ret)
132 pr_err("Unable to create sysfs %s attr, err(%d).\n",
133 iter->name, ret);
134 }
135 return ret;
136}
137
138void
139qedi_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
140{
141 for (; iter->name; iter++)
142 sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr);
143}
diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h
new file mode 100644
index 000000000000..c55572badfb0
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_dbg.h
@@ -0,0 +1,144 @@
1/*
2 * QLogic iSCSI Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#ifndef _QEDI_DBG_H_
11#define _QEDI_DBG_H_
12
13#include <linux/types.h>
14#include <linux/kernel.h>
15#include <linux/compiler.h>
16#include <linux/string.h>
17#include <linux/version.h>
18#include <linux/pci.h>
19#include <linux/delay.h>
20#include <scsi/scsi_transport.h>
21#include <scsi/scsi_transport_iscsi.h>
22#include <linux/fs.h>
23
24#define __PREVENT_QED_HSI__
25#include <linux/qed/common_hsi.h>
26#include <linux/qed/qed_if.h>
27
28extern uint qedi_dbg_log;
29
30/* Debug print level definitions */
31#define QEDI_LOG_DEFAULT 0x1 /* Set default logging mask */
32#define QEDI_LOG_INFO 0x2 /* Informational logs,
33 * MAC address, WWPN, WWNN
34 */
35#define QEDI_LOG_DISC 0x4 /* Init, discovery, rport */
36#define QEDI_LOG_LL2 0x8 /* LL2, VLAN logs */
37#define QEDI_LOG_CONN 0x10 /* Connection setup, cleanup */
38#define QEDI_LOG_EVT 0x20 /* Events, link, mtu */
39#define QEDI_LOG_TIMER 0x40 /* Timer events */
40#define QEDI_LOG_MP_REQ 0x80 /* Middle Path (MP) logs */
41#define QEDI_LOG_SCSI_TM 0x100 /* SCSI Aborts, Task Mgmt */
42#define QEDI_LOG_UNSOL 0x200 /* unsolicited event logs */
43#define QEDI_LOG_IO 0x400 /* scsi cmd, completion */
44#define QEDI_LOG_MQ 0x800 /* Multi Queue logs */
45#define QEDI_LOG_BSG 0x1000 /* BSG logs */
46#define QEDI_LOG_DEBUGFS 0x2000 /* debugFS logs */
47#define QEDI_LOG_LPORT 0x4000 /* lport logs */
48#define QEDI_LOG_ELS 0x8000 /* ELS logs */
49#define QEDI_LOG_NPIV 0x10000 /* NPIV logs */
 50#define QEDI_LOG_SESS 0x20000 /* Connection setup, cleanup */
51#define QEDI_LOG_UIO 0x40000 /* iSCSI UIO logs */
52#define QEDI_LOG_TID 0x80000 /* FW TID context acquire,
53 * free
54 */
55#define QEDI_TRACK_TID 0x100000 /* Track TID state. To be
56 * enabled only at module load
57 * and not run-time.
58 */
59#define QEDI_TRACK_CMD_LIST 0x300000 /* Track active cmd list nodes,
60 * done with reference to TID,
61 * hence TRACK_TID also enabled.
62 */
63#define QEDI_LOG_NOTICE 0x40000000 /* Notice logs */
64#define QEDI_LOG_WARN 0x80000000 /* Warning logs */
65
66/* Debug context structure */
67struct qedi_dbg_ctx {
68 unsigned int host_no;
69 struct pci_dev *pdev;
70#ifdef CONFIG_DEBUG_FS
71 struct dentry *bdf_dentry;
72#endif
73};
74
75#define QEDI_ERR(pdev, fmt, ...) \
76 qedi_dbg_err(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
77#define QEDI_WARN(pdev, fmt, ...) \
78 qedi_dbg_warn(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
79#define QEDI_NOTICE(pdev, fmt, ...) \
80 qedi_dbg_notice(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
81#define QEDI_INFO(pdev, level, fmt, ...) \
82 qedi_dbg_info(pdev, __func__, __LINE__, level, fmt, \
83 ## __VA_ARGS__)
84
85void qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
86 const char *fmt, ...);
87void qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
88 const char *fmt, ...);
89void qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
90 const char *fmt, ...);
91void qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
92 u32 info, const char *fmt, ...);
93
94struct Scsi_Host;
95
96struct sysfs_bin_attrs {
97 char *name;
98 struct bin_attribute *attr;
99};
100
101int qedi_create_sysfs_attr(struct Scsi_Host *shost,
102 struct sysfs_bin_attrs *iter);
103void qedi_remove_sysfs_attr(struct Scsi_Host *shost,
104 struct sysfs_bin_attrs *iter);
105
106#ifdef CONFIG_DEBUG_FS
107/* DebugFS related code */
108struct qedi_list_of_funcs {
109 char *oper_str;
110 ssize_t (*oper_func)(struct qedi_dbg_ctx *qedi);
111};
112
113struct qedi_debugfs_ops {
114 char *name;
115 struct qedi_list_of_funcs *qedi_funcs;
116};
117
118#define qedi_dbg_fileops(drv, ops) \
119{ \
120 .owner = THIS_MODULE, \
121 .open = simple_open, \
122 .read = drv##_dbg_##ops##_cmd_read, \
123 .write = drv##_dbg_##ops##_cmd_write \
124}
125
126/* Used for debugfs sequential files */
127#define qedi_dbg_fileops_seq(drv, ops) \
128{ \
129 .owner = THIS_MODULE, \
130 .open = drv##_dbg_##ops##_open, \
131 .read = seq_read, \
132 .llseek = seq_lseek, \
133 .release = single_release, \
134}
135
136void qedi_dbg_host_init(struct qedi_dbg_ctx *qedi,
137 struct qedi_debugfs_ops *dops,
138 const struct file_operations *fops);
139void qedi_dbg_host_exit(struct qedi_dbg_ctx *qedi);
140void qedi_dbg_init(char *drv_name);
141void qedi_dbg_exit(void);
142#endif /* CONFIG_DEBUG_FS */
143
144#endif /* _QEDI_DBG_H_ */
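
The QEDI_* wrappers above capture __func__ and __LINE__ automatically, so a
call site supplies only the debug context, a level for QEDI_INFO, and the
format string. A hypothetical call site:

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
		  "link state %d\n", atomic_read(&qedi->link_state));
	QEDI_ERR(&qedi->dbg_ctx, "cid %u not found\n", cid);
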
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
new file mode 100644
index 000000000000..955936274241
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -0,0 +1,244 @@
1/*
2 * QLogic iSCSI Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#include "qedi.h"
11#include "qedi_dbg.h"
12
13#include <linux/uaccess.h>
14#include <linux/debugfs.h>
15#include <linux/module.h>
16
17int do_not_recover;
18static struct dentry *qedi_dbg_root;
19
20void
21qedi_dbg_host_init(struct qedi_dbg_ctx *qedi,
22 struct qedi_debugfs_ops *dops,
23 const struct file_operations *fops)
24{
25 char host_dirname[32];
26 struct dentry *file_dentry = NULL;
27
28 sprintf(host_dirname, "host%u", qedi->host_no);
29 qedi->bdf_dentry = debugfs_create_dir(host_dirname, qedi_dbg_root);
30 if (!qedi->bdf_dentry)
31 return;
32
33 while (dops) {
34 if (!(dops->name))
35 break;
36
37 file_dentry = debugfs_create_file(dops->name, 0600,
38 qedi->bdf_dentry, qedi,
39 fops);
40 if (!file_dentry) {
41 QEDI_INFO(qedi, QEDI_LOG_DEBUGFS,
42 "Debugfs entry %s creation failed\n",
43 dops->name);
44 debugfs_remove_recursive(qedi->bdf_dentry);
45 return;
46 }
47 dops++;
48 fops++;
49 }
50}
51
52void
53qedi_dbg_host_exit(struct qedi_dbg_ctx *qedi)
54{
55 debugfs_remove_recursive(qedi->bdf_dentry);
56 qedi->bdf_dentry = NULL;
57}
58
59void
60qedi_dbg_init(char *drv_name)
61{
62 qedi_dbg_root = debugfs_create_dir(drv_name, NULL);
63 if (!qedi_dbg_root)
64 QEDI_INFO(NULL, QEDI_LOG_DEBUGFS, "Init of debugfs failed\n");
65}
66
67void
68qedi_dbg_exit(void)
69{
70 debugfs_remove_recursive(qedi_dbg_root);
71 qedi_dbg_root = NULL;
72}
73
74static ssize_t
75qedi_dbg_do_not_recover_enable(struct qedi_dbg_ctx *qedi_dbg)
76{
77 if (!do_not_recover)
78 do_not_recover = 1;
79
80 QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
81 do_not_recover);
82 return 0;
83}
84
85static ssize_t
86qedi_dbg_do_not_recover_disable(struct qedi_dbg_ctx *qedi_dbg)
87{
88 if (do_not_recover)
89 do_not_recover = 0;
90
91 QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
92 do_not_recover);
93 return 0;
94}
95
96static struct qedi_list_of_funcs qedi_dbg_do_not_recover_ops[] = {
97 { "enable", qedi_dbg_do_not_recover_enable },
98 { "disable", qedi_dbg_do_not_recover_disable },
99 { NULL, NULL }
100};
101
102struct qedi_debugfs_ops qedi_debugfs_ops[] = {
103 { "gbl_ctx", NULL },
104 { "do_not_recover", qedi_dbg_do_not_recover_ops},
105 { "io_trace", NULL },
106 { NULL, NULL }
107};
108
109static ssize_t
110qedi_dbg_do_not_recover_cmd_write(struct file *filp, const char __user *buffer,
111 size_t count, loff_t *ppos)
112{
 113 size_t cnt = 0;
 114 char cmd[32] = "";
 115 struct qedi_dbg_ctx *qedi_dbg =
 116 (struct qedi_dbg_ctx *)filp->private_data;
 117 struct qedi_list_of_funcs *lof = qedi_dbg_do_not_recover_ops;
 118
 119 if (*ppos)
 120 return 0;
 121
 122 /* buffer is a user pointer; copy it in before parsing */
 123 if (copy_from_user(cmd, buffer, min(count, sizeof(cmd) - 1)))
 124 return -EFAULT;
 125
 126 for (; lof->oper_str; lof++) {
 127 if (!strncmp(lof->oper_str, cmd, strlen(lof->oper_str))) {
 128 cnt = lof->oper_func(qedi_dbg);
 129 break;
 130 }
 131 }
 132 return (count - cnt);
133}
134
135static ssize_t
136qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer,
137 size_t count, loff_t *ppos)
138{
139 size_t cnt = 0;
140
141 if (*ppos)
142 return 0;
143
144 cnt = sprintf(buffer, "do_not_recover=%d\n", do_not_recover);
145 cnt = min_t(int, count, cnt - *ppos);
146 *ppos += cnt;
147 return cnt;
148}
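Both handlers above touch the __user buffer directly: the write path runs strncmp() against it and the read path sprintf()s into it, neither of which is safe outside the kernel address space. A minimal sketch of safer variants under the same globals and struct layouts (function names hypothetical; bounce-buffer sizes illustrative):

    static ssize_t
    safer_do_not_recover_write(struct file *filp, const char __user *buffer,
                               size_t count, loff_t *ppos)
    {
            char tmp[16] = { 0 };                  /* illustrative size */
            struct qedi_dbg_ctx *qedi_dbg = filp->private_data;
            struct qedi_list_of_funcs *lof = qedi_dbg_do_not_recover_ops;
            size_t len = min(count, sizeof(tmp) - 1);

            if (*ppos)
                    return 0;

            if (copy_from_user(tmp, buffer, len))
                    return -EFAULT;

            for (; lof->oper_str; lof++)
                    if (!strncmp(lof->oper_str, tmp, strlen(lof->oper_str))) {
                            lof->oper_func(qedi_dbg);
                            break;
                    }

            return count;
    }

    static ssize_t
    safer_do_not_recover_read(struct file *filp, char __user *buffer,
                              size_t count, loff_t *ppos)
    {
            char tmp[32];
            int len = scnprintf(tmp, sizeof(tmp), "do_not_recover=%d\n",
                                do_not_recover);

            /* handles the copy_to_user() and *ppos bookkeeping */
            return simple_read_from_buffer(buffer, count, ppos, tmp, len);
    }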
149
150static int
151qedi_gbl_ctx_show(struct seq_file *s, void *unused)
152{
153 struct qedi_fastpath *fp = NULL;
154 struct qed_sb_info *sb_info = NULL;
155 struct status_block *sb = NULL;
156 struct global_queue *que = NULL;
157 int id;
158 u16 prod_idx;
159 struct qedi_ctx *qedi = s->private;
160 unsigned long flags;
161
162 seq_puts(s, " DUMP CQ CONTEXT:\n");
163
164 for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
165 spin_lock_irqsave(&qedi->hba_lock, flags);
166 seq_printf(s, "=========FAST CQ PATH [%d] ==========\n", id);
167 fp = &qedi->fp_array[id];
168 sb_info = fp->sb_info;
169 sb = sb_info->sb_virt;
170 prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] &
171 STATUS_BLOCK_PROD_INDEX_MASK);
172 seq_printf(s, "SB PROD IDX: %d\n", prod_idx);
173 que = qedi->global_queues[fp->sb_id];
174 seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx);
175 seq_printf(s, "CQ complete host memory: %d\n", fp->sb_id);
176 seq_puts(s, "=========== END ==================\n\n\n");
177 spin_unlock_irqrestore(&qedi->hba_lock, flags);
178 }
179 return 0;
180}
181
182static int
183qedi_dbg_gbl_ctx_open(struct inode *inode, struct file *file)
184{
185 struct qedi_dbg_ctx *qedi_dbg = inode->i_private;
186 struct qedi_ctx *qedi = container_of(qedi_dbg, struct qedi_ctx,
187 dbg_ctx);
188
189 return single_open(file, qedi_gbl_ctx_show, qedi);
190}
191
192static int
193qedi_io_trace_show(struct seq_file *s, void *unused)
194{
195 int id, idx = 0;
196 struct qedi_ctx *qedi = s->private;
197 struct qedi_io_log *io_log;
198 unsigned long flags;
199
200 seq_puts(s, " DUMP IO LOGS:\n");
201 spin_lock_irqsave(&qedi->io_trace_lock, flags);
202 idx = qedi->io_trace_idx;
203 for (id = 0; id < QEDI_IO_TRACE_SIZE; id++) {
204 io_log = &qedi->io_trace_buf[idx];
205 seq_printf(s, "iodir-%d:", io_log->direction);
206 seq_printf(s, "tid-0x%x:", io_log->task_id);
207 seq_printf(s, "cid-0x%x:", io_log->cid);
208 seq_printf(s, "lun-%d:", io_log->lun);
209 seq_printf(s, "op-0x%02x:", io_log->op);
210 seq_printf(s, "0x%02x%02x%02x%02x:", io_log->lba[0],
211 io_log->lba[1], io_log->lba[2], io_log->lba[3]);
212 seq_printf(s, "buflen-%d:", io_log->bufflen);
213 seq_printf(s, "sgcnt-%d:", io_log->sg_count);
214 seq_printf(s, "res-0x%08x:", io_log->result);
215 seq_printf(s, "jif-%lu:", io_log->jiffies);
216 seq_printf(s, "blk_req_cpu-%d:", io_log->blk_req_cpu);
217 seq_printf(s, "req_cpu-%d:", io_log->req_cpu);
218 seq_printf(s, "intr_cpu-%d:", io_log->intr_cpu);
219 seq_printf(s, "blk_rsp_cpu-%d\n", io_log->blk_rsp_cpu);
220
221 idx++;
222 if (idx == QEDI_IO_TRACE_SIZE)
223 idx = 0;
224 }
225 spin_unlock_irqrestore(&qedi->io_trace_lock, flags);
226 return 0;
227}
228
229static int
230qedi_dbg_io_trace_open(struct inode *inode, struct file *file)
231{
232 struct qedi_dbg_ctx *qedi_dbg = inode->i_private;
233 struct qedi_ctx *qedi = container_of(qedi_dbg, struct qedi_ctx,
234 dbg_ctx);
235
236 return single_open(file, qedi_io_trace_show, qedi);
237}
238
239const struct file_operations qedi_dbg_fops[] = {
240 qedi_dbg_fileops_seq(qedi, gbl_ctx),
241 qedi_dbg_fileops(qedi, do_not_recover),
242 qedi_dbg_fileops_seq(qedi, io_trace),
243 { NULL, NULL },
244};
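qedi_dbg_host_init() walks qedi_debugfs_ops[] and this qedi_dbg_fops[] array in lockstep, so the two must stay the same length, in the same order, and NULL-terminated. A hypothetical new "stats" node would therefore touch both tables:

    /* hypothetical addition; both arrays gain an entry in the same slot */
    struct qedi_debugfs_ops qedi_debugfs_ops[] = {
            { "gbl_ctx", NULL },
            { "do_not_recover", qedi_dbg_do_not_recover_ops },
            { "io_trace", NULL },
            { "stats", NULL },                       /* new entry */
            { NULL, NULL }
    };

    const struct file_operations qedi_dbg_fops[] = {
            qedi_dbg_fileops_seq(qedi, gbl_ctx),
            qedi_dbg_fileops(qedi, do_not_recover),
            qedi_dbg_fileops_seq(qedi, io_trace),
            qedi_dbg_fileops_seq(qedi, stats),       /* matching slot */
            { NULL, NULL },
    };

The seq variant would also need a qedi_dbg_stats_open() defined, per the macro expansion shown earlier.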
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
new file mode 100644
index 000000000000..b1d3904ae8fd
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -0,0 +1,2378 @@
1/*
2 * QLogic iSCSI Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#include <linux/blkdev.h>
11#include <scsi/scsi_tcq.h>
12#include <linux/delay.h>
13
14#include "qedi.h"
15#include "qedi_iscsi.h"
16#include "qedi_gbl.h"
17
18static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
19 struct iscsi_task *mtask);
20
21void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd)
22{
23 struct scsi_cmnd *sc = cmd->scsi_cmd;
24
25 if (cmd->io_tbl.sge_valid && sc) {
26 cmd->io_tbl.sge_valid = 0;
27 scsi_dma_unmap(sc);
28 }
29}
30
31static void qedi_process_logout_resp(struct qedi_ctx *qedi,
32 union iscsi_cqe *cqe,
33 struct iscsi_task *task,
34 struct qedi_conn *qedi_conn)
35{
36 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
37 struct iscsi_logout_rsp *resp_hdr;
38 struct iscsi_session *session = conn->session;
39 struct iscsi_logout_response_hdr *cqe_logout_response;
40 struct qedi_cmd *cmd;
41
42 cmd = (struct qedi_cmd *)task->dd_data;
43 cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response;
44 spin_lock(&session->back_lock);
45 resp_hdr = (struct iscsi_logout_rsp *)&qedi_conn->gen_pdu.resp_hdr;
46 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
47 resp_hdr->opcode = cqe_logout_response->opcode;
48 resp_hdr->flags = cqe_logout_response->flags;
49 resp_hdr->hlength = 0;
50
51 resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
52 resp_hdr->statsn = cpu_to_be32(cqe_logout_response->stat_sn);
53 resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn);
54 resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn);
55
56 resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time2wait);
57 resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time2retain);
58
59 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
60 "Freeing tid=0x%x for cid=0x%x\n",
61 cmd->task_id, qedi_conn->iscsi_conn_id);
62
63 if (likely(cmd->io_cmd_in_list)) {
64 cmd->io_cmd_in_list = false;
65 list_del_init(&cmd->io_cmd);
66 qedi_conn->active_cmd_count--;
67 } else {
68 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
69 "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
70 cmd->task_id, qedi_conn->iscsi_conn_id,
71 &cmd->io_cmd);
72 }
73
74 cmd->state = RESPONSE_RECEIVED;
75 qedi_clear_task_idx(qedi, cmd->task_id);
76 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
77
78 spin_unlock(&session->back_lock);
79}
80
81static void qedi_process_text_resp(struct qedi_ctx *qedi,
82 union iscsi_cqe *cqe,
83 struct iscsi_task *task,
84 struct qedi_conn *qedi_conn)
85{
86 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
87 struct iscsi_session *session = conn->session;
88 struct iscsi_task_context *task_ctx;
89 struct iscsi_text_rsp *resp_hdr_ptr;
90 struct iscsi_text_response_hdr *cqe_text_response;
91 struct qedi_cmd *cmd;
92 int pld_len;
93 u32 *tmp;
94
95 cmd = (struct qedi_cmd *)task->dd_data;
96 task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
97
98 cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response;
99 spin_lock(&session->back_lock);
100 resp_hdr_ptr = (struct iscsi_text_rsp *)&qedi_conn->gen_pdu.resp_hdr;
101 memset(resp_hdr_ptr, 0, sizeof(struct iscsi_hdr));
102 resp_hdr_ptr->opcode = cqe_text_response->opcode;
103 resp_hdr_ptr->flags = cqe_text_response->flags;
104 resp_hdr_ptr->hlength = 0;
105
106 hton24(resp_hdr_ptr->dlength,
107 (cqe_text_response->hdr_second_dword &
108 ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK));
109 tmp = (u32 *)resp_hdr_ptr->dlength;
110
111 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
112 conn->session->age);
113 resp_hdr_ptr->ttt = cqe_text_response->ttt;
114 resp_hdr_ptr->statsn = cpu_to_be32(cqe_text_response->stat_sn);
115 resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_text_response->exp_cmd_sn);
116 resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_text_response->max_cmd_sn);
117
118 pld_len = cqe_text_response->hdr_second_dword &
119 ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK;
120 qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
121
122 memset(task_ctx, '\0', sizeof(*task_ctx));
123
124 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
125 "Freeing tid=0x%x for cid=0x%x\n",
126 cmd->task_id, qedi_conn->iscsi_conn_id);
127
128 if (likely(cmd->io_cmd_in_list)) {
129 cmd->io_cmd_in_list = false;
130 list_del_init(&cmd->io_cmd);
131 qedi_conn->active_cmd_count--;
132 } else {
133 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
134 "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
135 cmd->task_id, qedi_conn->iscsi_conn_id,
136 &cmd->io_cmd);
137 }
138
139 cmd->state = RESPONSE_RECEIVED;
140 qedi_clear_task_idx(qedi, cmd->task_id);
141
142 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
143 qedi_conn->gen_pdu.resp_buf,
144 (qedi_conn->gen_pdu.resp_wr_ptr -
145 qedi_conn->gen_pdu.resp_buf));
146 spin_unlock(&session->back_lock);
147}
148
149static void qedi_tmf_resp_work(struct work_struct *work)
150{
151 struct qedi_cmd *qedi_cmd =
152 container_of(work, struct qedi_cmd, tmf_work);
153 struct qedi_conn *qedi_conn = qedi_cmd->conn;
154 struct qedi_ctx *qedi = qedi_conn->qedi;
155 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
156 struct iscsi_session *session = conn->session;
157 struct iscsi_tm_rsp *resp_hdr_ptr;
158 struct iscsi_cls_session *cls_sess;
159 int rval = 0;
160
161 set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
162 resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
163 cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);
164
165 iscsi_block_session(session->cls_session);
166 rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
167 if (rval) {
168 clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
169 qedi_clear_task_idx(qedi, qedi_cmd->task_id);
170 iscsi_unblock_session(session->cls_session);
171 return;
172 }
173
174 iscsi_unblock_session(session->cls_session);
175 qedi_clear_task_idx(qedi, qedi_cmd->task_id);
176
177 spin_lock(&session->back_lock);
178 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
179 spin_unlock(&session->back_lock);
180 kfree(resp_hdr_ptr);
181 clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
182}
183
184static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
185 union iscsi_cqe *cqe,
186 struct iscsi_task *task,
187 struct qedi_conn *qedi_conn)
188
189{
190 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
191 struct iscsi_session *session = conn->session;
192 struct iscsi_tmf_response_hdr *cqe_tmp_response;
193 struct iscsi_tm_rsp *resp_hdr_ptr;
194 struct iscsi_tm *tmf_hdr;
195 struct qedi_cmd *qedi_cmd = NULL;
196 u32 *tmp;
197
198 cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;
199
200 qedi_cmd = task->dd_data;
201 qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_KERNEL);
202 if (!qedi_cmd->tmf_resp_buf) {
203 QEDI_ERR(&qedi->dbg_ctx,
204 "Failed to allocate resp buf, cid=0x%x\n",
205 qedi_conn->iscsi_conn_id);
206 return;
207 }
208
209 spin_lock(&session->back_lock);
210 resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
211 memset(resp_hdr_ptr, 0, sizeof(struct iscsi_tm_rsp));
212
213 /* Fill in the header */
214 resp_hdr_ptr->opcode = cqe_tmp_response->opcode;
215 resp_hdr_ptr->flags = cqe_tmp_response->hdr_flags;
216 resp_hdr_ptr->response = cqe_tmp_response->hdr_response;
217 resp_hdr_ptr->hlength = 0;
218
219 hton24(resp_hdr_ptr->dlength,
220 (cqe_tmp_response->hdr_second_dword &
221 ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK));
222 tmp = (u32 *)resp_hdr_ptr->dlength;
223 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
224 conn->session->age);
225 resp_hdr_ptr->statsn = cpu_to_be32(cqe_tmp_response->stat_sn);
226 resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_tmp_response->exp_cmd_sn);
227 resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_tmp_response->max_cmd_sn);
228
229 tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
230
231 if (likely(qedi_cmd->io_cmd_in_list)) {
232 qedi_cmd->io_cmd_in_list = false;
233 list_del_init(&qedi_cmd->io_cmd);
234 qedi_conn->active_cmd_count--;
235 }
236
237 if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
238 ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
239 ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
240 ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
241 ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
242 ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
243 INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_resp_work);
244 queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
245 goto unblock_sess;
246 }
247
248 qedi_clear_task_idx(qedi, qedi_cmd->task_id);
249
250 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
251 kfree(resp_hdr_ptr);
252
253unblock_sess:
254 spin_unlock(&session->back_lock);
255}
256
257static void qedi_process_login_resp(struct qedi_ctx *qedi,
258 union iscsi_cqe *cqe,
259 struct iscsi_task *task,
260 struct qedi_conn *qedi_conn)
261{
262 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
263 struct iscsi_session *session = conn->session;
264 struct iscsi_task_context *task_ctx;
265 struct iscsi_login_rsp *resp_hdr_ptr;
266 struct iscsi_login_response_hdr *cqe_login_response;
267 struct qedi_cmd *cmd;
268 int pld_len;
269 u32 *tmp;
270
271 cmd = (struct qedi_cmd *)task->dd_data;
272
273 cqe_login_response = &cqe->cqe_common.iscsi_hdr.login_response;
274 task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
275
276 spin_lock(&session->back_lock);
277 resp_hdr_ptr = (struct iscsi_login_rsp *)&qedi_conn->gen_pdu.resp_hdr;
278 memset(resp_hdr_ptr, 0, sizeof(struct iscsi_login_rsp));
279 resp_hdr_ptr->opcode = cqe_login_response->opcode;
280 resp_hdr_ptr->flags = cqe_login_response->flags_attr;
281 resp_hdr_ptr->hlength = 0;
282
283 hton24(resp_hdr_ptr->dlength,
284 (cqe_login_response->hdr_second_dword &
285 ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK));
286 tmp = (u32 *)resp_hdr_ptr->dlength;
287 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
288 conn->session->age);
289 resp_hdr_ptr->tsih = cqe_login_response->tsih;
290 resp_hdr_ptr->statsn = cpu_to_be32(cqe_login_response->stat_sn);
291 resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_login_response->exp_cmd_sn);
292 resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_login_response->max_cmd_sn);
293 resp_hdr_ptr->status_class = cqe_login_response->status_class;
294 resp_hdr_ptr->status_detail = cqe_login_response->status_detail;
295 pld_len = cqe_login_response->hdr_second_dword &
296 ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
297 qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
298
299 if (likely(cmd->io_cmd_in_list)) {
300 cmd->io_cmd_in_list = false;
301 list_del_init(&cmd->io_cmd);
302 qedi_conn->active_cmd_count--;
303 }
304
305 memset(task_ctx, '\0', sizeof(*task_ctx));
306
307 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
308 qedi_conn->gen_pdu.resp_buf,
309 (qedi_conn->gen_pdu.resp_wr_ptr -
310 qedi_conn->gen_pdu.resp_buf));
311
312 spin_unlock(&session->back_lock);
313 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
314 "Freeing tid=0x%x for cid=0x%x\n",
315 cmd->task_id, qedi_conn->iscsi_conn_id);
316 cmd->state = RESPONSE_RECEIVED;
317 qedi_clear_task_idx(qedi, cmd->task_id);
318}
319
320static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
321 struct iscsi_cqe_unsolicited *cqe,
322 char *ptr, int len)
323{
324 u16 idx = 0;
325
326 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
327 "pld_len [%d], bdq_prod_idx [%d], idx [%d]\n",
328 len, qedi->bdq_prod_idx,
329 (qedi->bdq_prod_idx % qedi->rq_num_entries));
330
331 /* Obtain buffer address from rqe_opaque */
332 idx = cqe->rqe_opaque.lo;
333 if (idx > (QEDI_BDQ_NUM - 1)) { /* idx is u16, cannot be negative */
334 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
335 "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
336 idx);
337 return;
338 }
339
340 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
341 "rqe_opaque.lo [0x%p], rqe_opaque.hi [0x%p], idx [%d]\n",
342 cqe->rqe_opaque.lo, cqe->rqe_opaque.hi, idx);
343
344 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
345 "unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
346 switch (cqe->unsol_cqe_type) {
347 case ISCSI_CQE_UNSOLICITED_SINGLE:
348 case ISCSI_CQE_UNSOLICITED_FIRST:
349 if (len)
350 memcpy(ptr, (void *)qedi->bdq[idx].buf_addr, len);
351 break;
352 case ISCSI_CQE_UNSOLICITED_MIDDLE:
353 case ISCSI_CQE_UNSOLICITED_LAST:
354 break;
355 default:
356 break;
357 }
358}
359
360static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
361 struct iscsi_cqe_unsolicited *cqe,
362 int count)
363{
364 u16 tmp;
365 u16 idx = 0;
366 struct scsi_bd *pbl;
367
368 /* Obtain buffer address from rqe_opaque */
369 idx = cqe->rqe_opaque.lo;
370 if (idx > (QEDI_BDQ_NUM - 1)) { /* idx is u16, cannot be negative */
371 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
372 "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
373 idx);
374 return;
375 }
376
377 pbl = (struct scsi_bd *)qedi->bdq_pbl;
378 pbl += (qedi->bdq_prod_idx % qedi->rq_num_entries);
379 pbl->address.hi = cpu_to_le32(QEDI_U64_HI(qedi->bdq[idx].buf_dma));
380 pbl->address.lo = cpu_to_le32(QEDI_U64_LO(qedi->bdq[idx].buf_dma));
381 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
382 "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n",
383 pbl, pbl->address.hi, pbl->address.lo, idx);
384 pbl->opaque.hi = 0;
385 pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(idx));
386
387 /* Increment producer to let f/w know we've handled the frame */
388 qedi->bdq_prod_idx += count;
389
390 writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
391 tmp = readw(qedi->bdq_primary_prod);
392
393 writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
394 tmp = readw(qedi->bdq_secondary_prod);
395}
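The get/put pair above implements the BDQ recycle protocol: the payload is copied out of the buffer the firmware picked (identified by rqe_opaque.lo, a u16 index, hence only an upper-bounds check is meaningful), then the buffer is re-posted by rewriting its PBL entry and bumping the producer on both doorbell registers, with a readw() after each writew() to flush the posted write. A condensed restatement of the re-post step (helper name hypothetical):

    static void qedi_bdq_repost(struct qedi_ctx *qedi, u16 idx, int count)
    {
            struct scsi_bd *pbl = (struct scsi_bd *)qedi->bdq_pbl;

            pbl += qedi->bdq_prod_idx % qedi->rq_num_entries;
            pbl->address.hi = cpu_to_le32(QEDI_U64_HI(qedi->bdq[idx].buf_dma));
            pbl->address.lo = cpu_to_le32(QEDI_U64_LO(qedi->bdq[idx].buf_dma));
            pbl->opaque.hi = 0;
            pbl->opaque.lo = cpu_to_le32(idx);      /* FW hands this back */

            qedi->bdq_prod_idx += count;            /* one per BDQ consumed */
            writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
            readw(qedi->bdq_primary_prod);          /* flush posted write */
            writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
            readw(qedi->bdq_secondary_prod);
    }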
396
397static void qedi_unsol_pdu_adjust_bdq(struct qedi_ctx *qedi,
398 struct iscsi_cqe_unsolicited *cqe,
399 u32 pdu_len, u32 num_bdqs,
400 char *bdq_data)
401{
402 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
403 "num_bdqs [%d]\n", num_bdqs);
404
405 qedi_get_rq_bdq_buf(qedi, cqe, bdq_data, pdu_len);
406 qedi_put_rq_bdq_buf(qedi, cqe, (num_bdqs + 1));
407}
408
409static int qedi_process_nopin_mesg(struct qedi_ctx *qedi,
410 union iscsi_cqe *cqe,
411 struct iscsi_task *task,
412 struct qedi_conn *qedi_conn, u16 que_idx)
413{
414 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
415 struct iscsi_session *session = conn->session;
416 struct iscsi_nop_in_hdr *cqe_nop_in;
417 struct iscsi_nopin *hdr;
418 struct qedi_cmd *cmd;
419 int tgt_async_nop = 0;
420 u32 lun[2];
421 u32 pdu_len, num_bdqs;
422 char bdq_data[QEDI_BDQ_BUF_SIZE];
423 unsigned long flags;
424
425 spin_lock_bh(&session->back_lock);
426 cqe_nop_in = &cqe->cqe_common.iscsi_hdr.nop_in;
427
428 pdu_len = cqe_nop_in->hdr_second_dword &
429 ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK;
430 num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;
431
432 hdr = (struct iscsi_nopin *)&qedi_conn->gen_pdu.resp_hdr;
433 memset(hdr, 0, sizeof(struct iscsi_hdr));
434 hdr->opcode = cqe_nop_in->opcode;
435 hdr->max_cmdsn = cpu_to_be32(cqe_nop_in->max_cmd_sn);
436 hdr->exp_cmdsn = cpu_to_be32(cqe_nop_in->exp_cmd_sn);
437 hdr->statsn = cpu_to_be32(cqe_nop_in->stat_sn);
438 hdr->ttt = cpu_to_be32(cqe_nop_in->ttt);
439
440 if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
441 spin_lock_irqsave(&qedi->hba_lock, flags);
442 qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
443 pdu_len, num_bdqs, bdq_data);
444 hdr->itt = RESERVED_ITT;
445 tgt_async_nop = 1;
446 spin_unlock_irqrestore(&qedi->hba_lock, flags);
447 goto done;
448 }
449
450 /* Response to one of our nop-outs */
451 if (task) {
452 cmd = task->dd_data;
453 hdr->flags = ISCSI_FLAG_CMD_FINAL;
454 hdr->itt = build_itt(cqe->cqe_solicited.itid,
455 conn->session->age);
456 lun[0] = 0xffffffff;
457 lun[1] = 0xffffffff;
458 memcpy(&hdr->lun, lun, sizeof(struct scsi_lun));
459 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
460 "Freeing tid=0x%x for cid=0x%x\n",
461 cmd->task_id, qedi_conn->iscsi_conn_id);
462 cmd->state = RESPONSE_RECEIVED;
463 spin_lock(&qedi_conn->list_lock);
464 if (likely(cmd->io_cmd_in_list)) {
465 cmd->io_cmd_in_list = false;
466 list_del_init(&cmd->io_cmd);
467 qedi_conn->active_cmd_count--;
468 }
469
470 spin_unlock(&qedi_conn->list_lock);
471 qedi_clear_task_idx(qedi, cmd->task_id);
472 }
473
474done:
475 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, bdq_data, pdu_len);
476
477 spin_unlock_bh(&session->back_lock);
478 return tgt_async_nop;
479}
480
481static void qedi_process_async_mesg(struct qedi_ctx *qedi,
482 union iscsi_cqe *cqe,
483 struct iscsi_task *task,
484 struct qedi_conn *qedi_conn,
485 u16 que_idx)
486{
487 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
488 struct iscsi_session *session = conn->session;
489 struct iscsi_async_msg_hdr *cqe_async_msg;
490 struct iscsi_async *resp_hdr;
491 u32 lun[2];
492 u32 pdu_len, num_bdqs;
493 char bdq_data[QEDI_BDQ_BUF_SIZE];
494 unsigned long flags;
495
496 spin_lock_bh(&session->back_lock);
497
498 cqe_async_msg = &cqe->cqe_common.iscsi_hdr.async_msg;
499 pdu_len = cqe_async_msg->hdr_second_dword &
500 ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK;
501 num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;
502
503 if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
504 spin_lock_irqsave(&qedi->hba_lock, flags);
505 qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
506 pdu_len, num_bdqs, bdq_data);
507 spin_unlock_irqrestore(&qedi->hba_lock, flags);
508 }
509
510 resp_hdr = (struct iscsi_async *)&qedi_conn->gen_pdu.resp_hdr;
511 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
512 resp_hdr->opcode = cqe_async_msg->opcode;
513 resp_hdr->flags = 0x80;
514
515 lun[0] = cpu_to_be32(cqe_async_msg->lun.lo);
516 lun[1] = cpu_to_be32(cqe_async_msg->lun.hi);
517 memcpy(&resp_hdr->lun, lun, sizeof(struct scsi_lun));
518 resp_hdr->exp_cmdsn = cpu_to_be32(cqe_async_msg->exp_cmd_sn);
519 resp_hdr->max_cmdsn = cpu_to_be32(cqe_async_msg->max_cmd_sn);
520 resp_hdr->statsn = cpu_to_be32(cqe_async_msg->stat_sn);
521
522 resp_hdr->async_event = cqe_async_msg->async_event;
523 resp_hdr->async_vcode = cqe_async_msg->async_vcode;
524
525 resp_hdr->param1 = cpu_to_be16(cqe_async_msg->param1_rsrv);
526 resp_hdr->param2 = cpu_to_be16(cqe_async_msg->param2_rsrv);
527 resp_hdr->param3 = cpu_to_be16(cqe_async_msg->param3_rsrv);
528
529 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, bdq_data,
530 pdu_len);
531
532 spin_unlock_bh(&session->back_lock);
533}
534
535static void qedi_process_reject_mesg(struct qedi_ctx *qedi,
536 union iscsi_cqe *cqe,
537 struct iscsi_task *task,
538 struct qedi_conn *qedi_conn,
539 uint16_t que_idx)
540{
541 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
542 struct iscsi_session *session = conn->session;
543 struct iscsi_reject_hdr *cqe_reject;
544 struct iscsi_reject *hdr;
545 u32 pld_len, num_bdqs;
546 unsigned long flags;
547
548 spin_lock_bh(&session->back_lock);
549 cqe_reject = &cqe->cqe_common.iscsi_hdr.reject;
550 pld_len = cqe_reject->hdr_second_dword &
551 ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK;
552 num_bdqs = pld_len / QEDI_BDQ_BUF_SIZE;
553
554 if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
555 spin_lock_irqsave(&qedi->hba_lock, flags);
556 qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
557 pld_len, num_bdqs, conn->data);
558 spin_unlock_irqrestore(&qedi->hba_lock, flags);
559 }
560 hdr = (struct iscsi_reject *)&qedi_conn->gen_pdu.resp_hdr;
561 memset(hdr, 0, sizeof(struct iscsi_hdr));
562 hdr->opcode = cqe_reject->opcode;
563 hdr->reason = cqe_reject->hdr_reason;
564 hdr->flags = cqe_reject->hdr_flags;
565 hton24(hdr->dlength, (cqe_reject->hdr_second_dword &
566 ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK));
567 hdr->max_cmdsn = cpu_to_be32(cqe_reject->max_cmd_sn);
568 hdr->exp_cmdsn = cpu_to_be32(cqe_reject->exp_cmd_sn);
569 hdr->statsn = cpu_to_be32(cqe_reject->stat_sn);
570 hdr->ffffffff = cpu_to_be32(0xffffffff);
571
572 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
573 conn->data, pld_len);
574 spin_unlock_bh(&session->back_lock);
575}
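qedi_process_nopin_mesg(), qedi_process_async_mesg() and qedi_process_reject_mesg() all open with the same unsolicited-PDU preamble: extract the payload length, compute how many BDQ buffers it spans, and, if the CQE is unsolicited, drain and re-post those buffers under hba_lock. A condensed sketch of that shared sequence (qedi_unsol_preamble() is a hypothetical factoring, not a driver function):

    static void qedi_unsol_preamble(struct qedi_ctx *qedi, union iscsi_cqe *cqe,
                                    u32 pdu_len, char *data)
    {
            u32 num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;
            unsigned long flags;

            if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
                    spin_lock_irqsave(&qedi->hba_lock, flags);
                    qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
                                              pdu_len, num_bdqs, data);
                    spin_unlock_irqrestore(&qedi->hba_lock, flags);
            }
    }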
576
577static void qedi_scsi_completion(struct qedi_ctx *qedi,
578 union iscsi_cqe *cqe,
579 struct iscsi_task *task,
580 struct iscsi_conn *conn)
581{
582 struct scsi_cmnd *sc_cmd;
583 struct qedi_cmd *cmd = task->dd_data;
584 struct iscsi_session *session = conn->session;
585 struct iscsi_scsi_rsp *hdr;
586 struct iscsi_data_in_hdr *cqe_data_in;
587 int datalen = 0;
588 struct qedi_conn *qedi_conn;
589 u32 iscsi_cid;
590 bool mark_cmd_node_deleted = false;
591 u8 cqe_err_bits = 0;
592
593 iscsi_cid = cqe->cqe_common.conn_id;
594 qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
595
596 cqe_data_in = &cqe->cqe_common.iscsi_hdr.data_in;
597 cqe_err_bits =
598 cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;
599
600 spin_lock_bh(&session->back_lock);
601 /* get the scsi command */
602 sc_cmd = cmd->scsi_cmd;
603
604 if (!sc_cmd) {
605 QEDI_WARN(&qedi->dbg_ctx, "sc_cmd is NULL!\n");
606 goto error;
607 }
608
609 if (!sc_cmd->SCp.ptr) {
610 QEDI_WARN(&qedi->dbg_ctx,
611 "SCp.ptr is NULL, returned in another context.\n");
612 goto error;
613 }
614
615 if (!sc_cmd->request) {
616 QEDI_WARN(&qedi->dbg_ctx,
617 "sc_cmd->request is NULL, sc_cmd=%p.\n",
618 sc_cmd);
619 goto error;
620 }
621
622 if (!sc_cmd->request->special) {
623 QEDI_WARN(&qedi->dbg_ctx,
624 "request->special is NULL so request not valid, sc_cmd=%p.\n",
625 sc_cmd);
626 goto error;
627 }
628
629 if (!sc_cmd->request->q) {
630 QEDI_WARN(&qedi->dbg_ctx,
631 "request->q is NULL so request is not valid, sc_cmd=%p.\n",
632 sc_cmd);
633 goto error;
634 }
635
636 qedi_iscsi_unmap_sg_list(cmd);
637
638 hdr = (struct iscsi_scsi_rsp *)task->hdr;
639 hdr->opcode = cqe_data_in->opcode;
640 hdr->max_cmdsn = cpu_to_be32(cqe_data_in->max_cmd_sn);
641 hdr->exp_cmdsn = cpu_to_be32(cqe_data_in->exp_cmd_sn);
642 hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
643 hdr->response = cqe_data_in->reserved1;
644 hdr->cmd_status = cqe_data_in->status_rsvd;
645 hdr->flags = cqe_data_in->flags;
646 hdr->residual_count = cpu_to_be32(cqe_data_in->residual_count);
647
648 if (hdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
649 datalen = cqe_data_in->reserved2 &
650 ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK;
651 memcpy((char *)conn->data, (char *)cmd->sense_buffer, datalen);
652 }
653
654 /* If the f/w reports a data underrun error, set the residual to the IO
655 * transfer length, set the Underrun flag and clear the Overrun flag.
656 */
657 if (unlikely(cqe_err_bits &&
658 GET_FIELD(cqe_err_bits, CQE_ERROR_BITMAP_UNDER_RUN_ERR))) {
659 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
660 "Under flow itt=0x%x proto flags=0x%x tid=0x%x cid 0x%x fw resid 0x%x sc dlen 0x%x\n",
661 hdr->itt, cqe_data_in->flags, cmd->task_id,
662 qedi_conn->iscsi_conn_id, hdr->residual_count,
663 scsi_bufflen(sc_cmd));
664 hdr->residual_count = cpu_to_be32(scsi_bufflen(sc_cmd));
665 hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
666 hdr->flags &= (~ISCSI_FLAG_CMD_OVERFLOW);
667 }
668
669 spin_lock(&qedi_conn->list_lock);
670 if (likely(cmd->io_cmd_in_list)) {
671 cmd->io_cmd_in_list = false;
672 list_del_init(&cmd->io_cmd);
673 qedi_conn->active_cmd_count--;
674 mark_cmd_node_deleted = true;
675 }
676 spin_unlock(&qedi_conn->list_lock);
677
678 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
679 "Freeing tid=0x%x for cid=0x%x\n",
680 cmd->task_id, qedi_conn->iscsi_conn_id);
681 cmd->state = RESPONSE_RECEIVED;
682 if (qedi_io_tracing)
683 qedi_trace_io(qedi, task, cmd->task_id, QEDI_IO_TRACE_RSP);
684
685 qedi_clear_task_idx(qedi, cmd->task_id);
686 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
687 conn->data, datalen);
688error:
689 spin_unlock_bh(&session->back_lock);
690}
691
692static void qedi_mtask_completion(struct qedi_ctx *qedi,
693 union iscsi_cqe *cqe,
694 struct iscsi_task *task,
695 struct qedi_conn *conn, uint16_t que_idx)
696{
697 struct iscsi_conn *iscsi_conn;
698 u32 hdr_opcode;
699
700 hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
701 iscsi_conn = conn->cls_conn->dd_data;
702
703 switch (hdr_opcode) {
704 case ISCSI_OPCODE_SCSI_RESPONSE:
705 case ISCSI_OPCODE_DATA_IN:
706 qedi_scsi_completion(qedi, cqe, task, iscsi_conn);
707 break;
708 case ISCSI_OPCODE_LOGIN_RESPONSE:
709 qedi_process_login_resp(qedi, cqe, task, conn);
710 break;
711 case ISCSI_OPCODE_TMF_RESPONSE:
712 qedi_process_tmf_resp(qedi, cqe, task, conn);
713 break;
714 case ISCSI_OPCODE_TEXT_RESPONSE:
715 qedi_process_text_resp(qedi, cqe, task, conn);
716 break;
717 case ISCSI_OPCODE_LOGOUT_RESPONSE:
718 qedi_process_logout_resp(qedi, cqe, task, conn);
719 break;
720 case ISCSI_OPCODE_NOP_IN:
721 qedi_process_nopin_mesg(qedi, cqe, task, conn, que_idx);
722 break;
723 default:
724 QEDI_ERR(&qedi->dbg_ctx, "unknown opcode\n");
725 }
726}
727
728static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi,
729 struct iscsi_cqe_solicited *cqe,
730 struct iscsi_task *task,
731 struct qedi_conn *qedi_conn)
732{
733 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
734 struct iscsi_session *session = conn->session;
735 struct qedi_cmd *cmd = task->dd_data;
736
737 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UNSOL,
738 "itid=0x%x, cmd task id=0x%x\n",
739 cqe->itid, cmd->task_id);
740
741 cmd->state = RESPONSE_RECEIVED;
742 qedi_clear_task_idx(qedi, cmd->task_id);
743
744 spin_lock_bh(&session->back_lock);
745 __iscsi_put_task(task);
746 spin_unlock_bh(&session->back_lock);
747}
748
749static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
750 struct iscsi_cqe_solicited *cqe,
751 struct iscsi_task *task,
752 struct iscsi_conn *conn)
753{
754 struct qedi_work_map *work, *work_tmp;
755 u32 proto_itt = cqe->itid;
756 u32 ptmp_itt = 0;
757 itt_t protoitt = 0;
758 int found = 0;
759 struct qedi_cmd *qedi_cmd = NULL;
760 u32 rtid = 0;
761 u32 iscsi_cid;
762 struct qedi_conn *qedi_conn;
763 struct qedi_cmd *cmd_new, *dbg_cmd;
764 struct iscsi_task *mtask;
765 struct iscsi_tm *tmf_hdr = NULL;
766
767 iscsi_cid = cqe->conn_id;
768 qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
769
770 /* Based on this ITT, find the corresponding qedi_cmd */
771 spin_lock_bh(&qedi_conn->tmf_work_lock);
772 list_for_each_entry_safe(work, work_tmp, &qedi_conn->tmf_work_list,
773 list) {
774 if (work->rtid == proto_itt) {
775 /* We found the command */
776 qedi_cmd = work->qedi_cmd;
777 if (!qedi_cmd->list_tmf_work) {
778 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
779 "TMF work not found, cqe->tid=0x%x, cid=0x%x\n",
780 proto_itt, qedi_conn->iscsi_conn_id);
781 WARN_ON(1);
782 }
783 found = 1;
784 mtask = qedi_cmd->task;
785 tmf_hdr = (struct iscsi_tm *)mtask->hdr;
786 rtid = work->rtid;
787
788 list_del_init(&work->list);
789 kfree(work);
790 qedi_cmd->list_tmf_work = NULL;
791 }
792 }
793 spin_unlock_bh(&qedi_conn->tmf_work_lock);
794
795 if (found) {
796 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
797 "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
798 proto_itt, tmf_hdr->flags, qedi_conn->iscsi_conn_id);
799
800 if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
801 ISCSI_TM_FUNC_ABORT_TASK) {
802 spin_lock_bh(&conn->session->back_lock);
803
804 protoitt = build_itt(get_itt(tmf_hdr->rtt),
805 conn->session->age);
806 task = iscsi_itt_to_task(conn, protoitt);
807
808 spin_unlock_bh(&conn->session->back_lock);
809
810 if (!task) {
811 QEDI_NOTICE(&qedi->dbg_ctx,
812 "IO task completed, tmf rtt=0x%x, cid=0x%x\n",
813 get_itt(tmf_hdr->rtt),
814 qedi_conn->iscsi_conn_id);
815 return;
816 }
817
818 dbg_cmd = task->dd_data;
819
820 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
821 "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n",
822 get_itt(tmf_hdr->rtt), get_itt(task->itt),
823 dbg_cmd->task_id, qedi_conn->iscsi_conn_id);
824
825 if (qedi_cmd->state == CLEANUP_WAIT_FAILED)
826 qedi_cmd->state = CLEANUP_RECV;
827
828 qedi_clear_task_idx(qedi_conn->qedi, rtid);
829
830 spin_lock(&qedi_conn->list_lock);
831 list_del_init(&dbg_cmd->io_cmd);
832 qedi_conn->active_cmd_count--;
833 spin_unlock(&qedi_conn->list_lock);
834 qedi_cmd->state = CLEANUP_RECV;
835 wake_up_interruptible(&qedi_conn->wait_queue);
836 }
837 } else if (qedi_conn->cmd_cleanup_req > 0) {
838 spin_lock_bh(&conn->session->back_lock);
839 qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
840 protoitt = build_itt(ptmp_itt, conn->session->age);
841 task = iscsi_itt_to_task(conn, protoitt);
842 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
843 "cleanup io itid=0x%x, protoitt=0x%x, cmd_cleanup_cmpl=%d, cid=0x%x\n",
844 cqe->itid, protoitt, qedi_conn->cmd_cleanup_cmpl,
845 qedi_conn->iscsi_conn_id);
846
847 spin_unlock_bh(&conn->session->back_lock);
848 if (!task) {
849 QEDI_NOTICE(&qedi->dbg_ctx,
850 "task is null, itid=0x%x, cid=0x%x\n",
851 cqe->itid, qedi_conn->iscsi_conn_id);
852 return;
853 }
854 qedi_conn->cmd_cleanup_cmpl++;
855 wake_up(&qedi_conn->wait_queue);
856 cmd_new = task->dd_data;
857
858 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
859 "Freeing tid=0x%x for cid=0x%x\n",
860 cqe->itid, qedi_conn->iscsi_conn_id);
861 qedi_clear_task_idx(qedi_conn->qedi, cqe->itid);
862
863 } else {
864 qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
865 protoitt = build_itt(ptmp_itt, conn->session->age);
866 task = iscsi_itt_to_task(conn, protoitt);
867 QEDI_ERR(&qedi->dbg_ctx,
868 "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n",
869 protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
870 WARN_ON(1);
871 }
872}
873
874void qedi_fp_process_cqes(struct qedi_work *work)
875{
876 struct qedi_ctx *qedi = work->qedi;
877 union iscsi_cqe *cqe = &work->cqe;
878 struct iscsi_task *task = NULL;
879 struct iscsi_nopout *nopout_hdr;
880 struct qedi_conn *q_conn;
881 struct iscsi_conn *conn;
882 struct qedi_cmd *qedi_cmd;
883 u32 comp_type;
884 u32 iscsi_cid;
885 u32 hdr_opcode;
886 u16 que_idx = work->que_idx;
887 u8 cqe_err_bits = 0;
888
889 comp_type = cqe->cqe_common.cqe_type;
890 hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
891 cqe_err_bits =
892 cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;
893
894 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
895 "fw_cid=0x%x, cqe type=0x%x, opcode=0x%x\n",
896 cqe->cqe_common.conn_id, comp_type, hdr_opcode);
897
898 if (comp_type >= MAX_ISCSI_CQES_TYPE) {
899 QEDI_WARN(&qedi->dbg_ctx, "Invalid CqE type\n");
900 return;
901 }
902
903 iscsi_cid = cqe->cqe_common.conn_id;
904 q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
905 if (!q_conn) {
906 QEDI_WARN(&qedi->dbg_ctx,
907 "Session no longer exists for cid=0x%x!!\n",
908 iscsi_cid);
909 return;
910 }
911
912 conn = q_conn->cls_conn->dd_data;
913
914 if (unlikely(cqe_err_bits &&
915 GET_FIELD(cqe_err_bits,
916 CQE_ERROR_BITMAP_DATA_DIGEST_ERR))) {
917 iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
918 return;
919 }
920
921 switch (comp_type) {
922 case ISCSI_CQE_TYPE_SOLICITED:
923 case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
924 qedi_cmd = container_of(work, struct qedi_cmd, cqe_work);
925 task = qedi_cmd->task;
926 if (!task) {
927 QEDI_WARN(&qedi->dbg_ctx, "task is NULL\n");
928 return;
929 }
930
931 /* Process NOPIN local completion */
932 nopout_hdr = (struct iscsi_nopout *)task->hdr;
933 if ((nopout_hdr->itt == RESERVED_ITT) &&
934 (cqe->cqe_solicited.itid != (u16)RESERVED_ITT)) {
935 qedi_process_nopin_local_cmpl(qedi, &cqe->cqe_solicited,
936 task, q_conn);
937 } else {
938 cqe->cqe_solicited.itid =
939 qedi_get_itt(cqe->cqe_solicited);
940 /* Process other solicited responses */
941 qedi_mtask_completion(qedi, cqe, task, q_conn, que_idx);
942 }
943 break;
944 case ISCSI_CQE_TYPE_UNSOLICITED:
945 switch (hdr_opcode) {
946 case ISCSI_OPCODE_NOP_IN:
947 qedi_process_nopin_mesg(qedi, cqe, task, q_conn,
948 que_idx);
949 break;
950 case ISCSI_OPCODE_ASYNC_MSG:
951 qedi_process_async_mesg(qedi, cqe, task, q_conn,
952 que_idx);
953 break;
954 case ISCSI_OPCODE_REJECT:
955 qedi_process_reject_mesg(qedi, cqe, task, q_conn,
956 que_idx);
957 break;
958 }
959 goto exit_fp_process;
960 case ISCSI_CQE_TYPE_DUMMY:
961 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "Dummy CqE\n");
962 goto exit_fp_process;
963 case ISCSI_CQE_TYPE_TASK_CLEANUP:
964 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "CleanUp CqE\n");
965 qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, task,
966 conn);
967 goto exit_fp_process;
968 default:
969 QEDI_ERR(&qedi->dbg_ctx, "Error cqe.\n");
970 break;
971 }
972
973exit_fp_process:
974 return;
975}
976
977static void qedi_add_to_sq(struct qedi_conn *qedi_conn, struct iscsi_task *task,
978 u16 tid, uint16_t ptu_invalidate, int is_cleanup)
979{
980 struct iscsi_wqe *wqe;
981 struct iscsi_wqe_field *cont_field;
982 struct qedi_endpoint *ep;
983 struct scsi_cmnd *sc = task->sc;
984 struct iscsi_login_req *login_hdr;
985 struct qedi_cmd *cmd = task->dd_data;
986
987 login_hdr = (struct iscsi_login_req *)task->hdr;
988 ep = qedi_conn->ep;
989 wqe = &ep->sq[ep->sq_prod_idx];
990
991 memset(wqe, 0, sizeof(*wqe));
992
993 ep->sq_prod_idx++;
994 ep->fw_sq_prod_idx++;
995 if (ep->sq_prod_idx == QEDI_SQ_SIZE)
996 ep->sq_prod_idx = 0;
997
998 if (is_cleanup) {
999 SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
1000 ISCSI_WQE_TYPE_TASK_CLEANUP);
1001 wqe->task_id = tid;
1002 return;
1003 }
1004
1005 if (ptu_invalidate) {
1006 SET_FIELD(wqe->flags, ISCSI_WQE_PTU_INVALIDATE,
1007 ISCSI_WQE_SET_PTU_INVALIDATE);
1008 }
1009
1010 cont_field = &wqe->cont_prevtid_union.cont_field;
1011
1012 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
1013 case ISCSI_OP_LOGIN:
1014 case ISCSI_OP_TEXT:
1015 SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
1016 ISCSI_WQE_TYPE_MIDDLE_PATH);
1017 SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
1018 1);
1019 cont_field->contlen_cdbsize_field = ntoh24(login_hdr->dlength);
1020 break;
1021 case ISCSI_OP_LOGOUT:
1022 case ISCSI_OP_NOOP_OUT:
1023 case ISCSI_OP_SCSI_TMFUNC:
1024 SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
1025 ISCSI_WQE_TYPE_NORMAL);
1026 break;
1027 default:
1028 if (!sc)
1029 break;
1030
1031 SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
1032 ISCSI_WQE_TYPE_NORMAL);
1033 cont_field->contlen_cdbsize_field =
1034 (sc->sc_data_direction == DMA_TO_DEVICE) ?
1035 scsi_bufflen(sc) : 0;
1036 if (cmd->use_slowpath)
1037 SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES, 0);
1038 else
1039 SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
1040 (sc->sc_data_direction ==
1041 DMA_TO_DEVICE) ?
1042 min((u16)QEDI_FAST_SGE_COUNT,
1043 (u16)cmd->io_tbl.sge_valid) : 0);
1044 break;
1045 }
1046
1047 wqe->task_id = tid;
1048 /* Make sure SQ data is coherent */
1049 wmb();
1050}
1051
1052static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
1053{
1054 struct iscsi_db_data dbell = { 0 };
1055
1056 dbell.agg_flags = 0;
1057
1058 dbell.params |= DB_DEST_XCM << ISCSI_DB_DATA_DEST_SHIFT;
1059 dbell.params |= DB_AGG_CMD_SET << ISCSI_DB_DATA_AGG_CMD_SHIFT;
1060 dbell.params |=
1061 DQ_XCM_ISCSI_SQ_PROD_CMD << ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT;
1062
1063 dbell.sq_prod = qedi_conn->ep->fw_sq_prod_idx;
1064 writel(*(u32 *)&dbell, qedi_conn->ep->p_doorbell);
1065
1066 /* Make sure the fw write index is coherent. Include both memory
1067 * barriers as a failsafe: on some architectures the two calls compile
1068 * to the same instruction, on others they are distinct operations.
1069 */
1070 wmb();
1071 mmiowb();
1072 QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_MP_REQ,
1073 "prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n",
1074 qedi_conn->ep->sq_prod_idx, qedi_conn->ep->fw_sq_prod_idx,
1075 qedi_conn->iscsi_conn_id);
1076}
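Submission is thus a two-step produce-then-doorbell protocol: qedi_add_to_sq() fills the WQE and issues a wmb() so the entry is visible before the producer index reaches the chip, and qedi_ring_doorbell() performs the MMIO write with its own barrier pair behind it. A minimal restatement of the ordering contract (helper name hypothetical):

    static void ring_sq_doorbell(struct qedi_endpoint *ep, u32 db_data)
    {
            /* WQE stores from qedi_add_to_sq() precede this point */
            wmb();                          /* entry visible before doorbell */
            writel(db_data, ep->p_doorbell);
            wmb();                          /* failsafe pair: on some arches */
            mmiowb();                       /* these are distinct operations */
    }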
1077
1078int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
1079 struct iscsi_task *task)
1080{
1081 struct qedi_ctx *qedi = qedi_conn->qedi;
1082 struct iscsi_task_context *fw_task_ctx;
1083 struct iscsi_login_req *login_hdr;
1084 struct iscsi_login_req_hdr *fw_login_req = NULL;
1085 struct iscsi_cached_sge_ctx *cached_sge = NULL;
1086 struct iscsi_sge *single_sge = NULL;
1087 struct iscsi_sge *req_sge = NULL;
1088 struct iscsi_sge *resp_sge = NULL;
1089 struct qedi_cmd *qedi_cmd;
1090 s16 ptu_invalidate = 0;
1091 s16 tid = 0;
1092
1093 req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
1094 resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
1095 qedi_cmd = (struct qedi_cmd *)task->dd_data;
1096 login_hdr = (struct iscsi_login_req *)task->hdr;
1097
1098 tid = qedi_get_task_idx(qedi);
1099 if (tid == -1)
1100 return -ENOMEM;
1101
1102 fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
1103 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
1104
1105 qedi_cmd->task_id = tid;
1106
1107 /* Ystorm context */
1108 fw_login_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.login_req;
1109 fw_login_req->opcode = login_hdr->opcode;
1110 fw_login_req->version_min = login_hdr->min_version;
1111 fw_login_req->version_max = login_hdr->max_version;
1112 fw_login_req->flags_attr = login_hdr->flags;
1113 fw_login_req->isid_tabc = *((u16 *)login_hdr->isid + 2);
1114 fw_login_req->isid_d = *((u32 *)login_hdr->isid);
1115 fw_login_req->tsih = login_hdr->tsih;
1116 qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
1117 fw_login_req->itt = qedi_set_itt(tid, get_itt(task->itt));
1118 fw_login_req->cid = qedi_conn->iscsi_conn_id;
1119 fw_login_req->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
1120 /* exp_stat_sn is explicitly zeroed; the header value is unused */
1121 fw_login_req->exp_stat_sn = 0;
1122
1123 if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
1124 ptu_invalidate = 1;
1125 qedi->tid_reuse_count[tid] = 0;
1126 }
1127
1128 fw_task_ctx->ystorm_st_context.state.reuse_count =
1129 qedi->tid_reuse_count[tid];
1130 fw_task_ctx->mstorm_st_context.reuse_count =
1131 qedi->tid_reuse_count[tid]++;
1132 cached_sge =
1133 &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
1134 cached_sge->sge.sge_len = req_sge->sge_len;
1135 cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
1136 cached_sge->sge.sge_addr.hi =
1137 (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
1138
1139 /* Mstorm context */
1140 single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
1141 fw_task_ctx->mstorm_st_context.task_type = 0x2;
1142 fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
1143 single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
1144 single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
1145 single_sge->sge_len = resp_sge->sge_len;
1146
1147 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
1148 ISCSI_MFLAGS_SINGLE_SGE, 1);
1149 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
1150 ISCSI_MFLAGS_SLOW_IO, 0);
1151 fw_task_ctx->mstorm_st_context.sgl_size = 1;
1152 fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
1153
1154 /* Ustorm context */
1155 fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
1156 fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
1157 ntoh24(login_hdr->dlength);
1158 fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
1159 fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
1160 fw_task_ctx->ustorm_st_context.task_type = 0x2;
1161 fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
1162 fw_task_ctx->ustorm_ag_context.exp_data_acked =
1163 ntoh24(login_hdr->dlength);
1164 SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
1165 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
1166 SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
1167 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
1168
1169 spin_lock(&qedi_conn->list_lock);
1170 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
1171 qedi_cmd->io_cmd_in_list = true;
1172 qedi_conn->active_cmd_count++;
1173 spin_unlock(&qedi_conn->list_lock);
1174
1175 qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
1176 qedi_ring_doorbell(qedi_conn);
1177 return 0;
1178}
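The login path above, and the logout and TMF paths below, repeat the same TID reuse-count fragment: once a task id has been reused QEDI_MAX_TASK_NUM times, ptu_invalidate is set so the firmware discards its cached copy of the task context before the id cycles again. Factored out for clarity (helper name hypothetical):

    static s16 qedi_bump_reuse_count(struct qedi_ctx *qedi,
                                     struct iscsi_task_context *ctx, s16 tid)
    {
            s16 ptu_invalidate = 0;

            if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
                    ptu_invalidate = 1;     /* FW must re-fetch this context */
                    qedi->tid_reuse_count[tid] = 0;
            }
            ctx->ystorm_st_context.state.reuse_count =
                                            qedi->tid_reuse_count[tid];
            ctx->mstorm_st_context.reuse_count =
                                            qedi->tid_reuse_count[tid]++;

            return ptu_invalidate;
    }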
1179
1180int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
1181 struct iscsi_task *task)
1182{
1183 struct qedi_ctx *qedi = qedi_conn->qedi;
1184 struct iscsi_logout_req_hdr *fw_logout_req = NULL;
1185 struct iscsi_task_context *fw_task_ctx = NULL;
1186 struct iscsi_logout *logout_hdr = NULL;
1187 struct qedi_cmd *qedi_cmd = NULL;
1188 s16 tid = 0;
1189 s16 ptu_invalidate = 0;
1190
1191 qedi_cmd = (struct qedi_cmd *)task->dd_data;
1192 logout_hdr = (struct iscsi_logout *)task->hdr;
1193
1194 tid = qedi_get_task_idx(qedi);
1195 if (tid == -1)
1196 return -ENOMEM;
1197
1198 fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
1199
1200 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
1201 qedi_cmd->task_id = tid;
1202
1203 /* Ystorm context */
1204 fw_logout_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.logout_req;
1205 fw_logout_req->opcode = ISCSI_OPCODE_LOGOUT_REQUEST;
1206 fw_logout_req->reason_code = 0x80 | logout_hdr->flags;
1207 qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
1208 fw_logout_req->itt = qedi_set_itt(tid, get_itt(task->itt));
1209 fw_logout_req->exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
1210 fw_logout_req->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
1211
1212 if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
1213 ptu_invalidate = 1;
1214 qedi->tid_reuse_count[tid] = 0;
1215 }
1216 fw_task_ctx->ystorm_st_context.state.reuse_count =
1217 qedi->tid_reuse_count[tid];
1218 fw_task_ctx->mstorm_st_context.reuse_count =
1219 qedi->tid_reuse_count[tid]++;
1220 fw_logout_req->cid = qedi_conn->iscsi_conn_id;
1221 fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
1222
1223 /* Mstorm context */
1224 fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
1225 fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
1226
1227 /* Ustorm context */
1228 fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
1229 fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
1230 fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
1231 fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
1232 fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
1233
1234 SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
1235 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
1236 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
1237 ISCSI_REG1_NUM_FAST_SGES, 0);
1238
1239 fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
1240 SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
1241 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
1242
1243 spin_lock(&qedi_conn->list_lock);
1244 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
1245 qedi_cmd->io_cmd_in_list = true;
1246 qedi_conn->active_cmd_count++;
1247 spin_unlock(&qedi_conn->list_lock);
1248
1249 qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
1250 qedi_ring_doorbell(qedi_conn);
1251
1252 return 0;
1253}
1254
1255int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
1256 struct iscsi_task *task, bool in_recovery)
1257{
1258 int rval;
1259 struct iscsi_task *ctask;
1260 struct qedi_cmd *cmd, *cmd_tmp;
1261 struct iscsi_tm *tmf_hdr;
1262 unsigned int lun = 0;
1263 bool lun_reset = false;
1264 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
1265 struct iscsi_session *session = conn->session;
1266
1267 /* task is NULL when called from recovery; valid when from a TMF resp */
1268 if (task) {
1269 tmf_hdr = (struct iscsi_tm *)task->hdr;
1270
1271 if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
1272 ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) {
1273 lun_reset = true;
1274 lun = scsilun_to_int(&tmf_hdr->lun);
1275 }
1276 }
1277
1278 qedi_conn->cmd_cleanup_req = 0;
1279 qedi_conn->cmd_cleanup_cmpl = 0;
1280
1281 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
1282 "active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n",
1283 qedi_conn->active_cmd_count, qedi_conn->iscsi_conn_id,
1284 in_recovery, lun_reset);
1285
1286 if (lun_reset)
1287 spin_lock_bh(&session->back_lock);
1288
1289 spin_lock(&qedi_conn->list_lock);
1290
1291 list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
1292 io_cmd) {
1293 ctask = cmd->task;
1294 if (ctask == task)
1295 continue;
1296
1297 if (lun_reset) {
1298 if (cmd->scsi_cmd && cmd->scsi_cmd->device) {
1299 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
1300 "tid=0x%x itt=0x%x scsi_cmd_ptr=%p device=%p task_state=%d cmd_state=0%x cid=0x%x\n",
1301 cmd->task_id, get_itt(ctask->itt),
1302 cmd->scsi_cmd, cmd->scsi_cmd->device,
1303 ctask->state, cmd->state,
1304 qedi_conn->iscsi_conn_id);
1305 if (cmd->scsi_cmd->device->lun != lun)
1306 continue;
1307 }
1308 }
1309 qedi_conn->cmd_cleanup_req++;
1310 qedi_iscsi_cleanup_task(ctask, true);
1311
1312 list_del_init(&cmd->io_cmd);
1313 qedi_conn->active_cmd_count--;
1314 QEDI_WARN(&qedi->dbg_ctx,
1315 "Deleted active cmd list node io_cmd=%p, cid=0x%x\n",
1316 &cmd->io_cmd, qedi_conn->iscsi_conn_id);
1317 }
1318
1319 spin_unlock(&qedi_conn->list_lock);
1320
1321 if (lun_reset)
1322 spin_unlock_bh(&session->back_lock);
1323
1324 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
1325 "cmd_cleanup_req=%d, cid=0x%x\n",
1326 qedi_conn->cmd_cleanup_req,
1327 qedi_conn->iscsi_conn_id);
1328
1329 rval = wait_event_interruptible_timeout(qedi_conn->wait_queue,
1330 ((qedi_conn->cmd_cleanup_req ==
1331 qedi_conn->cmd_cleanup_cmpl) ||
1332 qedi_conn->ep),
1333 5 * HZ);
1334 if (rval) {
1335 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
1336 "i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
1337 qedi_conn->cmd_cleanup_req,
1338 qedi_conn->cmd_cleanup_cmpl,
1339 qedi_conn->iscsi_conn_id);
1340
1341 return 0;
1342 }
1343
1344 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
1345 "i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
1346 qedi_conn->cmd_cleanup_req,
1347 qedi_conn->cmd_cleanup_cmpl,
1348 qedi_conn->iscsi_conn_id);
1349
1350 iscsi_host_for_each_session(qedi->shost,
1351 qedi_mark_device_missing);
1352 qedi_ops->common->drain(qedi->cdev);
1353
1354 /* Enable IOs for all other sessions except the current one. */
1355 if (!wait_event_interruptible_timeout(qedi_conn->wait_queue,
1356 (qedi_conn->cmd_cleanup_req ==
1357 qedi_conn->cmd_cleanup_cmpl),
1358 5 * HZ)) {
1359 iscsi_host_for_each_session(qedi->shost,
1360 qedi_mark_device_available);
1361 return -1;
1362 }
1363
1364 iscsi_host_for_each_session(qedi->shost,
1365 qedi_mark_device_available);
1366
1367 return 0;
1368}
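qedi_cleanup_all_io() is a counted rendezvous: cmd_cleanup_req is bumped once per cleanup WQE issued, the fast path increments cmd_cleanup_cmpl as each TASK_CLEANUP CQE arrives (see qedi_process_cmd_cleanup_resp() above), and the submitter sleeps until the counters meet or the timeout fires. The skeleton of that rendezvous, stripped of the LUN-reset filtering (illustrative condensation, not driver code):

    /* issue side, under qedi_conn->list_lock */
    list_for_each_entry_safe(cmd, cmd_tmp,
                             &qedi_conn->active_cmd_list, io_cmd) {
            qedi_conn->cmd_cleanup_req++;
            qedi_iscsi_cleanup_task(cmd->task, true); /* posts cleanup WQE */
    }

    /* completion side bumps cmd_cleanup_cmpl per CQE; then wait: */
    rval = wait_event_interruptible_timeout(qedi_conn->wait_queue,
                                            qedi_conn->cmd_cleanup_req ==
                                            qedi_conn->cmd_cleanup_cmpl,
                                            5 * HZ);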
1369
1370void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
1371 struct iscsi_task *task)
1372{
1373 struct qedi_endpoint *qedi_ep;
1374 int rval;
1375
1376 qedi_ep = qedi_conn->ep;
1377 qedi_conn->cmd_cleanup_req = 0;
1378 qedi_conn->cmd_cleanup_cmpl = 0;
1379
1380 if (!qedi_ep) {
1381 QEDI_WARN(&qedi->dbg_ctx,
1382 "Cannot proceed, ep already disconnected, cid=0x%x\n",
1383 qedi_conn->iscsi_conn_id);
1384 return;
1385 }
1386
1387 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1388 "Clearing SQ for cid=0x%x, conn=%p, ep=%p\n",
1389 qedi_conn->iscsi_conn_id, qedi_conn, qedi_ep);
1390
1391 qedi_ops->clear_sq(qedi->cdev, qedi_ep->handle);
1392
1393 rval = qedi_cleanup_all_io(qedi, qedi_conn, task, true);
1394 if (rval) {
1395 QEDI_ERR(&qedi->dbg_ctx,
1396 "fatal error, need hard reset, cid=0x%x\n",
1397 qedi_conn->iscsi_conn_id);
1398 WARN_ON(1);
1399 }
1400}
1401
1402static int qedi_wait_for_cleanup_request(struct qedi_ctx *qedi,
1403 struct qedi_conn *qedi_conn,
1404 struct iscsi_task *task,
1405 struct qedi_cmd *qedi_cmd,
1406 struct qedi_work_map *list_work)
1407{
1408 struct qedi_cmd *cmd = (struct qedi_cmd *)task->dd_data;
1409 int wait;
1410
1411 wait = wait_event_interruptible_timeout(qedi_conn->wait_queue,
1412 ((qedi_cmd->state ==
1413 CLEANUP_RECV) ||
1414 ((qedi_cmd->type == TYPEIO) &&
1415 (cmd->state ==
1416 RESPONSE_RECEIVED))),
1417 5 * HZ);
1418 if (!wait) {
1419 qedi_cmd->state = CLEANUP_WAIT_FAILED;
1420
1421 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
1422 "Cleanup timedout tid=0x%x, issue connection recovery, cid=0x%x\n",
1423 cmd->task_id, qedi_conn->iscsi_conn_id);
1424
1425 return -1;
1426 }
1427 return 0;
1428}
1429
1430static void qedi_tmf_work(struct work_struct *work)
1431{
1432 struct qedi_cmd *qedi_cmd =
1433 container_of(work, struct qedi_cmd, tmf_work);
1434 struct qedi_conn *qedi_conn = qedi_cmd->conn;
1435 struct qedi_ctx *qedi = qedi_conn->qedi;
1436 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
1437 struct iscsi_cls_session *cls_sess;
1438 struct qedi_work_map *list_work = NULL;
1439 struct iscsi_task *mtask;
1440 struct qedi_cmd *cmd;
1441 struct iscsi_task *ctask;
1442 struct iscsi_tm *tmf_hdr;
1443 s16 rval = 0;
1444 s16 tid = 0;
1445
1446 mtask = qedi_cmd->task;
1447 tmf_hdr = (struct iscsi_tm *)mtask->hdr;
1448 cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);
1449 set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
1450
1451 ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
1452 if (!ctask || !ctask->sc) {
1453 QEDI_ERR(&qedi->dbg_ctx, "Task already completed\n");
1454 goto abort_ret;
1455 }
1456
1457 cmd = (struct qedi_cmd *)ctask->dd_data;
1458 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1459 "Abort tmf rtt=0x%x, cmd itt=0x%x, cmd tid=0x%x, cid=0x%x\n",
1460 get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id,
1461 qedi_conn->iscsi_conn_id);
1462
1463 if (do_not_recover) {
1464 QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
1465 do_not_recover);
1466 goto abort_ret;
1467 }
1468
1469 list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC);
1470 if (!list_work) {
1471 QEDI_ERR(&qedi->dbg_ctx, "Memory allocation failed\n");
1472 goto abort_ret;
1473 }
1474
1475 qedi_cmd->type = TYPEIO;
1476 list_work->qedi_cmd = qedi_cmd;
1477 list_work->rtid = cmd->task_id;
1478 list_work->state = QEDI_WORK_SCHEDULED;
1479 qedi_cmd->list_tmf_work = list_work;
1480
1481 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
1482 "Queue tmf work=%p, list node=%p, cid=0x%x, tmf flags=0x%x\n",
1483 list_work->ptr_tmf_work, list_work, qedi_conn->iscsi_conn_id,
1484 tmf_hdr->flags);
1485
1486 spin_lock_bh(&qedi_conn->tmf_work_lock);
1487 list_add_tail(&list_work->list, &qedi_conn->tmf_work_list);
1488 spin_unlock_bh(&qedi_conn->tmf_work_lock);
1489
1490 qedi_iscsi_cleanup_task(ctask, false);
1491
1492 rval = qedi_wait_for_cleanup_request(qedi, qedi_conn, ctask, qedi_cmd,
1493 list_work);
1494 if (rval == -1) {
1495 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1496 "FW cleanup got escalated, cid=0x%x\n",
1497 qedi_conn->iscsi_conn_id);
1498 goto ldel_exit;
1499 }
1500
1501 tid = qedi_get_task_idx(qedi);
1502 if (tid == -1) {
1503 QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
1504 qedi_conn->iscsi_conn_id);
1505 goto ldel_exit;
1506 }
1507
1508 qedi_cmd->task_id = tid;
1509 qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
1510
1511abort_ret:
1512 clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
1513 return;
1514
1515ldel_exit:
1516 spin_lock_bh(&qedi_conn->tmf_work_lock);
1517 if (!qedi_cmd->list_tmf_work) {
1518 list_del_init(&list_work->list);
1519 qedi_cmd->list_tmf_work = NULL;
1520 kfree(list_work);
1521 }
1522 spin_unlock_bh(&qedi_conn->tmf_work_lock);
1523
1524 spin_lock(&qedi_conn->list_lock);
1525 list_del_init(&cmd->io_cmd);
1526 qedi_conn->active_cmd_count--;
1527 spin_unlock(&qedi_conn->list_lock);
1528
1529 clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
1530}
1531
1532static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
1533 struct iscsi_task *mtask)
1534{
1535 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
1536 struct qedi_ctx *qedi = qedi_conn->qedi;
1537 struct iscsi_task_context *fw_task_ctx;
1538 struct iscsi_tmf_request_hdr *fw_tmf_request;
1539 struct iscsi_sge *single_sge;
1540 struct qedi_cmd *qedi_cmd;
1541 struct qedi_cmd *cmd;
1542 struct iscsi_task *ctask;
1543 struct iscsi_tm *tmf_hdr;
1544 struct iscsi_sge *req_sge;
1545 struct iscsi_sge *resp_sge;
1546 u32 lun[2];
1547 s16 tid = 0, ptu_invalidate = 0;
1548
1549 req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
1550 resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
1551 qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
1552 tmf_hdr = (struct iscsi_tm *)mtask->hdr;
1553
1554 tid = qedi_cmd->task_id;
1555 qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
1556
1557 fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
1558 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
1559
1560 fw_tmf_request = &fw_task_ctx->ystorm_st_context.pdu_hdr.tmf_request;
1561 fw_tmf_request->itt = qedi_set_itt(tid, get_itt(mtask->itt));
1562 fw_tmf_request->cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
1563
1564 memcpy(lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
1565 fw_tmf_request->lun.lo = be32_to_cpu(lun[0]);
1566 fw_tmf_request->lun.hi = be32_to_cpu(lun[1]);
1567
1568 if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
1569 ptu_invalidate = 1;
1570 qedi->tid_reuse_count[tid] = 0;
1571 }
1572 fw_task_ctx->ystorm_st_context.state.reuse_count =
1573 qedi->tid_reuse_count[tid];
1574 fw_task_ctx->mstorm_st_context.reuse_count =
1575 qedi->tid_reuse_count[tid]++;
1576
1577 if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
1578 ISCSI_TM_FUNC_ABORT_TASK) {
1579 ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
1580 if (!ctask || !ctask->sc) {
1581 QEDI_ERR(&qedi->dbg_ctx,
1582 "Could not get reference task\n");
1583 return 0;
1584 }
1585 cmd = (struct qedi_cmd *)ctask->dd_data;
1586 fw_tmf_request->rtt =
1587 qedi_set_itt(cmd->task_id,
1588 get_itt(tmf_hdr->rtt));
1589 } else {
1590 fw_tmf_request->rtt = ISCSI_RESERVED_TAG;
1591 }
1592
1593 fw_tmf_request->opcode = tmf_hdr->opcode;
1594 fw_tmf_request->function = tmf_hdr->flags;
1595 fw_tmf_request->hdr_second_dword = ntoh24(tmf_hdr->dlength);
1596 fw_tmf_request->ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);
1597
1598 single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
1599 fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
1600 fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
1601 single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
1602 single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
1603 single_sge->sge_len = resp_sge->sge_len;
1604
1605 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
1606 ISCSI_MFLAGS_SINGLE_SGE, 1);
1607 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
1608 ISCSI_MFLAGS_SLOW_IO, 0);
1609 fw_task_ctx->mstorm_st_context.sgl_size = 1;
1610 fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
1611
1612 /* Ustorm context */
1613 fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
1614 fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
1615 fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
1616 fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
1617 fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
1618
1619 SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
1620 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
1621 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
1622 ISCSI_REG1_NUM_FAST_SGES, 0);
1623
1624 fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
1625 SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
1626 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
1627 fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
1628 fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
1629
1630 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
1631 "Add TMF to SQ, tmf tid=0x%x, itt=0x%x, cid=0x%x\n",
1632 tid, mtask->itt, qedi_conn->iscsi_conn_id);
1633
1634 spin_lock(&qedi_conn->list_lock);
1635 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
1636 qedi_cmd->io_cmd_in_list = true;
1637 qedi_conn->active_cmd_count++;
1638 spin_unlock(&qedi_conn->list_lock);
1639
1640 qedi_add_to_sq(qedi_conn, mtask, tid, ptu_invalidate, false);
1641 qedi_ring_doorbell(qedi_conn);
1642 return 0;
1643}
1644
1645int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
1646 struct iscsi_task *mtask)
1647{
1648 struct qedi_ctx *qedi = qedi_conn->qedi;
1649 struct iscsi_tm *tmf_hdr;
1650 struct qedi_cmd *qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
1651 s16 tid = 0;
1652
1653 tmf_hdr = (struct iscsi_tm *)mtask->hdr;
1654 qedi_cmd->task = mtask;
1655
1656 /* If abort task then schedule the work and return */
1657 if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
1658 ISCSI_TM_FUNC_ABORT_TASK) {
1659 qedi_cmd->state = CLEANUP_WAIT;
1660 INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_work);
1661 queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
1662
1663 } else if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
1664 ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
1665 ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
1666 ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
1667 ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
1668 ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
1669 tid = qedi_get_task_idx(qedi);
1670 if (tid == -1) {
1671 QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
1672 qedi_conn->iscsi_conn_id);
1673 return -1;
1674 }
1675 qedi_cmd->task_id = tid;
1676
1677 qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
1678
1679 } else {
1680 QEDI_ERR(&qedi->dbg_ctx, "Invalid tmf, cid=0x%x\n",
1681 qedi_conn->iscsi_conn_id);
1682 return -1;
1683 }
1684
1685 return 0;
1686}
1687
1688int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
1689 struct iscsi_task *task)
1690{
1691 struct qedi_ctx *qedi = qedi_conn->qedi;
1692 struct iscsi_task_context *fw_task_ctx;
1693 struct iscsi_text_request_hdr *fw_text_request;
1694 struct iscsi_cached_sge_ctx *cached_sge;
1695 struct iscsi_sge *single_sge;
1696 struct qedi_cmd *qedi_cmd;
1697 /* For 6.5 hdr iscsi_hdr */
1698 struct iscsi_text *text_hdr;
1699 struct iscsi_sge *req_sge;
1700 struct iscsi_sge *resp_sge;
1701 s16 ptu_invalidate = 0;
1702 s16 tid = 0;
1703
1704 req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
1705 resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
1706 qedi_cmd = (struct qedi_cmd *)task->dd_data;
1707 text_hdr = (struct iscsi_text *)task->hdr;
1708
1709 tid = qedi_get_task_idx(qedi);
1710 if (tid == -1)
1711 return -ENOMEM;
1712
1713 fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
1714 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
1715
1716 qedi_cmd->task_id = tid;
1717
1718 /* Ystorm context */
1719 fw_text_request =
1720 &fw_task_ctx->ystorm_st_context.pdu_hdr.text_request;
1721 fw_text_request->opcode = text_hdr->opcode;
1722 fw_text_request->flags_attr = text_hdr->flags;
1723
1724 qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
1725 fw_text_request->itt = qedi_set_itt(tid, get_itt(task->itt));
1726 fw_text_request->ttt = text_hdr->ttt;
1727 fw_text_request->cmd_sn = be32_to_cpu(text_hdr->cmdsn);
1728 fw_text_request->exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
1729 fw_text_request->hdr_second_dword = ntoh24(text_hdr->dlength);
1730
1731 if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
1732 ptu_invalidate = 1;
1733 qedi->tid_reuse_count[tid] = 0;
1734 }
1735 fw_task_ctx->ystorm_st_context.state.reuse_count =
1736 qedi->tid_reuse_count[tid];
1737 fw_task_ctx->mstorm_st_context.reuse_count =
1738 qedi->tid_reuse_count[tid]++;
1739
1740 cached_sge =
1741 &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
1742 cached_sge->sge.sge_len = req_sge->sge_len;
1743 cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
1744 cached_sge->sge.sge_addr.hi =
1745 (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
1746
1747 /* Mstorm context */
1748 single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
1749 fw_task_ctx->mstorm_st_context.task_type = 0x2;
1750 fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
1751 single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
1752 single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
1753 single_sge->sge_len = resp_sge->sge_len;
1754
1755 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
1756 ISCSI_MFLAGS_SINGLE_SGE, 1);
1757 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
1758 ISCSI_MFLAGS_SLOW_IO, 0);
1759 fw_task_ctx->mstorm_st_context.sgl_size = 1;
1760 fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
1761
1762 /* Ustorm context */
1763 fw_task_ctx->ustorm_ag_context.exp_data_acked =
1764 ntoh24(text_hdr->dlength);
1765 fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
1766 fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
1767 ntoh24(text_hdr->dlength);
1768 fw_task_ctx->ustorm_st_context.exp_data_sn =
1769 be32_to_cpu(text_hdr->exp_statsn);
1770 fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
1771 fw_task_ctx->ustorm_st_context.task_type = 0x2;
1772 fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
1773 SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
1774 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
1775
1776 /* Add command in active command list */
1777 spin_lock(&qedi_conn->list_lock);
1778 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
1779 qedi_cmd->io_cmd_in_list = true;
1780 qedi_conn->active_cmd_count++;
1781 spin_unlock(&qedi_conn->list_lock);
1782
1783 qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
1784 qedi_ring_doorbell(qedi_conn);
1785
1786 return 0;
1787}
1788
1789int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
1790 struct iscsi_task *task,
1791 char *datap, int data_len, int unsol)
1792{
1793 struct qedi_ctx *qedi = qedi_conn->qedi;
1794 struct iscsi_task_context *fw_task_ctx;
1795 struct iscsi_nop_out_hdr *fw_nop_out;
1796 struct qedi_cmd *qedi_cmd;
1797 /* For 6.5 hdr iscsi_hdr */
1798 struct iscsi_nopout *nopout_hdr;
1799 struct iscsi_cached_sge_ctx *cached_sge;
1800 struct iscsi_sge *single_sge;
1801 struct iscsi_sge *req_sge;
1802 struct iscsi_sge *resp_sge;
1803 u32 lun[2];
1804 s16 ptu_invalidate = 0;
1805 s16 tid = 0;
1806
1807 req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
1808 resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
1809 qedi_cmd = (struct qedi_cmd *)task->dd_data;
1810 nopout_hdr = (struct iscsi_nopout *)task->hdr;
1811
1812 tid = qedi_get_task_idx(qedi);
1813 if (tid == -1) {
1814 QEDI_WARN(&qedi->dbg_ctx, "Invalid tid\n");
1815 return -ENOMEM;
1816 }
1817
1818 fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
1819
1820 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
1821 qedi_cmd->task_id = tid;
1822
1823 /* Ystorm context */
1824 fw_nop_out = &fw_task_ctx->ystorm_st_context.pdu_hdr.nop_out;
1825 SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
1826 SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
1827
1828 memcpy(lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
1829 fw_nop_out->lun.lo = be32_to_cpu(lun[0]);
1830 fw_nop_out->lun.hi = be32_to_cpu(lun[1]);
1831
1832 qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
1833
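	/*
	 * A TTT other than all ones means this NOP-Out answers a target
	 * NOP-In: complete it locally and do not track it. The reserved
	 * TTT marks an initiator ping that expects a NOP-In back, so it
	 * joins the active command list.
	 */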
1834 if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) {
1835 fw_nop_out->itt = be32_to_cpu(nopout_hdr->itt);
1836 fw_nop_out->ttt = be32_to_cpu(nopout_hdr->ttt);
1837 fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
1838 fw_task_ctx->ystorm_st_context.state.local_comp = 1;
1839 SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
1840 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
1841 } else {
1842 fw_nop_out->itt = qedi_set_itt(tid, get_itt(task->itt));
1843 fw_nop_out->ttt = ISCSI_TTT_ALL_ONES;
1844 fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
1845
1846 spin_lock(&qedi_conn->list_lock);
1847 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
1848 qedi_cmd->io_cmd_in_list = true;
1849 qedi_conn->active_cmd_count++;
1850 spin_unlock(&qedi_conn->list_lock);
1851 }
1852
1853 fw_nop_out->opcode = ISCSI_OPCODE_NOP_OUT;
1854 fw_nop_out->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
1855 fw_nop_out->exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
1856
1857 cached_sge =
1858 &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
1859 cached_sge->sge.sge_len = req_sge->sge_len;
1860 cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
1861 cached_sge->sge.sge_addr.hi =
1862 (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
1863
1864 /* Mstorm context */
1865 fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
1866 fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
1867
1868 single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
1869 single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
1870 single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
1871 single_sge->sge_len = resp_sge->sge_len;
1872 fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
1873
1874 if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
1875 ptu_invalidate = 1;
1876 qedi->tid_reuse_count[tid] = 0;
1877 }
1878 fw_task_ctx->ystorm_st_context.state.reuse_count =
1879 qedi->tid_reuse_count[tid];
1880 fw_task_ctx->mstorm_st_context.reuse_count =
1881 qedi->tid_reuse_count[tid]++;
1882 /* Ustorm context */
1883 fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
1884 fw_task_ctx->ustorm_st_context.exp_data_transfer_len = data_len;
1885 fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
1886 fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
1887 fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
1888
1889 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
1890 ISCSI_REG1_NUM_FAST_SGES, 0);
1891
1892 fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
1893 SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
1894 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
1895
1896 fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
1897 fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
1898
1899 qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
1900 qedi_ring_doorbell(qedi_conn);
1901 return 0;
1902}
1903
1904static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
1905 int bd_index)
1906{
1907 struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
1908 int frag_size, sg_frags;
1909
1910 sg_frags = 0;
1911
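	/*
	 * Trim an unaligned start address up to the next QEDI_PAGE_SIZE
	 * boundary, then carve the rest into QEDI_BD_SPLIT_SZ chunks with
	 * the remainder in the final SGE. Illustrative example, assuming
	 * a 4K page and a 32K split size: addr=0x1234, sg_len=0x9000
	 * yields fragments of 0xdcc, 0x8000 and 0x234 bytes.
	 */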
1912 while (sg_len) {
1913 if (addr % QEDI_PAGE_SIZE)
1914 frag_size =
1915 (QEDI_PAGE_SIZE - (addr % QEDI_PAGE_SIZE));
1916 else
1917 frag_size = (sg_len > QEDI_BD_SPLIT_SZ) ? 0 :
1918 (sg_len % QEDI_BD_SPLIT_SZ);
1919
1920 if (frag_size == 0)
1921 frag_size = QEDI_BD_SPLIT_SZ;
1922
1923 bd[bd_index + sg_frags].sge_addr.lo = (addr & 0xffffffff);
1924 bd[bd_index + sg_frags].sge_addr.hi = (addr >> 32);
1925 bd[bd_index + sg_frags].sge_len = (u16)frag_size;
1926 QEDI_INFO(&cmd->conn->qedi->dbg_ctx, QEDI_LOG_IO,
1927 "split sge %d: addr=%llx, len=%x",
1928 (bd_index + sg_frags), addr, frag_size);
1929
1930 addr += (u64)frag_size;
1931 sg_frags++;
1932 sg_len -= frag_size;
1933 }
1934 return sg_frags;
1935}
1936
1937static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd)
1938{
1939 struct scsi_cmnd *sc = cmd->scsi_cmd;
1940 struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
1941 struct scatterlist *sg;
1942 int byte_count = 0;
1943 int bd_count = 0;
1944 int sg_count;
1945 int sg_len;
1946 int sg_frags;
1947 u64 addr, end_addr;
1948 int i;
1949
1950 WARN_ON(scsi_sg_count(sc) > QEDI_ISCSI_MAX_BDS_PER_CMD);
1951
1952 sg_count = dma_map_sg(&qedi->pdev->dev, scsi_sglist(sc),
1953 scsi_sg_count(sc), sc->sc_data_direction);
1954
1955 /*
1956	 * Send a single SGE as a cached SGL when its length is
1957	 * below 64K (MAX_SGLEN_FOR_CACHESGL).
1958 */
1959 sg = scsi_sglist(sc);
1960 if ((sg_count == 1) && (sg_dma_len(sg) <= MAX_SGLEN_FOR_CACHESGL)) {
1961 sg_len = sg_dma_len(sg);
1962 addr = (u64)sg_dma_address(sg);
1963
1964 bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
1965 bd[bd_count].sge_addr.hi = (addr >> 32);
1966 bd[bd_count].sge_len = (u16)sg_len;
1967
1968 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
1969			   "single-cached-sgl: bd_count:%d addr=%llx, len=%x",
1970 sg_count, addr, sg_len);
1971
1972 return ++bd_count;
1973 }
1974
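	/*
	 * Fast-path SGLs must be pagewise contiguous: only the first
	 * element may start unaligned and only the last may end
	 * unaligned; any other page-boundary violation below forces
	 * the slow path.
	 */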
1975 scsi_for_each_sg(sc, sg, sg_count, i) {
1976 sg_len = sg_dma_len(sg);
1977 addr = (u64)sg_dma_address(sg);
1978 end_addr = (addr + sg_len);
1979
1980 /*
1981 * first sg elem in the 'list',
1982 * check if end addr is page-aligned.
1983 */
1984 if ((i == 0) && (sg_count > 1) && (end_addr % QEDI_PAGE_SIZE))
1985 cmd->use_slowpath = true;
1986
1987 /*
1988 * last sg elem in the 'list',
1989 * check if start addr is page-aligned.
1990 */
1991 else if ((i == (sg_count - 1)) &&
1992 (sg_count > 1) && (addr % QEDI_PAGE_SIZE))
1993 cmd->use_slowpath = true;
1994
1995 /*
1996 * middle sg elements in list,
1997		 * check if start and end addr are page-aligned
1998 */
1999 else if ((i != 0) && (i != (sg_count - 1)) &&
2000 ((addr % QEDI_PAGE_SIZE) ||
2001 (end_addr % QEDI_PAGE_SIZE)))
2002 cmd->use_slowpath = true;
2003
2004 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "sg[%d] size=0x%x",
2005 i, sg_len);
2006
2007 if (sg_len > QEDI_BD_SPLIT_SZ) {
2008 sg_frags = qedi_split_bd(cmd, addr, sg_len, bd_count);
2009 } else {
2010 sg_frags = 1;
2011 bd[bd_count].sge_addr.lo = addr & 0xffffffff;
2012 bd[bd_count].sge_addr.hi = addr >> 32;
2013 bd[bd_count].sge_len = sg_len;
2014 }
2015 byte_count += sg_len;
2016 bd_count += sg_frags;
2017 }
2018
2019 if (byte_count != scsi_bufflen(sc))
2020 QEDI_ERR(&qedi->dbg_ctx,
2021 "byte_count = %d != scsi_bufflen = %d\n", byte_count,
2022 scsi_bufflen(sc));
2023 else
2024 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "byte_count = %d\n",
2025 byte_count);
2026
2027 WARN_ON(byte_count != scsi_bufflen(sc));
2028
2029 return bd_count;
2030}
2031
2032static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd)
2033{
2034 int bd_count;
2035 struct scsi_cmnd *sc = cmd->scsi_cmd;
2036
2037 if (scsi_sg_count(sc)) {
2038 bd_count = qedi_map_scsi_sg(cmd->conn->qedi, cmd);
2039 if (bd_count == 0)
2040 return;
2041 } else {
2042 struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
2043
2044 bd[0].sge_addr.lo = 0;
2045 bd[0].sge_addr.hi = 0;
2046 bd[0].sge_len = 0;
2047 bd_count = 0;
2048 }
2049 cmd->io_tbl.sge_valid = bd_count;
2050}
2051
2052static void qedi_cpy_scsi_cdb(struct scsi_cmnd *sc, u32 *dstp)
2053{
2054 u32 dword;
2055 int lpcnt;
2056 u8 *srcp;
2057
2058 lpcnt = sc->cmd_len / sizeof(dword);
2059 srcp = (u8 *)sc->cmnd;
2060 while (lpcnt--) {
2061 memcpy(&dword, (const void *)srcp, 4);
2062 *dstp = cpu_to_be32(dword);
2063 srcp += 4;
2064 dstp++;
2065 }
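	/*
	 * The 2-byte tail below covers 6- and 10-byte CDBs; standard
	 * CDB lengths (6/10/12/16) never leave a 1- or 3-byte remainder.
	 */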
2066 if (sc->cmd_len & 0x3) {
2067 dword = (u32)srcp[0] | ((u32)srcp[1] << 8);
2068 *dstp = cpu_to_be32(dword);
2069 }
2070}
2071
2072void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
2073 u16 tid, int8_t direction)
2074{
2075 struct qedi_io_log *io_log;
2076 struct iscsi_conn *conn = task->conn;
2077 struct qedi_conn *qedi_conn = conn->dd_data;
2078 struct scsi_cmnd *sc_cmd = task->sc;
2079 unsigned long flags;
2080 u8 op;
2081
2082 spin_lock_irqsave(&qedi->io_trace_lock, flags);
2083
2084 io_log = &qedi->io_trace_buf[qedi->io_trace_idx];
2085 io_log->direction = direction;
2086 io_log->task_id = tid;
2087 io_log->cid = qedi_conn->iscsi_conn_id;
2088 io_log->lun = sc_cmd->device->lun;
2089 io_log->op = sc_cmd->cmnd[0];
2090 op = sc_cmd->cmnd[0];
2091 io_log->lba[0] = sc_cmd->cmnd[2];
2092 io_log->lba[1] = sc_cmd->cmnd[3];
2093 io_log->lba[2] = sc_cmd->cmnd[4];
2094 io_log->lba[3] = sc_cmd->cmnd[5];
2095 io_log->bufflen = scsi_bufflen(sc_cmd);
2096 io_log->sg_count = scsi_sg_count(sc_cmd);
2097 io_log->fast_sgs = qedi->fast_sgls;
2098 io_log->cached_sgs = qedi->cached_sgls;
2099 io_log->slow_sgs = qedi->slow_sgls;
2100 io_log->cached_sge = qedi->use_cached_sge;
2101 io_log->slow_sge = qedi->use_slow_sge;
2102 io_log->fast_sge = qedi->use_fast_sge;
2103 io_log->result = sc_cmd->result;
2104 io_log->jiffies = jiffies;
2105 io_log->blk_req_cpu = smp_processor_id();
2106
2107 if (direction == QEDI_IO_TRACE_REQ) {
2108 /* For requests we only care about the submission CPU */
2109 io_log->req_cpu = smp_processor_id() % qedi->num_queues;
2110 io_log->intr_cpu = 0;
2111 io_log->blk_rsp_cpu = 0;
2112 } else if (direction == QEDI_IO_TRACE_RSP) {
2113 io_log->req_cpu = smp_processor_id() % qedi->num_queues;
2114 io_log->intr_cpu = qedi->intr_cpu;
2115 io_log->blk_rsp_cpu = smp_processor_id();
2116 }
2117
2118 qedi->io_trace_idx++;
2119 if (qedi->io_trace_idx == QEDI_IO_TRACE_SIZE)
2120 qedi->io_trace_idx = 0;
2121
2122 qedi->use_cached_sge = false;
2123 qedi->use_slow_sge = false;
2124 qedi->use_fast_sge = false;
2125
2126 spin_unlock_irqrestore(&qedi->io_trace_lock, flags);
2127}
2128
2129int qedi_iscsi_send_ioreq(struct iscsi_task *task)
2130{
2131 struct iscsi_conn *conn = task->conn;
2132 struct iscsi_session *session = conn->session;
2133 struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
2134 struct qedi_ctx *qedi = iscsi_host_priv(shost);
2135 struct qedi_conn *qedi_conn = conn->dd_data;
2136 struct qedi_cmd *cmd = task->dd_data;
2137 struct scsi_cmnd *sc = task->sc;
2138 struct iscsi_task_context *fw_task_ctx;
2139 struct iscsi_cached_sge_ctx *cached_sge;
2140 struct iscsi_phys_sgl_ctx *phys_sgl;
2141 struct iscsi_virt_sgl_ctx *virt_sgl;
2142 struct ystorm_iscsi_task_st_ctx *yst_cxt;
2143 struct mstorm_iscsi_task_st_ctx *mst_cxt;
2144 struct iscsi_sgl *sgl_struct;
2145 struct iscsi_sge *single_sge;
2146 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
2147 struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
2148 enum iscsi_task_type task_type;
2149 struct iscsi_cmd_hdr *fw_cmd;
2150 u32 lun[2];
2151 u32 exp_data;
2152 u16 cq_idx = smp_processor_id() % qedi->num_queues;
2153 s16 ptu_invalidate = 0;
2154 s16 tid = 0;
2155 u8 num_fast_sgs;
2156
2157 tid = qedi_get_task_idx(qedi);
2158 if (tid == -1)
2159 return -ENOMEM;
2160
2161 qedi_iscsi_map_sg_list(cmd);
2162
2163 int_to_scsilun(sc->device->lun, (struct scsi_lun *)lun);
2164 fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
2165
2166 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
2167 cmd->task_id = tid;
2168
2169 /* Ystorm context */
2170 fw_cmd = &fw_task_ctx->ystorm_st_context.pdu_hdr.cmd;
2171 SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_ATTR, ISCSI_ATTR_SIMPLE);
2172
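	/*
	 * exp_data_acked is the amount of WRITE data the firmware may
	 * send before the first R2T: with InitialR2T=Yes only immediate
	 * data counts, otherwise unsolicited data up to the negotiated
	 * FirstBurstLength (both capped at the buffer length).
	 */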
2173 if (sc->sc_data_direction == DMA_TO_DEVICE) {
2174 if (conn->session->initial_r2t_en) {
2175 exp_data = min((conn->session->imm_data_en *
2176 conn->max_xmit_dlength),
2177 conn->session->first_burst);
2178 exp_data = min(exp_data, scsi_bufflen(sc));
2179 fw_task_ctx->ustorm_ag_context.exp_data_acked =
2180 cpu_to_le32(exp_data);
2181 } else {
2182 fw_task_ctx->ustorm_ag_context.exp_data_acked =
2183 min(conn->session->first_burst, scsi_bufflen(sc));
2184 }
2185
2186 SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_WRITE, 1);
2187 task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
2188 } else {
2189 if (scsi_bufflen(sc))
2190 SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_READ, 1);
2191 task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
2192 }
2193
2194 fw_cmd->lun.lo = be32_to_cpu(lun[0]);
2195 fw_cmd->lun.hi = be32_to_cpu(lun[1]);
2196
2197 qedi_update_itt_map(qedi, tid, task->itt, cmd);
2198 fw_cmd->itt = qedi_set_itt(tid, get_itt(task->itt));
2199 fw_cmd->expected_transfer_length = scsi_bufflen(sc);
2200 fw_cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
2201 fw_cmd->opcode = hdr->opcode;
2202 qedi_cpy_scsi_cdb(sc, (u32 *)fw_cmd->cdb);
2203
2204 /* Mstorm context */
2205 fw_task_ctx->mstorm_st_context.sense_db.lo = (u32)cmd->sense_buffer_dma;
2206 fw_task_ctx->mstorm_st_context.sense_db.hi =
2207 (u32)((u64)cmd->sense_buffer_dma >> 32);
2208 fw_task_ctx->mstorm_ag_context.task_cid = qedi_conn->iscsi_conn_id;
2209 fw_task_ctx->mstorm_st_context.task_type = task_type;
2210
2211 if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
2212 ptu_invalidate = 1;
2213 qedi->tid_reuse_count[tid] = 0;
2214 }
2215 fw_task_ctx->ystorm_st_context.state.reuse_count =
2216 qedi->tid_reuse_count[tid];
2217 fw_task_ctx->mstorm_st_context.reuse_count =
2218 qedi->tid_reuse_count[tid]++;
2219
2220 /* Ustorm context */
2221 fw_task_ctx->ustorm_st_context.rem_rcv_len = scsi_bufflen(sc);
2222 fw_task_ctx->ustorm_st_context.exp_data_transfer_len = scsi_bufflen(sc);
2223 fw_task_ctx->ustorm_st_context.exp_data_sn =
2224 be32_to_cpu(hdr->exp_statsn);
2225 fw_task_ctx->ustorm_st_context.task_type = task_type;
2226 fw_task_ctx->ustorm_st_context.cq_rss_number = cq_idx;
2227 fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
2228
2229 SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
2230 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
2231 SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
2232 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
2233
2234 num_fast_sgs = (cmd->io_tbl.sge_valid ?
2235 min((u16)QEDI_FAST_SGE_COUNT,
2236 (u16)cmd->io_tbl.sge_valid) : 0);
2237 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
2238 ISCSI_REG1_NUM_FAST_SGES, num_fast_sgs);
2239
2240 fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
2241 fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
2242
2243 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "Total sge count [%d]\n",
2244 cmd->io_tbl.sge_valid);
2245
2246 yst_cxt = &fw_task_ctx->ystorm_st_context;
2247 mst_cxt = &fw_task_ctx->mstorm_st_context;
2248 /* Tx path */
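	/*
	 * SGL selection: a lone SGE is passed inline as a cached SGE;
	 * multi-SGE lists use a physical SGL on the slow path, or a
	 * virtual SGL with up to QEDI_FAST_SGE_COUNT fast SGEs otherwise.
	 */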
2249 if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
2250 /* not considering superIO or FastIO */
2251 if (cmd->io_tbl.sge_valid == 1) {
2252 cached_sge = &yst_cxt->state.sgl_ctx_union.cached_sge;
2253 cached_sge->sge.sge_addr.lo = bd[0].sge_addr.lo;
2254 cached_sge->sge.sge_addr.hi = bd[0].sge_addr.hi;
2255 cached_sge->sge.sge_len = bd[0].sge_len;
2256 qedi->cached_sgls++;
2257 } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
2258 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
2259 ISCSI_MFLAGS_SLOW_IO, 1);
2260 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
2261 ISCSI_REG1_NUM_FAST_SGES, 0);
2262 phys_sgl = &yst_cxt->state.sgl_ctx_union.phys_sgl;
2263 phys_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
2264 phys_sgl->sgl_base.hi =
2265 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
2266 phys_sgl->sgl_size = cmd->io_tbl.sge_valid;
2267 qedi->slow_sgls++;
2268 } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
2269 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
2270 ISCSI_MFLAGS_SLOW_IO, 0);
2271 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
2272 ISCSI_REG1_NUM_FAST_SGES,
2273 min((u16)QEDI_FAST_SGE_COUNT,
2274 (u16)cmd->io_tbl.sge_valid));
2275 virt_sgl = &yst_cxt->state.sgl_ctx_union.virt_sgl;
2276 virt_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
2277 virt_sgl->sgl_base.hi =
2278 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
2279 virt_sgl->sgl_initial_offset =
2280 (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
2281 qedi->fast_sgls++;
2282 }
2283 fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
2284 fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
2285 } else {
2286 /* Rx path */
2287 if (cmd->io_tbl.sge_valid == 1) {
2288 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
2289 ISCSI_MFLAGS_SLOW_IO, 0);
2290 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
2291 ISCSI_MFLAGS_SINGLE_SGE, 1);
2292 single_sge = &mst_cxt->sgl_union.single_sge;
2293 single_sge->sge_addr.lo = bd[0].sge_addr.lo;
2294 single_sge->sge_addr.hi = bd[0].sge_addr.hi;
2295 single_sge->sge_len = bd[0].sge_len;
2296 qedi->cached_sgls++;
2297 } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
2298 sgl_struct = &mst_cxt->sgl_union.sgl_struct;
2299 sgl_struct->sgl_addr.lo =
2300 (u32)(cmd->io_tbl.sge_tbl_dma);
2301 sgl_struct->sgl_addr.hi =
2302 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
2303 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
2304 ISCSI_MFLAGS_SLOW_IO, 1);
2305 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
2306 ISCSI_REG1_NUM_FAST_SGES, 0);
2307 sgl_struct->updated_sge_size = 0;
2308 sgl_struct->updated_sge_offset = 0;
2309 qedi->slow_sgls++;
2310 } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
2311 sgl_struct = &mst_cxt->sgl_union.sgl_struct;
2312 sgl_struct->sgl_addr.lo =
2313 (u32)(cmd->io_tbl.sge_tbl_dma);
2314 sgl_struct->sgl_addr.hi =
2315 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
2316 sgl_struct->byte_offset =
2317 (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
2318 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
2319 ISCSI_MFLAGS_SLOW_IO, 0);
2320 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
2321 ISCSI_REG1_NUM_FAST_SGES, 0);
2322 sgl_struct->updated_sge_size = 0;
2323 sgl_struct->updated_sge_offset = 0;
2324 qedi->fast_sgls++;
2325 }
2326 fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
2327 fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
2328 }
2329
2330 if (cmd->io_tbl.sge_valid == 1)
2331		/* Single-SGL */
2332 qedi->use_cached_sge = true;
2333 else {
2334 if (cmd->use_slowpath)
2335 qedi->use_slow_sge = true;
2336 else
2337 qedi->use_fast_sge = true;
2338 }
2339 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
2340 "%s: %s-SGL: num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x",
2341 (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ?
2342 "Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ?
2343 "Single" : (cmd->use_slowpath ? "SLOW" : "FAST"),
2344 (u16)cmd->io_tbl.sge_valid, (u32)(cmd->io_tbl.sge_tbl_dma),
2345 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32));
2346
2347 /* Add command in active command list */
2348 spin_lock(&qedi_conn->list_lock);
2349 list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list);
2350 cmd->io_cmd_in_list = true;
2351 qedi_conn->active_cmd_count++;
2352 spin_unlock(&qedi_conn->list_lock);
2353
2354 qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
2355 qedi_ring_doorbell(qedi_conn);
2356 if (qedi_io_tracing)
2357 qedi_trace_io(qedi, task, tid, QEDI_IO_TRACE_REQ);
2358
2359 return 0;
2360}
2361
2362int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted)
2363{
2364 struct iscsi_conn *conn = task->conn;
2365 struct qedi_conn *qedi_conn = conn->dd_data;
2366 struct qedi_cmd *cmd = task->dd_data;
2367 s16 ptu_invalidate = 0;
2368
2369 QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
2370		  "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0x%x cid=0x%x\n",
2371 cmd->task_id, get_itt(task->itt), task->state,
2372 cmd->state, qedi_conn->iscsi_conn_id);
2373
2374 qedi_add_to_sq(qedi_conn, task, cmd->task_id, ptu_invalidate, true);
2375 qedi_ring_doorbell(qedi_conn);
2376
2377 return 0;
2378}
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
new file mode 100644
index 000000000000..8e488de88ece
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -0,0 +1,73 @@
1/*
2 * QLogic iSCSI Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#ifndef _QEDI_GBL_H_
11#define _QEDI_GBL_H_
12
13#include "qedi_iscsi.h"
14
15extern uint qedi_io_tracing;
16extern int do_not_recover;
17extern struct scsi_host_template qedi_host_template;
18extern struct iscsi_transport qedi_iscsi_transport;
19extern const struct qed_iscsi_ops *qedi_ops;
20extern struct qedi_debugfs_ops qedi_debugfs_ops;
21extern const struct file_operations qedi_dbg_fops;
22extern struct device_attribute *qedi_shost_attrs[];
23
24int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep);
25void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep);
26
27int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
28 struct iscsi_task *task);
29int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
30 struct iscsi_task *task);
31int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
32 struct iscsi_task *mtask);
33int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
34 struct iscsi_task *task);
35int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
36 struct iscsi_task *task,
37 char *datap, int data_len, int unsol);
38int qedi_iscsi_send_ioreq(struct iscsi_task *task);
39int qedi_get_task_idx(struct qedi_ctx *qedi);
40void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx);
41int qedi_iscsi_cleanup_task(struct iscsi_task *task,
42 bool mark_cmd_node_deleted);
43void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd);
44void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
45 struct qedi_cmd *qedi_cmd);
46void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt);
47void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid);
48void qedi_process_iscsi_error(struct qedi_endpoint *ep,
49 struct async_data *data);
50void qedi_start_conn_recovery(struct qedi_ctx *qedi,
51 struct qedi_conn *qedi_conn);
52struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid);
53void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data);
54void qedi_mark_device_missing(struct iscsi_cls_session *cls_session);
55void qedi_mark_device_available(struct iscsi_cls_session *cls_session);
56void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu);
57int qedi_recover_all_conns(struct qedi_ctx *qedi);
58void qedi_fp_process_cqes(struct qedi_work *work);
59int qedi_cleanup_all_io(struct qedi_ctx *qedi,
60 struct qedi_conn *qedi_conn,
61 struct iscsi_task *task, bool in_recovery);
62void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
63 u16 tid, int8_t direction);
64int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id);
65u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl);
66void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id);
67int qedi_create_sysfs_ctx_attr(struct qedi_ctx *qedi);
68void qedi_remove_sysfs_ctx_attr(struct qedi_ctx *qedi);
69void qedi_clearsq(struct qedi_ctx *qedi,
70 struct qedi_conn *qedi_conn,
71 struct iscsi_task *task);
72
73#endif
diff --git a/drivers/scsi/qedi/qedi_hsi.h b/drivers/scsi/qedi/qedi_hsi.h
new file mode 100644
index 000000000000..8ca44c78f093
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_hsi.h
@@ -0,0 +1,52 @@
1/*
2 * QLogic iSCSI Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9#ifndef __QEDI_HSI__
10#define __QEDI_HSI__
11/*
12 * Add include to common target
13 */
14#include <linux/qed/common_hsi.h>
15
16/*
17 * Add include to common storage target
18 */
19#include <linux/qed/storage_common.h>
20
21/*
22 * Add include to common TCP target
23 */
24#include <linux/qed/tcp_common.h>
25
26/*
27 * Add include to common iSCSI target for both eCore and protocol driver
28 */
29#include <linux/qed/iscsi_common.h>
30
31/*
32 * iSCSI CMDQ element
33 */
34struct iscsi_cmdqe {
35 __le16 conn_id;
36 u8 invalid_command;
37 u8 cmd_hdr_type;
38 __le32 reserved1[2];
39 __le32 cmd_payload[13];
40};
41
42/*
43 * iSCSI CMD header type
44 */
45enum iscsi_cmd_hdr_type {
46 ISCSI_CMD_HDR_TYPE_BHS_ONLY /* iSCSI BHS with no expected AHS */,
47 ISCSI_CMD_HDR_TYPE_BHS_W_AHS /* iSCSI BHS with expected AHS */,
48 ISCSI_CMD_HDR_TYPE_AHS /* iSCSI AHS */,
49 MAX_ISCSI_CMD_HDR_TYPE
50};
51
52#endif /* __QEDI_HSI__ */
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
new file mode 100644
index 000000000000..d6a205433b66
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -0,0 +1,1624 @@
1/*
2 * QLogic iSCSI Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#include <linux/blkdev.h>
11#include <linux/etherdevice.h>
12#include <linux/if_ether.h>
13#include <linux/if_vlan.h>
14#include <scsi/scsi_tcq.h>
15
16#include "qedi.h"
17#include "qedi_iscsi.h"
18#include "qedi_gbl.h"
19
20int qedi_recover_all_conns(struct qedi_ctx *qedi)
21{
22 struct qedi_conn *qedi_conn;
23 int i;
24
25 for (i = 0; i < qedi->max_active_conns; i++) {
26 qedi_conn = qedi_get_conn_from_id(qedi, i);
27 if (!qedi_conn)
28 continue;
29
30 qedi_start_conn_recovery(qedi, qedi_conn);
31 }
32
33 return SUCCESS;
34}
35
36static int qedi_eh_host_reset(struct scsi_cmnd *cmd)
37{
38 struct Scsi_Host *shost = cmd->device->host;
39 struct qedi_ctx *qedi;
40
41 qedi = iscsi_host_priv(shost);
42
43 return qedi_recover_all_conns(qedi);
44}
45
46struct scsi_host_template qedi_host_template = {
47 .module = THIS_MODULE,
48 .name = "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver",
49 .proc_name = QEDI_MODULE_NAME,
50 .queuecommand = iscsi_queuecommand,
51 .eh_abort_handler = iscsi_eh_abort,
52 .eh_device_reset_handler = iscsi_eh_device_reset,
53 .eh_target_reset_handler = iscsi_eh_recover_target,
54 .eh_host_reset_handler = qedi_eh_host_reset,
55 .target_alloc = iscsi_target_alloc,
56 .change_queue_depth = scsi_change_queue_depth,
57 .can_queue = QEDI_MAX_ISCSI_TASK,
58 .this_id = -1,
59 .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD,
60 .max_sectors = 0xffff,
61 .cmd_per_lun = 128,
62 .use_clustering = ENABLE_CLUSTERING,
63 .shost_attrs = qedi_shost_attrs,
64};
65
66static void qedi_conn_free_login_resources(struct qedi_ctx *qedi,
67 struct qedi_conn *qedi_conn)
68{
69 if (qedi_conn->gen_pdu.resp_bd_tbl) {
70 dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
71 qedi_conn->gen_pdu.resp_bd_tbl,
72 qedi_conn->gen_pdu.resp_bd_dma);
73 qedi_conn->gen_pdu.resp_bd_tbl = NULL;
74 }
75
76 if (qedi_conn->gen_pdu.req_bd_tbl) {
77 dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
78 qedi_conn->gen_pdu.req_bd_tbl,
79 qedi_conn->gen_pdu.req_bd_dma);
80 qedi_conn->gen_pdu.req_bd_tbl = NULL;
81 }
82
83 if (qedi_conn->gen_pdu.resp_buf) {
84 dma_free_coherent(&qedi->pdev->dev,
85 ISCSI_DEF_MAX_RECV_SEG_LEN,
86 qedi_conn->gen_pdu.resp_buf,
87 qedi_conn->gen_pdu.resp_dma_addr);
88 qedi_conn->gen_pdu.resp_buf = NULL;
89 }
90
91 if (qedi_conn->gen_pdu.req_buf) {
92 dma_free_coherent(&qedi->pdev->dev,
93 ISCSI_DEF_MAX_RECV_SEG_LEN,
94 qedi_conn->gen_pdu.req_buf,
95 qedi_conn->gen_pdu.req_dma_addr);
96 qedi_conn->gen_pdu.req_buf = NULL;
97 }
98}
99
100static int qedi_conn_alloc_login_resources(struct qedi_ctx *qedi,
101 struct qedi_conn *qedi_conn)
102{
103 qedi_conn->gen_pdu.req_buf =
104 dma_alloc_coherent(&qedi->pdev->dev,
105 ISCSI_DEF_MAX_RECV_SEG_LEN,
106 &qedi_conn->gen_pdu.req_dma_addr,
107 GFP_KERNEL);
108 if (!qedi_conn->gen_pdu.req_buf)
109 goto login_req_buf_failure;
110
111 qedi_conn->gen_pdu.req_buf_size = 0;
112 qedi_conn->gen_pdu.req_wr_ptr = qedi_conn->gen_pdu.req_buf;
113
114 qedi_conn->gen_pdu.resp_buf =
115 dma_alloc_coherent(&qedi->pdev->dev,
116 ISCSI_DEF_MAX_RECV_SEG_LEN,
117 &qedi_conn->gen_pdu.resp_dma_addr,
118 GFP_KERNEL);
119 if (!qedi_conn->gen_pdu.resp_buf)
120 goto login_resp_buf_failure;
121
122 qedi_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
123 qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf;
124
125 qedi_conn->gen_pdu.req_bd_tbl =
126 dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
127 &qedi_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
128 if (!qedi_conn->gen_pdu.req_bd_tbl)
129 goto login_req_bd_tbl_failure;
130
131 qedi_conn->gen_pdu.resp_bd_tbl =
132 dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
133 &qedi_conn->gen_pdu.resp_bd_dma,
134 GFP_KERNEL);
135 if (!qedi_conn->gen_pdu.resp_bd_tbl)
136 goto login_resp_bd_tbl_failure;
137
138 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SESS,
139 "Allocation successful, cid=0x%x\n",
140 qedi_conn->iscsi_conn_id);
141 return 0;
142
143login_resp_bd_tbl_failure:
144 dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
145 qedi_conn->gen_pdu.req_bd_tbl,
146 qedi_conn->gen_pdu.req_bd_dma);
147 qedi_conn->gen_pdu.req_bd_tbl = NULL;
148
149login_req_bd_tbl_failure:
150 dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
151 qedi_conn->gen_pdu.resp_buf,
152 qedi_conn->gen_pdu.resp_dma_addr);
153 qedi_conn->gen_pdu.resp_buf = NULL;
154login_resp_buf_failure:
155 dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
156 qedi_conn->gen_pdu.req_buf,
157 qedi_conn->gen_pdu.req_dma_addr);
158 qedi_conn->gen_pdu.req_buf = NULL;
159login_req_buf_failure:
160 iscsi_conn_printk(KERN_ERR, qedi_conn->cls_conn->dd_data,
161 "login resource alloc failed!!\n");
162 return -ENOMEM;
163}
164
165static void qedi_destroy_cmd_pool(struct qedi_ctx *qedi,
166 struct iscsi_session *session)
167{
168 int i;
169
170 for (i = 0; i < session->cmds_max; i++) {
171 struct iscsi_task *task = session->cmds[i];
172 struct qedi_cmd *cmd = task->dd_data;
173
174 if (cmd->io_tbl.sge_tbl)
175 dma_free_coherent(&qedi->pdev->dev,
176 QEDI_ISCSI_MAX_BDS_PER_CMD *
177 sizeof(struct iscsi_sge),
178 cmd->io_tbl.sge_tbl,
179 cmd->io_tbl.sge_tbl_dma);
180
181 if (cmd->sense_buffer)
182 dma_free_coherent(&qedi->pdev->dev,
183 SCSI_SENSE_BUFFERSIZE,
184 cmd->sense_buffer,
185 cmd->sense_buffer_dma);
186 }
187}
188
189static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session,
190 struct qedi_cmd *cmd)
191{
192 struct qedi_io_bdt *io = &cmd->io_tbl;
193 struct iscsi_sge *sge;
194
195 io->sge_tbl = dma_alloc_coherent(&qedi->pdev->dev,
196 QEDI_ISCSI_MAX_BDS_PER_CMD *
197 sizeof(*sge),
198 &io->sge_tbl_dma, GFP_KERNEL);
199 if (!io->sge_tbl) {
200 iscsi_session_printk(KERN_ERR, session,
201 "Could not allocate BD table.\n");
202 return -ENOMEM;
203 }
204
205 io->sge_valid = 0;
206 return 0;
207}
208
209static int qedi_setup_cmd_pool(struct qedi_ctx *qedi,
210 struct iscsi_session *session)
211{
212 int i;
213
214 for (i = 0; i < session->cmds_max; i++) {
215 struct iscsi_task *task = session->cmds[i];
216 struct qedi_cmd *cmd = task->dd_data;
217
218 task->hdr = &cmd->hdr;
219 task->hdr_max = sizeof(struct iscsi_hdr);
220
221 if (qedi_alloc_sget(qedi, session, cmd))
222 goto free_sgets;
223
224 cmd->sense_buffer = dma_alloc_coherent(&qedi->pdev->dev,
225 SCSI_SENSE_BUFFERSIZE,
226 &cmd->sense_buffer_dma,
227 GFP_KERNEL);
228 if (!cmd->sense_buffer)
229 goto free_sgets;
230 }
231
232 return 0;
233
234free_sgets:
235 qedi_destroy_cmd_pool(qedi, session);
236 return -ENOMEM;
237}
238
239static struct iscsi_cls_session *
240qedi_session_create(struct iscsi_endpoint *ep, u16 cmds_max,
241 u16 qdepth, uint32_t initial_cmdsn)
242{
243 struct Scsi_Host *shost;
244 struct iscsi_cls_session *cls_session;
245 struct qedi_ctx *qedi;
246 struct qedi_endpoint *qedi_ep;
247
248 if (!ep)
249 return NULL;
250
251 qedi_ep = ep->dd_data;
252 shost = qedi_ep->qedi->shost;
253 qedi = iscsi_host_priv(shost);
254
255 if (cmds_max > qedi->max_sqes)
256 cmds_max = qedi->max_sqes;
257 else if (cmds_max < QEDI_SQ_WQES_MIN)
258 cmds_max = QEDI_SQ_WQES_MIN;
259
260 cls_session = iscsi_session_setup(&qedi_iscsi_transport, shost,
261 cmds_max, 0, sizeof(struct qedi_cmd),
262 initial_cmdsn, ISCSI_MAX_TARGET);
263 if (!cls_session) {
264 QEDI_ERR(&qedi->dbg_ctx,
265 "Failed to setup session for ep=%p\n", qedi_ep);
266 return NULL;
267 }
268
269 if (qedi_setup_cmd_pool(qedi, cls_session->dd_data)) {
270 QEDI_ERR(&qedi->dbg_ctx,
271 "Failed to setup cmd pool for ep=%p\n", qedi_ep);
272 goto session_teardown;
273 }
274
275 return cls_session;
276
277session_teardown:
278 iscsi_session_teardown(cls_session);
279 return NULL;
280}
281
282static void qedi_session_destroy(struct iscsi_cls_session *cls_session)
283{
284 struct iscsi_session *session = cls_session->dd_data;
285 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
286 struct qedi_ctx *qedi = iscsi_host_priv(shost);
287
288 qedi_destroy_cmd_pool(qedi, session);
289 iscsi_session_teardown(cls_session);
290}
291
292static struct iscsi_cls_conn *
293qedi_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
294{
295 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
296 struct qedi_ctx *qedi = iscsi_host_priv(shost);
297 struct iscsi_cls_conn *cls_conn;
298 struct qedi_conn *qedi_conn;
299 struct iscsi_conn *conn;
300
301 cls_conn = iscsi_conn_setup(cls_session, sizeof(*qedi_conn),
302 cid);
303 if (!cls_conn) {
304 QEDI_ERR(&qedi->dbg_ctx,
305 "conn_new: iscsi conn setup failed, cid=0x%x, cls_sess=%p!\n",
306 cid, cls_session);
307 return NULL;
308 }
309
310 conn = cls_conn->dd_data;
311 qedi_conn = conn->dd_data;
312 qedi_conn->cls_conn = cls_conn;
313 qedi_conn->qedi = qedi;
314 qedi_conn->ep = NULL;
315 qedi_conn->active_cmd_count = 0;
316 INIT_LIST_HEAD(&qedi_conn->active_cmd_list);
317 spin_lock_init(&qedi_conn->list_lock);
318
319 if (qedi_conn_alloc_login_resources(qedi, qedi_conn)) {
320 iscsi_conn_printk(KERN_ALERT, conn,
321 "conn_new: login resc alloc failed, cid=0x%x, cls_sess=%p!!\n",
322 cid, cls_session);
323 goto free_conn;
324 }
325
326 return cls_conn;
327
328free_conn:
329 iscsi_conn_teardown(cls_conn);
330 return NULL;
331}
332
333void qedi_mark_device_missing(struct iscsi_cls_session *cls_session)
334{
335 iscsi_block_session(cls_session);
336}
337
338void qedi_mark_device_available(struct iscsi_cls_session *cls_session)
339{
340 iscsi_unblock_session(cls_session);
341}
342
343static int qedi_bind_conn_to_iscsi_cid(struct qedi_ctx *qedi,
344 struct qedi_conn *qedi_conn)
345{
346 u32 iscsi_cid = qedi_conn->iscsi_conn_id;
347
348 if (qedi->cid_que.conn_cid_tbl[iscsi_cid]) {
349 iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data,
350 "conn bind - entry #%d not free\n",
351 iscsi_cid);
352 return -EBUSY;
353 }
354
355 qedi->cid_que.conn_cid_tbl[iscsi_cid] = qedi_conn;
356 return 0;
357}
358
359struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid)
360{
361 if (!qedi->cid_que.conn_cid_tbl) {
362 QEDI_ERR(&qedi->dbg_ctx, "missing conn<->cid table\n");
363 return NULL;
364
365 } else if (iscsi_cid >= qedi->max_active_conns) {
366 QEDI_ERR(&qedi->dbg_ctx, "wrong cid #%d\n", iscsi_cid);
367 return NULL;
368 }
369 return qedi->cid_que.conn_cid_tbl[iscsi_cid];
370}
371
372static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
373 struct iscsi_cls_conn *cls_conn,
374 u64 transport_fd, int is_leading)
375{
376 struct iscsi_conn *conn = cls_conn->dd_data;
377 struct qedi_conn *qedi_conn = conn->dd_data;
378 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
379 struct qedi_ctx *qedi = iscsi_host_priv(shost);
380 struct qedi_endpoint *qedi_ep;
381 struct iscsi_endpoint *ep;
382
383 ep = iscsi_lookup_endpoint(transport_fd);
384 if (!ep)
385 return -EINVAL;
386
387 qedi_ep = ep->dd_data;
388 if ((qedi_ep->state == EP_STATE_TCP_FIN_RCVD) ||
389 (qedi_ep->state == EP_STATE_TCP_RST_RCVD))
390 return -EINVAL;
391
392 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
393 return -EINVAL;
394
395 qedi_ep->conn = qedi_conn;
396 qedi_conn->ep = qedi_ep;
397 qedi_conn->iscsi_conn_id = qedi_ep->iscsi_cid;
398 qedi_conn->fw_cid = qedi_ep->fw_cid;
399 qedi_conn->cmd_cleanup_req = 0;
400 qedi_conn->cmd_cleanup_cmpl = 0;
401
402 if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn))
403 return -EINVAL;
404
405 spin_lock_init(&qedi_conn->tmf_work_lock);
406 INIT_LIST_HEAD(&qedi_conn->tmf_work_list);
407 init_waitqueue_head(&qedi_conn->wait_queue);
408 return 0;
409}
410
411static int qedi_iscsi_update_conn(struct qedi_ctx *qedi,
412 struct qedi_conn *qedi_conn)
413{
414 struct qed_iscsi_params_update *conn_info;
415 struct iscsi_cls_conn *cls_conn = qedi_conn->cls_conn;
416 struct iscsi_conn *conn = cls_conn->dd_data;
417 struct qedi_endpoint *qedi_ep;
418 int rval;
419
420 qedi_ep = qedi_conn->ep;
421
422 conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL);
423 if (!conn_info) {
424 QEDI_ERR(&qedi->dbg_ctx, "memory alloc failed\n");
425 return -ENOMEM;
426 }
427
428 conn_info->update_flag = 0;
429
430 if (conn->hdrdgst_en)
431 SET_FIELD(conn_info->update_flag,
432 ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, true);
433 if (conn->datadgst_en)
434 SET_FIELD(conn_info->update_flag,
435 ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, true);
436 if (conn->session->initial_r2t_en)
437 SET_FIELD(conn_info->update_flag,
438 ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T,
439 true);
440 if (conn->session->imm_data_en)
441 SET_FIELD(conn_info->update_flag,
442 ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA,
443 true);
444
445 conn_info->max_seq_size = conn->session->max_burst;
446 conn_info->max_recv_pdu_length = conn->max_recv_dlength;
447 conn_info->max_send_pdu_length = conn->max_xmit_dlength;
448 conn_info->first_seq_length = conn->session->first_burst;
449 conn_info->exp_stat_sn = conn->exp_statsn;
450
451 rval = qedi_ops->update_conn(qedi->cdev, qedi_ep->handle,
452 conn_info);
453 if (rval) {
454 rval = -ENXIO;
455 QEDI_ERR(&qedi->dbg_ctx, "Could not update connection\n");
456 goto update_conn_err;
457 }
458
459 kfree(conn_info);
460 rval = 0;
461
462update_conn_err:
463 return rval;
464}
465
466static u16 qedi_calc_mss(u16 pmtu, u8 is_ipv6, u8 tcp_ts_en, u8 vlan_en)
467{
468 u16 mss = 0;
469 u16 hdrs = TCP_HDR_LEN;
470
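	/*
	 * MSS = PMTU minus TCP/IP (plus optional VLAN and timestamp)
	 * overhead. E.g., IPv4, no VLAN, timestamps on, pmtu=1500:
	 * 1500 - 20 (TCP) - 20 (IPv4) - 12 (TS option) = 1448, assuming
	 * the conventional header-length constants.
	 */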
471 if (is_ipv6)
472 hdrs += IPV6_HDR_LEN;
473 else
474 hdrs += IPV4_HDR_LEN;
475
476 if (vlan_en)
477 hdrs += VLAN_LEN;
478
479 mss = pmtu - hdrs;
480
481 if (tcp_ts_en)
482 mss -= TCP_OPTION_LEN;
483
484 if (!mss)
485 mss = DEF_MSS;
486
487 return mss;
488}
489
490static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
491{
492 struct qedi_ctx *qedi = qedi_ep->qedi;
493 struct qed_iscsi_params_offload *conn_info;
494 int rval;
495 int i;
496
497 conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL);
498 if (!conn_info) {
499 QEDI_ERR(&qedi->dbg_ctx,
500 "Failed to allocate memory ep=%p\n", qedi_ep);
501 return -ENOMEM;
502 }
503
504 ether_addr_copy(conn_info->src.mac, qedi_ep->src_mac);
505 ether_addr_copy(conn_info->dst.mac, qedi_ep->dst_mac);
506
507 conn_info->src.ip[0] = ntohl(qedi_ep->src_addr[0]);
508 conn_info->dst.ip[0] = ntohl(qedi_ep->dst_addr[0]);
509
510 if (qedi_ep->ip_type == TCP_IPV4) {
511 conn_info->ip_version = 0;
512 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
513 "After ntohl: src_addr=%pI4, dst_addr=%pI4\n",
514 qedi_ep->src_addr, qedi_ep->dst_addr);
515 } else {
516 for (i = 1; i < 4; i++) {
517 conn_info->src.ip[i] = ntohl(qedi_ep->src_addr[i]);
518 conn_info->dst.ip[i] = ntohl(qedi_ep->dst_addr[i]);
519 }
520
521 conn_info->ip_version = 1;
522 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
523 "After ntohl: src_addr=%pI6, dst_addr=%pI6\n",
524 qedi_ep->src_addr, qedi_ep->dst_addr);
525 }
526
527 conn_info->src.port = qedi_ep->src_port;
528 conn_info->dst.port = qedi_ep->dst_port;
529
530 conn_info->layer_code = ISCSI_SLOW_PATH_LAYER_CODE;
531 conn_info->sq_pbl_addr = qedi_ep->sq_pbl_dma;
532 conn_info->vlan_id = qedi_ep->vlan_id;
533
534 SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_TS_EN, 1);
535 SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_EN, 1);
536 SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_CNT_EN, 1);
537 SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_KA_EN, 1);
538
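	/* Spread connections across the 8 CQs, keyed by fw_cid. */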
539 conn_info->default_cq = (qedi_ep->fw_cid % 8);
540
541 conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT;
542 conn_info->dup_ack_theshold = 3;
543 conn_info->rcv_wnd = 65535;
544 conn_info->cwnd = DEF_MAX_CWND;
545
546 conn_info->ss_thresh = 65535;
547 conn_info->srtt = 300;
548 conn_info->rtt_var = 150;
549 conn_info->flow_label = 0;
550 conn_info->ka_timeout = DEF_KA_TIMEOUT;
551 conn_info->ka_interval = DEF_KA_INTERVAL;
552 conn_info->max_rt_time = DEF_MAX_RT_TIME;
553 conn_info->ttl = DEF_TTL;
554 conn_info->tos_or_tc = DEF_TOS;
555 conn_info->remote_port = qedi_ep->dst_port;
556 conn_info->local_port = qedi_ep->src_port;
557
558 conn_info->mss = qedi_calc_mss(qedi_ep->pmtu,
559 (qedi_ep->ip_type == TCP_IPV6),
560 1, (qedi_ep->vlan_id != 0));
561
562 conn_info->rcv_wnd_scale = 4;
563 conn_info->ts_ticks_per_second = 1000;
564 conn_info->da_timeout_value = 200;
565 conn_info->ack_frequency = 2;
566
567 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
568 "Default cq index [%d], mss [%d]\n",
569 conn_info->default_cq, conn_info->mss);
570
571 rval = qedi_ops->offload_conn(qedi->cdev, qedi_ep->handle, conn_info);
572 if (rval)
573 QEDI_ERR(&qedi->dbg_ctx, "offload_conn returned %d, ep=%p\n",
574 rval, qedi_ep);
575
576 kfree(conn_info);
577 return rval;
578}
579
580static int qedi_conn_start(struct iscsi_cls_conn *cls_conn)
581{
582 struct iscsi_conn *conn = cls_conn->dd_data;
583 struct qedi_conn *qedi_conn = conn->dd_data;
584 struct qedi_ctx *qedi;
585 int rval;
586
587 qedi = qedi_conn->qedi;
588
589 rval = qedi_iscsi_update_conn(qedi, qedi_conn);
590 if (rval) {
591 iscsi_conn_printk(KERN_ALERT, conn,
592				  "conn_start: FW offload conn failed.\n");
593 rval = -EINVAL;
594 goto start_err;
595 }
596
597 clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
598 qedi_conn->abrt_conn = 0;
599
600 rval = iscsi_conn_start(cls_conn);
601 if (rval) {
602 iscsi_conn_printk(KERN_ALERT, conn,
603				  "iscsi_conn_start: FW offload conn failed!!\n");
604 }
605
606start_err:
607 return rval;
608}
609
610static void qedi_conn_destroy(struct iscsi_cls_conn *cls_conn)
611{
612 struct iscsi_conn *conn = cls_conn->dd_data;
613 struct qedi_conn *qedi_conn = conn->dd_data;
614 struct Scsi_Host *shost;
615 struct qedi_ctx *qedi;
616
617 shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
618 qedi = iscsi_host_priv(shost);
619
620 qedi_conn_free_login_resources(qedi, qedi_conn);
621 iscsi_conn_teardown(cls_conn);
622}
623
624static int qedi_ep_get_param(struct iscsi_endpoint *ep,
625 enum iscsi_param param, char *buf)
626{
627 struct qedi_endpoint *qedi_ep = ep->dd_data;
628 int len;
629
630 if (!qedi_ep)
631 return -ENOTCONN;
632
633 switch (param) {
634 case ISCSI_PARAM_CONN_PORT:
635 len = sprintf(buf, "%hu\n", qedi_ep->dst_port);
636 break;
637 case ISCSI_PARAM_CONN_ADDRESS:
638 if (qedi_ep->ip_type == TCP_IPV4)
639 len = sprintf(buf, "%pI4\n", qedi_ep->dst_addr);
640 else
641 len = sprintf(buf, "%pI6\n", qedi_ep->dst_addr);
642 break;
643 default:
644 return -ENOTCONN;
645 }
646
647 return len;
648}
649
650static int qedi_host_get_param(struct Scsi_Host *shost,
651 enum iscsi_host_param param, char *buf)
652{
653 struct qedi_ctx *qedi;
654 int len;
655
656 qedi = iscsi_host_priv(shost);
657
658 switch (param) {
659 case ISCSI_HOST_PARAM_HWADDRESS:
660 len = sysfs_format_mac(buf, qedi->mac, 6);
661 break;
662 case ISCSI_HOST_PARAM_NETDEV_NAME:
663 len = sprintf(buf, "host%d\n", shost->host_no);
664 break;
665 case ISCSI_HOST_PARAM_IPADDRESS:
666 if (qedi->ip_type == TCP_IPV4)
667 len = sprintf(buf, "%pI4\n", qedi->src_ip);
668 else
669 len = sprintf(buf, "%pI6\n", qedi->src_ip);
670 break;
671 default:
672 return iscsi_host_get_param(shost, param, buf);
673 }
674
675 return len;
676}
677
678static void qedi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
679 struct iscsi_stats *stats)
680{
681 struct iscsi_conn *conn = cls_conn->dd_data;
682 struct qed_iscsi_stats iscsi_stats;
683 struct Scsi_Host *shost;
684 struct qedi_ctx *qedi;
685
686 shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
687 qedi = iscsi_host_priv(shost);
688 qedi_ops->get_stats(qedi->cdev, &iscsi_stats);
689
690 conn->txdata_octets = iscsi_stats.iscsi_tx_bytes_cnt;
691 conn->rxdata_octets = iscsi_stats.iscsi_rx_bytes_cnt;
692 conn->dataout_pdus_cnt = (uint32_t)iscsi_stats.iscsi_tx_data_pdu_cnt;
693 conn->datain_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_data_pdu_cnt;
694 conn->r2t_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_r2t_pdu_cnt;
695
696 stats->txdata_octets = conn->txdata_octets;
697 stats->rxdata_octets = conn->rxdata_octets;
698 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
699 stats->dataout_pdus = conn->dataout_pdus_cnt;
700 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
701 stats->datain_pdus = conn->datain_pdus_cnt;
702 stats->r2t_pdus = conn->r2t_pdus_cnt;
703 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
704 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
705 stats->digest_err = 0;
706 stats->timeout_err = 0;
707 strcpy(stats->custom[0].desc, "eh_abort_cnt");
708 stats->custom[0].value = conn->eh_abort_cnt;
709 stats->custom_length = 1;
710}
711
712static void qedi_iscsi_prep_generic_pdu_bd(struct qedi_conn *qedi_conn)
713{
714 struct iscsi_sge *bd_tbl;
715
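	/*
	 * Generic (login/text/NOP) PDUs use one-entry BD tables: the
	 * request BD spans exactly the bytes staged in req_buf, while
	 * the response BD exposes the whole receive buffer.
	 */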
716 bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
717
718 bd_tbl->sge_addr.hi =
719 (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
720 bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.req_dma_addr;
721 bd_tbl->sge_len = qedi_conn->gen_pdu.req_wr_ptr -
722 qedi_conn->gen_pdu.req_buf;
723 bd_tbl->reserved0 = 0;
724 bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
725 bd_tbl->sge_addr.hi =
726 (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
727 bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.resp_dma_addr;
728 bd_tbl->sge_len = ISCSI_DEF_MAX_RECV_SEG_LEN;
729 bd_tbl->reserved0 = 0;
730}
731
732static int qedi_iscsi_send_generic_request(struct iscsi_task *task)
733{
734 struct qedi_cmd *cmd = task->dd_data;
735 struct qedi_conn *qedi_conn = cmd->conn;
736 char *buf;
737 int data_len;
738 int rc = 0;
739
740 qedi_iscsi_prep_generic_pdu_bd(qedi_conn);
741 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
742 case ISCSI_OP_LOGIN:
743 qedi_send_iscsi_login(qedi_conn, task);
744 break;
745 case ISCSI_OP_NOOP_OUT:
746 data_len = qedi_conn->gen_pdu.req_buf_size;
747 buf = qedi_conn->gen_pdu.req_buf;
748 if (data_len)
749 rc = qedi_send_iscsi_nopout(qedi_conn, task,
750 buf, data_len, 1);
751 else
752 rc = qedi_send_iscsi_nopout(qedi_conn, task,
753 NULL, 0, 1);
754 break;
755 case ISCSI_OP_LOGOUT:
756 rc = qedi_send_iscsi_logout(qedi_conn, task);
757 break;
758 case ISCSI_OP_SCSI_TMFUNC:
759 rc = qedi_iscsi_abort_work(qedi_conn, task);
760 break;
761 case ISCSI_OP_TEXT:
762 rc = qedi_send_iscsi_text(qedi_conn, task);
763 break;
764 default:
765 iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data,
766 "unsupported op 0x%x\n", task->hdr->opcode);
767 }
768
769 return rc;
770}
771
772static int qedi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
773{
774 struct qedi_conn *qedi_conn = conn->dd_data;
775 struct qedi_cmd *cmd = task->dd_data;
776
777 memset(qedi_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
778
779 qedi_conn->gen_pdu.req_buf_size = task->data_count;
780
781 if (task->data_count) {
782 memcpy(qedi_conn->gen_pdu.req_buf, task->data,
783 task->data_count);
784 qedi_conn->gen_pdu.req_wr_ptr =
785 qedi_conn->gen_pdu.req_buf + task->data_count;
786 }
787
788 cmd->conn = conn->dd_data;
789 cmd->scsi_cmd = NULL;
790 return qedi_iscsi_send_generic_request(task);
791}
792
793static int qedi_task_xmit(struct iscsi_task *task)
794{
795 struct iscsi_conn *conn = task->conn;
796 struct qedi_conn *qedi_conn = conn->dd_data;
797 struct qedi_cmd *cmd = task->dd_data;
798 struct scsi_cmnd *sc = task->sc;
799
800 cmd->state = 0;
801 cmd->task = NULL;
802 cmd->use_slowpath = false;
803 cmd->conn = qedi_conn;
804 cmd->task = task;
805 cmd->io_cmd_in_list = false;
806 INIT_LIST_HEAD(&cmd->io_cmd);
807
808 if (!sc)
809 return qedi_mtask_xmit(conn, task);
810
811 cmd->scsi_cmd = sc;
812 return qedi_iscsi_send_ioreq(task);
813}
814
815static struct iscsi_endpoint *
816qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
817 int non_blocking)
818{
819 struct qedi_ctx *qedi;
820 struct iscsi_endpoint *ep;
821 struct qedi_endpoint *qedi_ep;
822 struct sockaddr_in *addr;
823 struct sockaddr_in6 *addr6;
824 struct qed_dev *cdev = NULL;
825 struct qedi_uio_dev *udev = NULL;
826 struct iscsi_path path_req;
827 u32 msg_type = ISCSI_KEVENT_IF_DOWN;
828 u32 iscsi_cid = QEDI_CID_RESERVED;
829 u16 len = 0;
830 char *buf = NULL;
831 int ret;
832
833 if (!shost) {
834 ret = -ENXIO;
835 QEDI_ERR(NULL, "shost is NULL\n");
836 return ERR_PTR(ret);
837 }
838
839 if (do_not_recover) {
840 ret = -ENOMEM;
841 return ERR_PTR(ret);
842 }
843
844 qedi = iscsi_host_priv(shost);
845 cdev = qedi->cdev;
846 udev = qedi->udev;
847
848 if (test_bit(QEDI_IN_OFFLINE, &qedi->flags) ||
849 test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
850 ret = -ENOMEM;
851 return ERR_PTR(ret);
852 }
853
854 ep = iscsi_create_endpoint(sizeof(struct qedi_endpoint));
855 if (!ep) {
856 QEDI_ERR(&qedi->dbg_ctx, "endpoint create fail\n");
857 ret = -ENOMEM;
858 return ERR_PTR(ret);
859 }
860 qedi_ep = ep->dd_data;
861 memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
862 qedi_ep->state = EP_STATE_IDLE;
863 qedi_ep->iscsi_cid = (u32)-1;
864 qedi_ep->qedi = qedi;
865
866 if (dst_addr->sa_family == AF_INET) {
867 addr = (struct sockaddr_in *)dst_addr;
868 memcpy(qedi_ep->dst_addr, &addr->sin_addr.s_addr,
869 sizeof(struct in_addr));
870 qedi_ep->dst_port = ntohs(addr->sin_port);
871 qedi_ep->ip_type = TCP_IPV4;
872 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
873 "dst_addr=%pI4, dst_port=%u\n",
874 qedi_ep->dst_addr, qedi_ep->dst_port);
875 } else if (dst_addr->sa_family == AF_INET6) {
876 addr6 = (struct sockaddr_in6 *)dst_addr;
877 memcpy(qedi_ep->dst_addr, &addr6->sin6_addr,
878 sizeof(struct in6_addr));
879 qedi_ep->dst_port = ntohs(addr6->sin6_port);
880 qedi_ep->ip_type = TCP_IPV6;
881 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
882 "dst_addr=%pI6, dst_port=%u\n",
883 qedi_ep->dst_addr, qedi_ep->dst_port);
884 } else {
885 QEDI_ERR(&qedi->dbg_ctx, "Invalid endpoint\n");
 ret = -EAFNOSUPPORT;
 goto ep_conn_exit;
886 }
887
888 if (atomic_read(&qedi->link_state) != QEDI_LINK_UP) {
889 QEDI_WARN(&qedi->dbg_ctx, "qedi link down\n");
890 ret = -ENXIO;
891 goto ep_conn_exit;
892 }
893
894 ret = qedi_alloc_sq(qedi, qedi_ep);
895 if (ret)
896 goto ep_conn_exit;
897
898 ret = qedi_ops->acquire_conn(qedi->cdev, &qedi_ep->handle,
899 &qedi_ep->fw_cid, &qedi_ep->p_doorbell);
900
901 if (ret) {
902 QEDI_ERR(&qedi->dbg_ctx, "Could not acquire connection\n");
903 ret = -ENXIO;
904 goto ep_free_sq;
905 }
906
907 iscsi_cid = qedi_ep->handle;
908 qedi_ep->iscsi_cid = iscsi_cid;
909
910 init_waitqueue_head(&qedi_ep->ofld_wait);
911 init_waitqueue_head(&qedi_ep->tcp_ofld_wait);
912 qedi_ep->state = EP_STATE_OFLDCONN_START;
913 qedi->ep_tbl[iscsi_cid] = qedi_ep;
914
915 buf = (char *)&path_req;
916 len = sizeof(path_req);
917 memset(&path_req, 0, len);
918
919 msg_type = ISCSI_KEVENT_PATH_REQ;
920 path_req.handle = (u64)qedi_ep->iscsi_cid;
921 path_req.pmtu = qedi->ll2_mtu;
922 qedi_ep->pmtu = qedi->ll2_mtu;
923 if (qedi_ep->ip_type == TCP_IPV4) {
924 memcpy(&path_req.dst.v4_addr, &qedi_ep->dst_addr,
925 sizeof(struct in_addr));
926 path_req.ip_addr_len = 4;
927 } else {
928 memcpy(&path_req.dst.v6_addr, &qedi_ep->dst_addr,
929 sizeof(struct in6_addr));
930 path_req.ip_addr_len = 16;
931 }
932
933 ret = iscsi_offload_mesg(shost, &qedi_iscsi_transport, msg_type, buf,
934 len);
935 if (ret) {
936 QEDI_ERR(&qedi->dbg_ctx,
937 "iscsi_offload_mesg() failed for cid=0x%x ret=%d\n",
938 iscsi_cid, ret);
939 goto ep_rel_conn;
940 }
941
942 atomic_inc(&qedi->num_offloads);
943 return ep;
944
945ep_rel_conn:
946 qedi->ep_tbl[iscsi_cid] = NULL;
947 ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
948 if (ret)
949 QEDI_WARN(&qedi->dbg_ctx, "release_conn returned %d\n",
950 ret);
951ep_free_sq:
952 qedi_free_sq(qedi, qedi_ep);
953ep_conn_exit:
954 iscsi_destroy_endpoint(ep);
955 return ERR_PTR(ret);
956}
957
958static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
959{
960 struct qedi_endpoint *qedi_ep;
961 int ret = 0;
962
963 if (do_not_recover)
964 return 1;
965
966 qedi_ep = ep->dd_data;
967 if (qedi_ep->state == EP_STATE_IDLE ||
968 qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
969 return -1;
970
971 if (qedi_ep->state == EP_STATE_OFLDCONN_COMPL)
972 return 1;
973
974 ret = wait_event_interruptible_timeout(qedi_ep->ofld_wait,
975 QEDI_OFLD_WAIT_STATE(qedi_ep),
976 msecs_to_jiffies(timeout_ms));
977
978 if (qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
979 ret = -1;
980
981 if (ret > 0)
982 return 1;
983 else if (!ret)
984 return 0;
985 else
986 return ret;
987}
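
For context, qedi_ep_poll() follows the usual iscsi_transport ->ep_poll() convention:

/*
 * ->ep_poll() return convention (as consumed by the open-iscsi daemon):
 *   1          offload/connect completed, the endpoint is usable
 *   0          not ready yet, the caller should poll again
 *   negative   the connection attempt failed
 */
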
988
989static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn)
990{
991 struct qedi_cmd *cmd, *cmd_tmp;
992
993 list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
994 io_cmd) {
995 list_del_init(&cmd->io_cmd);
996 qedi_conn->active_cmd_count--;
997 }
998}
999
1000static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
1001{
1002 struct qedi_endpoint *qedi_ep;
1003 struct qedi_conn *qedi_conn = NULL;
1004 struct iscsi_conn *conn = NULL;
1005 struct qedi_ctx *qedi;
1006 int ret = 0;
1007 int wait_delay = 20 * HZ;
1008 int abrt_conn = 0;
1009 int count = 10;
1010
1011 qedi_ep = ep->dd_data;
1012 qedi = qedi_ep->qedi;
1013
1014 flush_work(&qedi_ep->offload_work);
1015
1016 if (qedi_ep->conn) {
1017 qedi_conn = qedi_ep->conn;
1018 conn = qedi_conn->cls_conn->dd_data;
1019 iscsi_suspend_queue(conn);
1020 abrt_conn = qedi_conn->abrt_conn;
1021
1022 while (count--) {
1023 if (!test_bit(QEDI_CONN_FW_CLEANUP,
1024 &qedi_conn->flags)) {
1025 break;
1026 }
1027 msleep(1000);
1028 }
1029
1030 if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
1031 if (do_not_recover) {
1032 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1033 "Do not recover cid=0x%x\n",
1034 qedi_ep->iscsi_cid);
1035 goto ep_exit_recover;
1036 }
1037 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1038 "Reset recovery cid=0x%x, qedi_ep=%p, state=0x%x\n",
1039 qedi_ep->iscsi_cid, qedi_ep, qedi_ep->state);
1040 qedi_cleanup_active_cmd_list(qedi_conn);
1041 goto ep_release_conn;
1042 }
1043 }
1044
1045 if (do_not_recover)
1046 goto ep_exit_recover;
1047
1048 switch (qedi_ep->state) {
1049 case EP_STATE_OFLDCONN_START:
1050 goto ep_release_conn;
1051 case EP_STATE_OFLDCONN_FAILED:
1052 break;
1053 case EP_STATE_OFLDCONN_COMPL:
1054 if (unlikely(!qedi_conn))
1055 break;
1056
1057 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1058 "Active cmd count=%d, abrt_conn=%d, ep state=0x%x, cid=0x%x, qedi_conn=%p\n",
1059 qedi_conn->active_cmd_count, abrt_conn,
1060 qedi_ep->state,
1061 qedi_ep->iscsi_cid,
1062 qedi_ep->conn);
1064
1065 if (!qedi_conn->active_cmd_count)
1066 abrt_conn = 0;
1067 else
1068 abrt_conn = 1;
1069
1070 if (abrt_conn)
1071 qedi_clearsq(qedi, qedi_conn, NULL);
1072 break;
1073 default:
1074 break;
1075 }
1076
1077 qedi_ep->state = EP_STATE_DISCONN_START;
1078 ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
1079 if (ret) {
1080 QEDI_WARN(&qedi->dbg_ctx,
1081 "destroy_conn failed returned %d\n", ret);
1082 } else {
1083 ret = wait_event_interruptible_timeout(
1084 qedi_ep->tcp_ofld_wait,
1085 (qedi_ep->state !=
1086 EP_STATE_DISCONN_START),
1087 wait_delay);
1088 if ((ret <= 0) || (qedi_ep->state == EP_STATE_DISCONN_START)) {
1089 QEDI_WARN(&qedi->dbg_ctx,
1090 "Destroy conn timedout or interrupted, ret=%d, delay=%d, cid=0x%x\n",
1091 ret, wait_delay, qedi_ep->iscsi_cid);
1092 }
1093 }
1094
1095ep_release_conn:
1096 ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
1097 if (ret)
1098 QEDI_WARN(&qedi->dbg_ctx,
1099 "release_conn returned %d, cid=0x%x\n",
1100 ret, qedi_ep->iscsi_cid);
1101ep_exit_recover:
1102 qedi_ep->state = EP_STATE_IDLE;
1103 qedi->ep_tbl[qedi_ep->iscsi_cid] = NULL;
1104 qedi->cid_que.conn_cid_tbl[qedi_ep->iscsi_cid] = NULL;
1105 qedi_free_id(&qedi->lcl_port_tbl, qedi_ep->src_port);
1106 qedi_free_sq(qedi, qedi_ep);
1107
1108 if (qedi_conn)
1109 qedi_conn->ep = NULL;
1110
1111 qedi_ep->conn = NULL;
1112 qedi_ep->qedi = NULL;
1113 atomic_dec(&qedi->num_offloads);
1114
1115 iscsi_destroy_endpoint(ep);
1116}
1117
1118static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
1119{
1120 struct qed_dev *cdev = qedi->cdev;
1121 struct qedi_uio_dev *udev;
1122 struct qedi_uio_ctrl *uctrl;
1123 struct sk_buff *skb;
1124 u32 len;
1125 int rc = 0;
1126
1127 udev = qedi->udev;
1128 if (!udev) {
1129 QEDI_ERR(&qedi->dbg_ctx, "udev is NULL.\n");
1130 return -EINVAL;
1131 }
1132
1133 uctrl = (struct qedi_uio_ctrl *)udev->uctrl;
1134 if (!uctrl) {
1135 QEDI_ERR(&qedi->dbg_ctx, "uctlr is NULL.\n");
1136 return -EINVAL;
1137 }
1138
1139 len = uctrl->host_tx_pkt_len;
1140 if (!len) {
1141 QEDI_ERR(&qedi->dbg_ctx, "Invalid len %u\n", len);
1142 return -EINVAL;
1143 }
1144
1145 skb = alloc_skb(len, GFP_ATOMIC);
1146 if (!skb) {
1147 QEDI_ERR(&qedi->dbg_ctx, "alloc_skb failed\n");
1148 return -EINVAL;
1149 }
1150
1151 skb_put(skb, len);
1152 memcpy(skb->data, udev->tx_pkt, len);
1153 skb->ip_summed = CHECKSUM_NONE;
1154
1155 if (vlanid)
1156 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
1157
1158 rc = qedi_ops->ll2->start_xmit(cdev, skb);
1159 if (rc) {
1160 QEDI_ERR(&qedi->dbg_ctx, "ll2 start_xmit returned %d\n",
1161 rc);
1162 kfree_skb(skb);
1163 }
1164
1165 uctrl->host_tx_pkt_len = 0;
1166 uctrl->hw_tx_cons++;
1167
1168 return rc;
1169}
1170
1171static void qedi_offload_work(struct work_struct *work)
1172{
1173 struct qedi_endpoint *qedi_ep =
1174 container_of(work, struct qedi_endpoint, offload_work);
1175 struct qedi_ctx *qedi;
1176 int wait_delay = 20 * HZ;
1177 int ret;
1178
1179 qedi = qedi_ep->qedi;
1180
1181 ret = qedi_iscsi_offload_conn(qedi_ep);
1182 if (ret) {
1183 QEDI_ERR(&qedi->dbg_ctx,
1184 "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
1185 qedi_ep->iscsi_cid, qedi_ep, ret);
1186 qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
1187 return;
1188 }
1189
1190 ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
1191 (qedi_ep->state ==
1192 EP_STATE_OFLDCONN_COMPL),
1193 wait_delay);
1194 if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
1195 qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
1196 QEDI_ERR(&qedi->dbg_ctx,
1197 "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
1198 qedi_ep->iscsi_cid, qedi_ep);
1199 }
1200}
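
Both qedi_offload_work() and the disconnect path above key off the return value of wait_event_interruptible_timeout(); for reference:

/*
 * wait_event_interruptible_timeout() returns:
 *   > 0  the condition became true (remaining jiffies, at least 1)
 *   = 0  the timeout elapsed with the condition still false
 *   < 0  -ERESTARTSYS, the sleep was interrupted by a signal
 * Hence the (ret <= 0 || state != ...COMPL) check treats both a timeout
 * and a signal as an offload failure.
 */
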
1201
1202static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
1203{
1204 struct qedi_ctx *qedi;
1205 struct qedi_endpoint *qedi_ep;
1206 int ret = 0;
1207 u32 iscsi_cid;
1208 u16 port_id = 0;
1209
1210 if (!shost) {
1211 ret = -ENXIO;
1212 QEDI_ERR(NULL, "shost is NULL\n");
1213 return ret;
1214 }
1215
1216 if (strcmp(shost->hostt->proc_name, "qedi")) {
1217 ret = -ENXIO;
1218 QEDI_ERR(NULL, "shost %s is invalid\n",
1219 shost->hostt->proc_name);
1220 return ret;
1221 }
1222
1223 qedi = iscsi_host_priv(shost);
1224 if (path_data->handle == QEDI_PATH_HANDLE) {
1225 ret = qedi_data_avail(qedi, path_data->vlan_id);
1226 goto set_path_exit;
1227 }
1228
1229 iscsi_cid = (u32)path_data->handle;
1230 qedi_ep = qedi->ep_tbl[iscsi_cid];
1231 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
1232 "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep);
1233
1234 if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
1235 QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
1236 ret = -EIO;
1237 goto set_path_exit;
1238 }
1239
1240 ether_addr_copy(&qedi_ep->src_mac[0], &qedi->mac[0]);
1241 ether_addr_copy(&qedi_ep->dst_mac[0], &path_data->mac_addr[0]);
1242
1243 qedi_ep->vlan_id = path_data->vlan_id;
1244 if (path_data->pmtu < DEF_PATH_MTU) {
1245 qedi_ep->pmtu = qedi->ll2_mtu;
1246 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1247 "MTU cannot be %u, using default MTU %u\n",
1248 path_data->pmtu, qedi_ep->pmtu);
1249 }
1250
1251 if (path_data->pmtu != qedi->ll2_mtu) {
1252 if (path_data->pmtu > JUMBO_MTU) {
1253 ret = -EINVAL;
1254 QEDI_ERR(NULL, "Invalid MTU %u\n", path_data->pmtu);
1255 goto set_path_exit;
1256 }
1257
1258 qedi_reset_host_mtu(qedi, path_data->pmtu);
1259 qedi_ep->pmtu = qedi->ll2_mtu;
1260 }
1261
1262 port_id = qedi_ep->src_port;
1263 if (port_id >= QEDI_LOCAL_PORT_MIN &&
1264 port_id < QEDI_LOCAL_PORT_MAX) {
1265 if (qedi_alloc_id(&qedi->lcl_port_tbl, port_id))
1266 port_id = 0;
1267 } else {
1268 port_id = 0;
1269 }
1270
1271 if (!port_id) {
1272 port_id = qedi_alloc_new_id(&qedi->lcl_port_tbl);
1273 if (port_id == QEDI_LOCAL_PORT_INVALID) {
1274 QEDI_ERR(&qedi->dbg_ctx,
1275 "Failed to allocate port id for iscsi_cid=0x%x\n",
1276 iscsi_cid);
1277 ret = -ENOMEM;
1278 goto set_path_exit;
1279 }
1280 }
1281
1282 qedi_ep->src_port = port_id;
1283
1284 if (qedi_ep->ip_type == TCP_IPV4) {
1285 memcpy(&qedi_ep->src_addr[0], &path_data->src.v4_addr,
1286 sizeof(struct in_addr));
1287 memcpy(&qedi->src_ip[0], &path_data->src.v4_addr,
1288 sizeof(struct in_addr));
1289 qedi->ip_type = TCP_IPV4;
1290
1291 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
1292 "src addr:port=%pI4:%u, dst addr:port=%pI4:%u\n",
1293 qedi_ep->src_addr, qedi_ep->src_port,
1294 qedi_ep->dst_addr, qedi_ep->dst_port);
1295 } else {
1296 memcpy(&qedi_ep->src_addr[0], &path_data->src.v6_addr,
1297 sizeof(struct in6_addr));
1298 memcpy(&qedi->src_ip[0], &path_data->src.v6_addr,
1299 sizeof(struct in6_addr));
1300 qedi->ip_type = TCP_IPV6;
1301
1302 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
1303 "src addr:port=%pI6:%u, dst addr:port=%pI6:%u\n",
1304 qedi_ep->src_addr, qedi_ep->src_port,
1305 qedi_ep->dst_addr, qedi_ep->dst_port);
1306 }
1307
1308 INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
1309 queue_work(qedi->offload_thread, &qedi_ep->offload_work);
1310
1311 ret = 0;
1312
1313set_path_exit:
1314 return ret;
1315}
1316
1317static umode_t qedi_attr_is_visible(int param_type, int param)
1318{
1319 switch (param_type) {
1320 case ISCSI_HOST_PARAM:
1321 switch (param) {
1322 case ISCSI_HOST_PARAM_NETDEV_NAME:
1323 case ISCSI_HOST_PARAM_HWADDRESS:
1324 case ISCSI_HOST_PARAM_IPADDRESS:
1325 return 0444;
1326 default:
1327 return 0;
1328 }
1329 case ISCSI_PARAM:
1330 switch (param) {
1331 case ISCSI_PARAM_MAX_RECV_DLENGTH:
1332 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
1333 case ISCSI_PARAM_HDRDGST_EN:
1334 case ISCSI_PARAM_DATADGST_EN:
1335 case ISCSI_PARAM_CONN_ADDRESS:
1336 case ISCSI_PARAM_CONN_PORT:
1337 case ISCSI_PARAM_EXP_STATSN:
1338 case ISCSI_PARAM_PERSISTENT_ADDRESS:
1339 case ISCSI_PARAM_PERSISTENT_PORT:
1340 case ISCSI_PARAM_PING_TMO:
1341 case ISCSI_PARAM_RECV_TMO:
1342 case ISCSI_PARAM_INITIAL_R2T_EN:
1343 case ISCSI_PARAM_MAX_R2T:
1344 case ISCSI_PARAM_IMM_DATA_EN:
1345 case ISCSI_PARAM_FIRST_BURST:
1346 case ISCSI_PARAM_MAX_BURST:
1347 case ISCSI_PARAM_PDU_INORDER_EN:
1348 case ISCSI_PARAM_DATASEQ_INORDER_EN:
1349 case ISCSI_PARAM_ERL:
1350 case ISCSI_PARAM_TARGET_NAME:
1351 case ISCSI_PARAM_TPGT:
1352 case ISCSI_PARAM_USERNAME:
1353 case ISCSI_PARAM_PASSWORD:
1354 case ISCSI_PARAM_USERNAME_IN:
1355 case ISCSI_PARAM_PASSWORD_IN:
1356 case ISCSI_PARAM_FAST_ABORT:
1357 case ISCSI_PARAM_ABORT_TMO:
1358 case ISCSI_PARAM_LU_RESET_TMO:
1359 case ISCSI_PARAM_TGT_RESET_TMO:
1360 case ISCSI_PARAM_IFACE_NAME:
1361 case ISCSI_PARAM_INITIATOR_NAME:
1362 case ISCSI_PARAM_BOOT_ROOT:
1363 case ISCSI_PARAM_BOOT_NIC:
1364 case ISCSI_PARAM_BOOT_TARGET:
1365 return 0444;
1366 default:
1367 return 0;
1368 }
1369 }
1370
1371 return 0;
1372}
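
The octal values returned by qedi_attr_is_visible() are standard sysfs modes:

/*
 * 0444 (S_IRUGO) exposes the attribute read-only in sysfs; returning 0
 * hides the attribute entirely for this transport.
 */
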
1373
1374static void qedi_cleanup_task(struct iscsi_task *task)
1375{
1376 if (!task->sc || task->state == ISCSI_TASK_PENDING) {
1377 QEDI_INFO(NULL, QEDI_LOG_IO, "Returning ref_cnt=%d\n",
1378 atomic_read(&task->refcount));
1379 return;
1380 }
1381
1382 qedi_iscsi_unmap_sg_list(task->dd_data);
1383}
1384
1385struct iscsi_transport qedi_iscsi_transport = {
1386 .owner = THIS_MODULE,
1387 .name = QEDI_MODULE_NAME,
1388 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_MULTI_R2T | CAP_DATADGST |
1389 CAP_DATA_PATH_OFFLOAD | CAP_TEXT_NEGO,
1390 .create_session = qedi_session_create,
1391 .destroy_session = qedi_session_destroy,
1392 .create_conn = qedi_conn_create,
1393 .bind_conn = qedi_conn_bind,
1394 .start_conn = qedi_conn_start,
1395 .stop_conn = iscsi_conn_stop,
1396 .destroy_conn = qedi_conn_destroy,
1397 .set_param = iscsi_set_param,
1398 .get_ep_param = qedi_ep_get_param,
1399 .get_conn_param = iscsi_conn_get_param,
1400 .get_session_param = iscsi_session_get_param,
1401 .get_host_param = qedi_host_get_param,
1402 .send_pdu = iscsi_conn_send_pdu,
1403 .get_stats = qedi_conn_get_stats,
1404 .xmit_task = qedi_task_xmit,
1405 .cleanup_task = qedi_cleanup_task,
1406 .session_recovery_timedout = iscsi_session_recovery_timedout,
1407 .ep_connect = qedi_ep_connect,
1408 .ep_poll = qedi_ep_poll,
1409 .ep_disconnect = qedi_ep_disconnect,
1410 .set_path = qedi_set_path,
1411 .attr_is_visible = qedi_attr_is_visible,
1412};
1413
1414void qedi_start_conn_recovery(struct qedi_ctx *qedi,
1415 struct qedi_conn *qedi_conn)
1416{
1417 struct iscsi_cls_session *cls_sess;
1418 struct iscsi_cls_conn *cls_conn;
1419 struct iscsi_conn *conn;
1420
1421 cls_conn = qedi_conn->cls_conn;
1422 conn = cls_conn->dd_data;
1423 cls_sess = iscsi_conn_to_session(cls_conn);
1424
1425 if (iscsi_is_session_online(cls_sess)) {
1426 qedi_conn->abrt_conn = 1;
1427 QEDI_ERR(&qedi->dbg_ctx,
1428 "Failing connection, state=0x%x, cid=0x%x\n",
1429 conn->session->state, qedi_conn->iscsi_conn_id);
1430 iscsi_conn_failure(qedi_conn->cls_conn->dd_data,
1431 ISCSI_ERR_CONN_FAILED);
1432 }
1433}
1434
1435static const struct {
1436 enum iscsi_error_types error_code;
1437 char *err_string;
1438} qedi_iscsi_error[] = {
1439 { ISCSI_STATUS_NONE,
1440 "tcp_error none"
1441 },
1442 { ISCSI_CONN_ERROR_TASK_CID_MISMATCH,
1443 "task cid mismatch"
1444 },
1445 { ISCSI_CONN_ERROR_TASK_NOT_VALID,
1446 "invalid task"
1447 },
1448 { ISCSI_CONN_ERROR_RQ_RING_IS_FULL,
1449 "rq ring full"
1450 },
1451 { ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL,
1452 "cmdq ring full"
1453 },
1454 { ISCSI_CONN_ERROR_HQE_CACHING_FAILED,
1455 "sge caching failed"
1456 },
1457 { ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR,
1458 "hdr digest error"
1459 },
1460 { ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR,
1461 "local cmpl error"
1462 },
1463 { ISCSI_CONN_ERROR_DATA_OVERRUN,
1464 "invalid task"
1465 },
1466 { ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR,
1467 "out of sge error"
1468 },
1469 { ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR,
1470 "tcp seg ip options error"
1471 },
1472 { ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR,
1473 "tcp ip fragment error"
1474 },
1475 { ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN,
1476 "AHS len protocol error"
1477 },
1478 { ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE,
1479 "itt out of range error"
1480 },
1481 { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE,
1482 "data seg more than pdu size"
1483 },
1484 { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE,
1485 "invalid opcode"
1486 },
1487 { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE,
1488 "invalid opcode before update"
1489 },
1490 { ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL,
1491 "unexpected opcode"
1492 },
1493 { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA,
1494 "r2t carries no data"
1495 },
1496 { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN,
1497 "data sn error"
1498 },
1499 { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT,
1500 "data TTT error"
1501 },
1502 { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT,
1503 "r2t TTT error"
1504 },
1505 { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET,
1506 "buffer offset error"
1507 },
1508 { ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO,
1509 "buffer offset ooo"
1510 },
1511 { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN,
1512 "data seg len 0"
1513 },
1514 { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0,
1515 "data xer len error"
1516 },
1517 { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1,
1518 "data xer len1 error"
1519 },
1520 { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2,
1521 "data xer len2 error"
1522 },
1523 { ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN,
1524 "protocol lun error"
1525 },
1526 { ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO,
1527 "f bit zero error"
1528 },
1529 { ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN,
1530 "exp stat sn error"
1531 },
1532 { ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO,
1533 "dsl not zero error"
1534 },
1535 { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL,
1536 "invalid dsl"
1537 },
1538 { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG,
1539 "data seg len too big"
1540 },
1541 { ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT,
1542 "outstanding r2t count error"
1543 },
1544 { ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
1545 "sense datalen error"
1546 },
1547};
1548
1549char *qedi_get_iscsi_error(enum iscsi_error_types err_code)
1550{
1551 int i;
1552 char *msg = NULL;
1553
1554 for (i = 0; i < ARRAY_SIZE(qedi_iscsi_error); i++) {
1555 if (qedi_iscsi_error[i].error_code == err_code) {
1556 msg = qedi_iscsi_error[i].err_string;
1557 break;
1558 }
1559 }
1560 return msg;
1561}
1562
1563void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
1564{
1565 struct qedi_conn *qedi_conn;
1566 struct qedi_ctx *qedi;
1567 char warn_notice[] = "iscsi_warning";
1568 char error_notice[] = "iscsi_error";
1569 char unknown_msg[] = "Unknown error";
1570 char *message;
1571 int need_recovery = 0;
1572 u32 err_mask = 0;
1573 char *msg;
1574
1575 if (!ep)
1576 return;
1577
1578 qedi_conn = ep->conn;
1579 if (!qedi_conn)
1580 return;
1581
1582 qedi = ep->qedi;
1583
1584 QEDI_ERR(&qedi->dbg_ctx, "async event iscsi error:0x%x\n",
1585 data->error_code);
1586
1587 if (err_mask) {
1588 need_recovery = 0;
1589 message = warn_notice;
1590 } else {
1591 need_recovery = 1;
1592 message = error_notice;
1593 }
1594
1595 msg = qedi_get_iscsi_error(data->error_code);
1596 if (!msg) {
1597 need_recovery = 0;
1598 msg = unknown_msg;
1599 }
1600
1601 iscsi_conn_printk(KERN_ALERT,
1602 qedi_conn->cls_conn->dd_data,
1603 "qedi: %s - %s\n", message, msg);
1604
1605 if (need_recovery)
1606 qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
1607}
1608
1609void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data)
1610{
1611 struct qedi_conn *qedi_conn;
1612
1613 if (!ep)
1614 return;
1615
1616 qedi_conn = ep->conn;
1617 if (!qedi_conn)
1618 return;
1619
1620 QEDI_ERR(&ep->qedi->dbg_ctx, "async event TCP error:0x%x\n",
1621 data->error_code);
1622
1623 qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
1624}
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
new file mode 100644
index 000000000000..d3c06bbddb4e
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -0,0 +1,232 @@
1/*
2 * QLogic iSCSI Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#ifndef _QEDI_ISCSI_H_
11#define _QEDI_ISCSI_H_
12
13#include <linux/socket.h>
14#include <linux/completion.h>
15#include "qedi.h"
16
17#define ISCSI_MAX_SESS_PER_HBA 4096
18
19#define DEF_KA_TIMEOUT 7200000
20#define DEF_KA_INTERVAL 10000
21#define DEF_KA_MAX_PROBE_COUNT 10
22#define DEF_TOS 0
23#define DEF_TTL 0xfe
24#define DEF_SND_SEQ_SCALE 0
25#define DEF_RCV_BUF 0xffff
26#define DEF_SND_BUF 0xffff
27#define DEF_SEED 0
28#define DEF_MAX_RT_TIME 8000
29#define DEF_MAX_DA_COUNT 2
30#define DEF_SWS_TIMER 1000
31#define DEF_MAX_CWND 2
32#define DEF_PATH_MTU 1500
33#define DEF_MSS 1460
34#define DEF_LL2_MTU 1560
35#define JUMBO_MTU 9000
36
37#define MIN_MTU 576 /* rfc 793 */
38#define IPV4_HDR_LEN 20
39#define IPV6_HDR_LEN 40
40#define TCP_HDR_LEN 20
41#define TCP_OPTION_LEN 12
42#define VLAN_LEN 4
43
44enum {
45 EP_STATE_IDLE = 0x0,
46 EP_STATE_ACQRCONN_START = 0x1,
47 EP_STATE_ACQRCONN_COMPL = 0x2,
48 EP_STATE_OFLDCONN_START = 0x4,
49 EP_STATE_OFLDCONN_COMPL = 0x8,
50 EP_STATE_DISCONN_START = 0x10,
51 EP_STATE_DISCONN_COMPL = 0x20,
52 EP_STATE_CLEANUP_START = 0x40,
53 EP_STATE_CLEANUP_CMPL = 0x80,
54 EP_STATE_TCP_FIN_RCVD = 0x100,
55 EP_STATE_TCP_RST_RCVD = 0x200,
56 EP_STATE_LOGOUT_SENT = 0x400,
57 EP_STATE_LOGOUT_RESP_RCVD = 0x800,
58 EP_STATE_CLEANUP_FAILED = 0x1000,
59 EP_STATE_OFLDCONN_FAILED = 0x2000,
60 EP_STATE_CONNECT_FAILED = 0x4000,
61 EP_STATE_DISCONN_TIMEDOUT = 0x8000,
62};
63
64struct qedi_conn;
65
66struct qedi_endpoint {
67 struct qedi_ctx *qedi;
68 u32 dst_addr[4];
69 u32 src_addr[4];
70 u16 src_port;
71 u16 dst_port;
72 u16 vlan_id;
73 u16 pmtu;
74 u8 src_mac[ETH_ALEN];
75 u8 dst_mac[ETH_ALEN];
76 u8 ip_type;
77 int state;
78 wait_queue_head_t ofld_wait;
79 wait_queue_head_t tcp_ofld_wait;
80 u32 iscsi_cid;
81 /* identifier of the connection from qed */
82 u32 handle;
83 u32 fw_cid;
84 void __iomem *p_doorbell;
85
86 /* Send queue management */
87 struct iscsi_wqe *sq;
88 dma_addr_t sq_dma;
89
90 u16 sq_prod_idx;
91 u16 fw_sq_prod_idx;
92 u16 sq_con_idx;
93 u32 sq_mem_size;
94
95 void *sq_pbl;
96 dma_addr_t sq_pbl_dma;
97 u32 sq_pbl_size;
98 struct qedi_conn *conn;
99 struct work_struct offload_work;
100};
101
102#define QEDI_SQ_WQES_MIN 16
103
104struct qedi_io_bdt {
105 struct iscsi_sge *sge_tbl;
106 dma_addr_t sge_tbl_dma;
107 u16 sge_valid;
108};
109
110/**
111 * struct generic_pdu_resc - login pdu resource structure
112 *
113 * @req_buf: driver buffer used to stage payload associated with
114 * the login request
115 * @req_dma_addr: dma address for iscsi login request payload buffer
116 * @req_buf_size: actual login request payload length
117 * @req_wr_ptr: pointer into login request buffer when next data is
118 * to be written
119 * @resp_hdr: iscsi header where iscsi login response header is to
120 * be recreated
121 * @resp_buf: buffer to stage login response payload
122 * @resp_dma_addr: login response payload buffer dma address
123 * @resp_buf_size: login response payload length
124 * @resp_wr_ptr: pointer into login response buffer when next data is
125 * to be written
126 * @req_bd_tbl: iscsi login request payload BD table
127 * @req_bd_dma: login request BD table dma address
128 * @resp_bd_tbl: iscsi login response payload BD table
129 * @resp_bd_dma: login response BD table dma address
130 *
131 * This structure defines buffer info for generic PDUs such as iSCSI Login,
132 * Logout and NOP.
133 */
134struct generic_pdu_resc {
135 char *req_buf;
136 dma_addr_t req_dma_addr;
137 u32 req_buf_size;
138 char *req_wr_ptr;
139 struct iscsi_hdr resp_hdr;
140 char *resp_buf;
141 dma_addr_t resp_dma_addr;
142 u32 resp_buf_size;
143 char *resp_wr_ptr;
144 char *req_bd_tbl;
145 dma_addr_t req_bd_dma;
146 char *resp_bd_tbl;
147 dma_addr_t resp_bd_dma;
148};
149
150struct qedi_conn {
151 struct iscsi_cls_conn *cls_conn;
152 struct qedi_ctx *qedi;
153 struct qedi_endpoint *ep;
154 struct list_head active_cmd_list;
155 spinlock_t list_lock; /* internal conn lock */
156 u32 active_cmd_count;
157 u32 cmd_cleanup_req;
158 u32 cmd_cleanup_cmpl;
159
160 u32 iscsi_conn_id;
161 int itt;
162 int abrt_conn;
163#define QEDI_CID_RESERVED 0x5AFF
164 u32 fw_cid;
165 /*
166 * Buffer for login negotiation process
167 */
168 struct generic_pdu_resc gen_pdu;
169
170 struct list_head tmf_work_list;
171 wait_queue_head_t wait_queue;
172 spinlock_t tmf_work_lock; /* tmf work lock */
173 unsigned long flags;
174#define QEDI_CONN_FW_CLEANUP 1
175};
176
177struct qedi_cmd {
178 struct list_head io_cmd;
179 bool io_cmd_in_list;
180 struct iscsi_hdr hdr;
181 struct qedi_conn *conn;
182 struct scsi_cmnd *scsi_cmd;
183 struct scatterlist *sg;
184 struct qedi_io_bdt io_tbl;
185 struct iscsi_task_context request;
186 unsigned char *sense_buffer;
187 dma_addr_t sense_buffer_dma;
188 u16 task_id;
189
190 /* field populated for tmf work queue */
191 struct iscsi_task *task;
192 struct work_struct tmf_work;
193 int state;
194#define CLEANUP_WAIT 1
195#define CLEANUP_RECV 2
196#define CLEANUP_WAIT_FAILED 3
197#define CLEANUP_NOT_REQUIRED 4
198#define LUN_RESET_RESPONSE_RECEIVED 5
199#define RESPONSE_RECEIVED 6
200
201 int type;
202#define TYPEIO 1
203#define TYPERESET 2
204
205 struct qedi_work_map *list_tmf_work;
206 /* slowpath management */
207 bool use_slowpath;
208
209 struct iscsi_tm_rsp *tmf_resp_buf;
210 struct qedi_work cqe_work;
211};
212
213struct qedi_work_map {
214 struct list_head list;
215 struct qedi_cmd *qedi_cmd;
216 int rtid;
217
218 int state;
219#define QEDI_WORK_QUEUED 1
220#define QEDI_WORK_SCHEDULED 2
221#define QEDI_WORK_EXIT 3
222
223 struct work_struct *ptr_tmf_work;
224};
225
226#define qedi_set_itt(task_id, itt) ((u32)(((task_id) & 0xffff) | ((itt) << 16)))
227#define qedi_get_itt(cqe) (cqe.iscsi_hdr.cmd.itt >> 16)
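
The qedi_set_itt()/qedi_get_itt() macros above pack the 16-bit firmware task id into the low half of the on-wire ITT and the iSCSI-layer ITT into the high half. A small self-contained sketch of the same packing, written as functions with illustrative values:

#include <stdint.h>
#include <assert.h>

static uint32_t set_itt(uint32_t task_id, uint32_t itt)
{
	return (task_id & 0xffff) | (itt << 16);	/* low: fw task id, high: iSCSI itt */
}

static uint32_t get_itt(uint32_t fw_itt)
{
	return fw_itt >> 16;				/* recover the iSCSI-layer itt */
}

int main(void)
{
	uint32_t packed = set_itt(0x0042, 0x0777);

	assert((packed & 0xffff) == 0x0042);	/* task id kept in the low half */
	assert(get_itt(packed) == 0x0777);	/* itt recovered from the high half */
	return 0;
}
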
228
229#define QEDI_OFLD_WAIT_STATE(q) ((q)->state == EP_STATE_OFLDCONN_FAILED || \
230 (q)->state == EP_STATE_OFLDCONN_COMPL)
231
232#endif /* _QEDI_ISCSI_H_ */
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
new file mode 100644
index 000000000000..19ead8d17e55
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -0,0 +1,2127 @@
1/*
2 * QLogic iSCSI Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#include <linux/module.h>
11#include <linux/pci.h>
12#include <linux/kernel.h>
13#include <linux/if_arp.h>
14#include <scsi/iscsi_if.h>
15#include <linux/inet.h>
16#include <net/arp.h>
17#include <linux/list.h>
18#include <linux/kthread.h>
19#include <linux/mm.h>
20#include <linux/if_vlan.h>
21#include <linux/cpu.h>
22
23#include <scsi/scsi_cmnd.h>
24#include <scsi/scsi_device.h>
25#include <scsi/scsi_eh.h>
26#include <scsi/scsi_host.h>
27#include <scsi/scsi.h>
28
29#include "qedi.h"
30#include "qedi_gbl.h"
31#include "qedi_iscsi.h"
32
33static uint qedi_fw_debug;
34module_param(qedi_fw_debug, uint, 0644);
35MODULE_PARM_DESC(qedi_fw_debug, " Firmware debug level 0 (default) to 3");
36
37uint qedi_dbg_log = QEDI_LOG_WARN | QEDI_LOG_SCSI_TM;
38module_param(qedi_dbg_log, uint, 0644);
39MODULE_PARM_DESC(qedi_dbg_log, " Default debug level");
40
41uint qedi_io_tracing;
42module_param(qedi_io_tracing, uint, 0644);
43MODULE_PARM_DESC(qedi_io_tracing,
44 " Enable logging of SCSI requests/completions into trace buffer. (default off).");
45
46const struct qed_iscsi_ops *qedi_ops;
47static struct scsi_transport_template *qedi_scsi_transport;
48static struct pci_driver qedi_pci_driver;
49static DEFINE_PER_CPU(struct qedi_percpu_s, qedi_percpu);
50static LIST_HEAD(qedi_udev_list);
51/* Static function declaration */
52static int qedi_alloc_global_queues(struct qedi_ctx *qedi);
53static void qedi_free_global_queues(struct qedi_ctx *qedi);
54static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid);
55static void qedi_reset_uio_rings(struct qedi_uio_dev *udev);
56static void qedi_ll2_free_skbs(struct qedi_ctx *qedi);
57
58static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
59{
60 struct qedi_ctx *qedi;
61 struct qedi_endpoint *qedi_ep;
62 struct async_data *data;
63 int rval = 0;
64
65 if (!context || !fw_handle) {
66 QEDI_ERR(NULL, "Recv event with ctx NULL\n");
67 return -EINVAL;
68 }
69
70 qedi = (struct qedi_ctx *)context;
71 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
72 "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);
73
74 data = (struct async_data *)fw_handle;
75 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
76 "cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n",
77 data->cid, data->itid, data->error_code,
78 data->fw_debug_param);
79
80 qedi_ep = qedi->ep_tbl[data->cid];
81
82 if (!qedi_ep) {
83 QEDI_WARN(&qedi->dbg_ctx,
84 "Cannot process event, ep already disconnected, cid=0x%x\n",
85 data->cid);
86 WARN_ON(1);
87 return -ENODEV;
88 }
89
90 switch (fw_event_code) {
91 case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE:
92 if (qedi_ep->state == EP_STATE_OFLDCONN_START)
93 qedi_ep->state = EP_STATE_OFLDCONN_COMPL;
94
95 wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
96 break;
97 case ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE:
98 qedi_ep->state = EP_STATE_DISCONN_COMPL;
99 wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
100 break;
101 case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR:
102 qedi_process_iscsi_error(qedi_ep, data);
103 break;
104 case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD:
105 case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD:
106 case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME:
107 case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT:
108 case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT:
109 case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2:
110 case ISCSI_EVENT_TYPE_TCP_CONN_ERROR:
111 qedi_process_tcp_error(qedi_ep, data);
112 break;
113 default:
114 QEDI_ERR(&qedi->dbg_ctx, "Recv Unknown Event %u\n",
115 fw_event_code);
116 }
117
118 return rval;
119}
120
121static int qedi_uio_open(struct uio_info *uinfo, struct inode *inode)
122{
123 struct qedi_uio_dev *udev = uinfo->priv;
124 struct qedi_ctx *qedi = udev->qedi;
125
126 if (!capable(CAP_NET_ADMIN))
127 return -EPERM;
128
129 if (udev->uio_dev != -1)
130 return -EBUSY;
131
132 rtnl_lock();
133 udev->uio_dev = iminor(inode);
134 qedi_reset_uio_rings(udev);
135 set_bit(UIO_DEV_OPENED, &qedi->flags);
136 rtnl_unlock();
137
138 return 0;
139}
140
141static int qedi_uio_close(struct uio_info *uinfo, struct inode *inode)
142{
143 struct qedi_uio_dev *udev = uinfo->priv;
144 struct qedi_ctx *qedi = udev->qedi;
145
146 udev->uio_dev = -1;
147 clear_bit(UIO_DEV_OPENED, &qedi->flags);
148 qedi_ll2_free_skbs(qedi);
149 return 0;
150}
151
152static void __qedi_free_uio_rings(struct qedi_uio_dev *udev)
153{
154 if (udev->ll2_ring) {
155 free_page((unsigned long)udev->ll2_ring);
156 udev->ll2_ring = NULL;
157 }
158
159 if (udev->ll2_buf) {
160 free_pages((unsigned long)udev->ll2_buf, 2);
161 udev->ll2_buf = NULL;
162 }
163}
164
165static void __qedi_free_uio(struct qedi_uio_dev *udev)
166{
167 uio_unregister_device(&udev->qedi_uinfo);
168
169 __qedi_free_uio_rings(udev);
170
171 pci_dev_put(udev->pdev);
172 kfree(udev->uctrl);
173 kfree(udev);
174}
175
176static void qedi_free_uio(struct qedi_uio_dev *udev)
177{
178 if (!udev)
179 return;
180
181 list_del_init(&udev->list);
182 __qedi_free_uio(udev);
183}
184
185static void qedi_reset_uio_rings(struct qedi_uio_dev *udev)
186{
187 struct qedi_ctx *qedi = NULL;
188 struct qedi_uio_ctrl *uctrl = NULL;
189
190 qedi = udev->qedi;
191 uctrl = udev->uctrl;
192
193 spin_lock_bh(&qedi->ll2_lock);
194 uctrl->host_rx_cons = 0;
195 uctrl->hw_rx_prod = 0;
196 uctrl->hw_rx_bd_prod = 0;
197 uctrl->host_rx_bd_cons = 0;
198
199 memset(udev->ll2_ring, 0, udev->ll2_ring_size);
200 memset(udev->ll2_buf, 0, udev->ll2_buf_size);
201 spin_unlock_bh(&qedi->ll2_lock);
202}
203
204static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev)
205{
206 int rc = 0;
207
208 if (udev->ll2_ring || udev->ll2_buf)
209 return rc;
210
211 /* Allocating memory for LL2 ring */
212 udev->ll2_ring_size = QEDI_PAGE_SIZE;
213 udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP);
214 if (!udev->ll2_ring) {
215 rc = -ENOMEM;
216 goto exit_alloc_ring;
217 }
218
219 /* Allocating memory for Tx/Rx pkt buffer */
220 udev->ll2_buf_size = TX_RX_RING * LL2_SINGLE_BUF_SIZE;
221 udev->ll2_buf_size = QEDI_PAGE_ALIGN(udev->ll2_buf_size);
222 udev->ll2_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP |
223 __GFP_ZERO, 2);
224 if (!udev->ll2_buf) {
225 rc = -ENOMEM;
226 goto exit_alloc_buf;
227 }
228 return rc;
229
230exit_alloc_buf:
231 free_page((unsigned long)udev->ll2_ring);
232 udev->ll2_ring = NULL;
233exit_alloc_ring:
234 return rc;
235}
236
237static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
238{
239 struct qedi_uio_dev *udev = NULL;
240 struct qedi_uio_ctrl *uctrl = NULL;
241 int rc = 0;
242
243 list_for_each_entry(udev, &qedi_udev_list, list) {
244 if (udev->pdev == qedi->pdev) {
245 udev->qedi = qedi;
246 if (__qedi_alloc_uio_rings(udev)) {
247 udev->qedi = NULL;
248 return -ENOMEM;
249 }
250 qedi->udev = udev;
251 return 0;
252 }
253 }
254
255 udev = kzalloc(sizeof(*udev), GFP_KERNEL);
256 if (!udev) {
257 rc = -ENOMEM;
258 goto err_udev;
259 }
260
261 uctrl = kzalloc(sizeof(*uctrl), GFP_KERNEL);
262 if (!uctrl) {
263 rc = -ENOMEM;
264 goto err_uctrl;
265 }
266
267 udev->uio_dev = -1;
268
269 udev->qedi = qedi;
270 udev->pdev = qedi->pdev;
271 udev->uctrl = uctrl;
272
273 rc = __qedi_alloc_uio_rings(udev);
274 if (rc)
275 goto err_uio_rings;
276
277 list_add(&udev->list, &qedi_udev_list);
278
279 pci_dev_get(udev->pdev);
280 qedi->udev = udev;
281
282 udev->tx_pkt = udev->ll2_buf;
283 udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE;
284 return 0;
285
286 err_uio_rings:
287 kfree(uctrl);
288 err_uctrl:
289 kfree(udev);
290 err_udev:
291 return -ENOMEM;
292}
293
294static int qedi_init_uio(struct qedi_ctx *qedi)
295{
296 struct qedi_uio_dev *udev = qedi->udev;
297 struct uio_info *uinfo;
298 int ret = 0;
299
300 if (!udev)
301 return -ENOMEM;
302
303 uinfo = &udev->qedi_uinfo;
304
305 uinfo->mem[0].addr = (unsigned long)udev->uctrl;
306 uinfo->mem[0].size = sizeof(struct qedi_uio_ctrl);
307 uinfo->mem[0].memtype = UIO_MEM_LOGICAL;
308
309 uinfo->mem[1].addr = (unsigned long)udev->ll2_ring;
310 uinfo->mem[1].size = udev->ll2_ring_size;
311 uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
312
313 uinfo->mem[2].addr = (unsigned long)udev->ll2_buf;
314 uinfo->mem[2].size = udev->ll2_buf_size;
315 uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
316
317 uinfo->name = "qedi_uio";
318 uinfo->version = QEDI_MODULE_VERSION;
319 uinfo->irq = UIO_IRQ_CUSTOM;
320
321 uinfo->open = qedi_uio_open;
322 uinfo->release = qedi_uio_close;
323
324 if (udev->uio_dev == -1) {
325 if (!uinfo->priv) {
326 uinfo->priv = udev;
327
328 ret = uio_register_device(&udev->pdev->dev, uinfo);
329 if (ret) {
330 QEDI_ERR(&qedi->dbg_ctx,
331 "UIO registration failed\n");
332 }
333 }
334 }
335
336 return ret;
337}
338
339static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
340 struct qed_sb_info *sb_info, u16 sb_id)
341{
342 struct status_block *sb_virt;
343 dma_addr_t sb_phys;
344 int ret;
345
346 sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
347 sizeof(struct status_block), &sb_phys,
348 GFP_KERNEL);
349 if (!sb_virt) {
350 QEDI_ERR(&qedi->dbg_ctx,
351 "Status block allocation failed for id = %d.\n",
352 sb_id);
353 return -ENOMEM;
354 }
355
356 ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys,
357 sb_id, QED_SB_TYPE_STORAGE);
358 if (ret) {
359 QEDI_ERR(&qedi->dbg_ctx,
360 "Status block initialization failed for id = %d.\n",
361 sb_id);
362 return ret;
363 }
364
365 return 0;
366}
367
368static void qedi_free_sb(struct qedi_ctx *qedi)
369{
370 struct qed_sb_info *sb_info;
371 int id;
372
373 for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
374 sb_info = &qedi->sb_array[id];
375 if (sb_info->sb_virt)
376 dma_free_coherent(&qedi->pdev->dev,
377 sizeof(*sb_info->sb_virt),
378 (void *)sb_info->sb_virt,
379 sb_info->sb_phys);
380 }
381}
382
383static void qedi_free_fp(struct qedi_ctx *qedi)
384{
385 kfree(qedi->fp_array);
386 kfree(qedi->sb_array);
387}
388
389static void qedi_destroy_fp(struct qedi_ctx *qedi)
390{
391 qedi_free_sb(qedi);
392 qedi_free_fp(qedi);
393}
394
395static int qedi_alloc_fp(struct qedi_ctx *qedi)
396{
397 int ret = 0;
398
399 qedi->fp_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
400 sizeof(struct qedi_fastpath), GFP_KERNEL);
401 if (!qedi->fp_array) {
402 QEDI_ERR(&qedi->dbg_ctx,
403 "fastpath fp array allocation failed.\n");
404 return -ENOMEM;
405 }
406
407 qedi->sb_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
408 sizeof(struct qed_sb_info), GFP_KERNEL);
409 if (!qedi->sb_array) {
410 QEDI_ERR(&qedi->dbg_ctx,
411 "fastpath sb array allocation failed.\n");
412 ret = -ENOMEM;
413 goto free_fp;
414 }
415
416 return ret;
417
418free_fp:
419 qedi_free_fp(qedi);
420 return ret;
421}
422
423static void qedi_int_fp(struct qedi_ctx *qedi)
424{
425 struct qedi_fastpath *fp;
426 int id;
427
428 memset(qedi->fp_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
429 sizeof(*qedi->fp_array));
430 memset(qedi->sb_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
431 sizeof(*qedi->sb_array));
432
433 for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
434 fp = &qedi->fp_array[id];
435 fp->sb_info = &qedi->sb_array[id];
436 fp->sb_id = id;
437 fp->qedi = qedi;
438 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
439 "qedi", id);
440
441 /* fp_array[i] is passed as the irq cookie, so initialize
442 * any data needed in interrupt context here.
443 */
444 }
445}
446
447static int qedi_prepare_fp(struct qedi_ctx *qedi)
448{
449 struct qedi_fastpath *fp;
450 int id, ret = 0;
451
452 ret = qedi_alloc_fp(qedi);
453 if (ret)
454 goto err;
455
456 qedi_int_fp(qedi);
457
458 for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
459 fp = &qedi->fp_array[id];
460 ret = qedi_alloc_and_init_sb(qedi, fp->sb_info, fp->sb_id);
461 if (ret) {
462 QEDI_ERR(&qedi->dbg_ctx,
463 "SB allocation and initialization failed.\n");
464 ret = -EIO;
465 goto err_init;
466 }
467 }
468
469 return 0;
470
471err_init:
472 qedi_free_sb(qedi);
473 qedi_free_fp(qedi);
474err:
475 return ret;
476}
477
478static int qedi_setup_cid_que(struct qedi_ctx *qedi)
479{
480 int i;
481
482 qedi->cid_que.cid_que_base = kmalloc_array(qedi->max_active_conns,
483 sizeof(u32), GFP_KERNEL);
484 if (!qedi->cid_que.cid_que_base)
485 return -ENOMEM;
486
487 qedi->cid_que.conn_cid_tbl = kmalloc_array(qedi->max_active_conns,
488 sizeof(struct qedi_conn *),
489 GFP_KERNEL);
490 if (!qedi->cid_que.conn_cid_tbl) {
491 kfree(qedi->cid_que.cid_que_base);
492 qedi->cid_que.cid_que_base = NULL;
493 return -ENOMEM;
494 }
495
496 qedi->cid_que.cid_que = (u32 *)qedi->cid_que.cid_que_base;
497 qedi->cid_que.cid_q_prod_idx = 0;
498 qedi->cid_que.cid_q_cons_idx = 0;
499 qedi->cid_que.cid_q_max_idx = qedi->max_active_conns;
500 qedi->cid_que.cid_free_cnt = qedi->max_active_conns;
501
502 for (i = 0; i < qedi->max_active_conns; i++) {
503 qedi->cid_que.cid_que[i] = i;
504 qedi->cid_que.conn_cid_tbl[i] = NULL;
505 }
506
507 return 0;
508}
509
510static void qedi_release_cid_que(struct qedi_ctx *qedi)
511{
512 kfree(qedi->cid_que.cid_que_base);
513 qedi->cid_que.cid_que_base = NULL;
514
515 kfree(qedi->cid_que.conn_cid_tbl);
516 qedi->cid_que.conn_cid_tbl = NULL;
517}
518
519static int qedi_init_id_tbl(struct qedi_portid_tbl *id_tbl, u16 size,
520 u16 start_id, u16 next)
521{
522 id_tbl->start = start_id;
523 id_tbl->max = size;
524 id_tbl->next = next;
525 spin_lock_init(&id_tbl->lock);
526 id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
527 if (!id_tbl->table)
528 return -ENOMEM;
529
530 return 0;
531}
532
533static void qedi_free_id_tbl(struct qedi_portid_tbl *id_tbl)
534{
535 kfree(id_tbl->table);
536 id_tbl->table = NULL;
537}
538
539int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id)
540{
541 int ret = -1;
542
543 id -= id_tbl->start;
544 if (id >= id_tbl->max)
545 return ret;
546
547 spin_lock(&id_tbl->lock);
548 if (!test_bit(id, id_tbl->table)) {
549 set_bit(id, id_tbl->table);
550 ret = 0;
551 }
552 spin_unlock(&id_tbl->lock);
553 return ret;
554}
555
556u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl)
557{
558 u16 id;
559
560 spin_lock(&id_tbl->lock);
561 id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
562 if (id >= id_tbl->max) {
563 id = QEDI_LOCAL_PORT_INVALID;
564 if (id_tbl->next != 0) {
565 id = find_first_zero_bit(id_tbl->table, id_tbl->next);
566 if (id >= id_tbl->next)
567 id = QEDI_LOCAL_PORT_INVALID;
568 }
569 }
570
571 if (id < id_tbl->max) {
572 set_bit(id, id_tbl->table);
573 id_tbl->next = (id + 1) & (id_tbl->max - 1);
574 id += id_tbl->start;
575 }
576
577 spin_unlock(&id_tbl->lock);
578
579 return id;
580}
581
582void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id)
583{
584 if (id == QEDI_LOCAL_PORT_INVALID)
585 return;
586
587 id -= id_tbl->start;
588 if (id >= id_tbl->max)
589 return;
590
591 clear_bit(id, id_tbl->table);
592}
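
qedi_alloc_new_id() implements a hint-based circular search over the port bitmap: scan from ->next to the end of the table, then wrap and scan from 0 up to ->next. A rough userspace model of that two-pass search, assuming a byte array instead of a bitmap and no spinlock, with illustrative sizes (note that the driver's `(id + 1) & (max - 1)` hint update additionally assumes a power-of-two table size):

#include <stdint.h>

#define TBL_SIZE	64	/* illustrative; the driver uses QEDI_LOCAL_PORT_RANGE */
#define ID_INVALID	0xffff

static uint8_t table[TBL_SIZE];	/* one byte per id; the driver uses a bitmap */
static uint16_t next_hint;

static uint16_t alloc_new_id(void)
{
	uint16_t id;

	for (id = next_hint; id < TBL_SIZE; id++)	/* find_next_zero_bit() */
		if (!table[id])
			goto found;
	for (id = 0; id < next_hint; id++)		/* find_first_zero_bit() */
		if (!table[id])
			goto found;
	return ID_INVALID;
found:
	table[id] = 1;
	next_hint = (id + 1) % TBL_SIZE;
	return id;
}

int main(void)
{
	/* first two allocations hand out ids 0 and 1 */
	return (alloc_new_id() == 0 && alloc_new_id() == 1) ? 0 : 1;
}
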
593
594static void qedi_cm_free_mem(struct qedi_ctx *qedi)
595{
596 kfree(qedi->ep_tbl);
597 qedi->ep_tbl = NULL;
598 qedi_free_id_tbl(&qedi->lcl_port_tbl);
599}
600
601static int qedi_cm_alloc_mem(struct qedi_ctx *qedi)
602{
603 u16 port_id;
604
605 qedi->ep_tbl = kzalloc((qedi->max_active_conns *
606 sizeof(struct qedi_endpoint *)), GFP_KERNEL);
607 if (!qedi->ep_tbl)
608 return -ENOMEM;
609 port_id = prandom_u32() % QEDI_LOCAL_PORT_RANGE;
610 if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE,
611 QEDI_LOCAL_PORT_MIN, port_id)) {
612 qedi_cm_free_mem(qedi);
613 return -ENOMEM;
614 }
615
616 return 0;
617}
618
619static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev)
620{
621 struct Scsi_Host *shost;
622 struct qedi_ctx *qedi = NULL;
623
624 shost = iscsi_host_alloc(&qedi_host_template,
625 sizeof(struct qedi_ctx), 0);
626 if (!shost) {
627 QEDI_ERR(NULL, "Could not allocate shost\n");
628 goto exit_setup_shost;
629 }
630
631 shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA;
632 shost->max_channel = 0;
633 shost->max_lun = ~0;
634 shost->max_cmd_len = 16;
635 shost->transportt = qedi_scsi_transport;
636
637 qedi = iscsi_host_priv(shost);
638 memset(qedi, 0, sizeof(*qedi));
639 qedi->shost = shost;
640 qedi->dbg_ctx.host_no = shost->host_no;
641 qedi->pdev = pdev;
642 qedi->dbg_ctx.pdev = pdev;
643 qedi->max_active_conns = ISCSI_MAX_SESS_PER_HBA;
644 qedi->max_sqes = QEDI_SQ_SIZE;
645
646 if (shost_use_blk_mq(shost))
647 shost->nr_hw_queues = MIN_NUM_CPUS_MSIX(qedi);
648
649 pci_set_drvdata(pdev, qedi);
650
651exit_setup_shost:
652 return qedi;
653}
654
655static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
656{
657 struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
658 struct qedi_uio_dev *udev;
659 struct qedi_uio_ctrl *uctrl;
660 struct skb_work_list *work;
661 u32 prod;
662
663 if (!qedi) {
664 QEDI_ERR(NULL, "qedi is NULL\n");
665 return -1;
666 }
667
668 if (!test_bit(UIO_DEV_OPENED, &qedi->flags)) {
669 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UIO,
670 "UIO DEV is not opened\n");
671 kfree_skb(skb);
672 return 0;
673 }
674
675 udev = qedi->udev;
676 uctrl = udev->uctrl;
677
678 work = kzalloc(sizeof(*work), GFP_ATOMIC);
679 if (!work) {
680 QEDI_WARN(&qedi->dbg_ctx,
681 "Could not allocate work so dropping frame.\n");
682 kfree_skb(skb);
683 return 0;
684 }
685
686 INIT_LIST_HEAD(&work->list);
687 work->skb = skb;
688
689 if (skb_vlan_tag_present(skb))
690 work->vlan_id = skb_vlan_tag_get(skb);
691
692 if (work->vlan_id)
693 __vlan_insert_tag(work->skb, htons(ETH_P_8021Q), work->vlan_id);
694
695 spin_lock_bh(&qedi->ll2_lock);
696 list_add_tail(&work->list, &qedi->ll2_skb_list);
697
698 ++uctrl->hw_rx_prod_cnt;
699 prod = (uctrl->hw_rx_prod + 1) % RX_RING;
700 if (prod != uctrl->host_rx_cons) {
701 uctrl->hw_rx_prod = prod;
702 spin_unlock_bh(&qedi->ll2_lock);
703 wake_up_process(qedi->ll2_recv_thread);
704 return 0;
705 }
706
707 spin_unlock_bh(&qedi->ll2_lock);
708 return 0;
709}
710
711/* Copy this skb into the region mmapped by iscsiuio */
712static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb,
713 u16 vlan_id)
714{
715 struct qedi_uio_dev *udev = NULL;
716 struct qedi_uio_ctrl *uctrl = NULL;
717 struct qedi_rx_bd rxbd;
718 struct qedi_rx_bd *p_rxbd;
719 u32 rx_bd_prod;
720 void *pkt;
721 int len = 0;
722
723 if (!qedi) {
724 QEDI_ERR(NULL, "qedi is NULL\n");
725 return -1;
726 }
727
728 udev = qedi->udev;
729 uctrl = udev->uctrl;
730 pkt = udev->rx_pkt + (uctrl->hw_rx_prod * LL2_SINGLE_BUF_SIZE);
731 len = min_t(u32, skb->len, (u32)LL2_SINGLE_BUF_SIZE);
732 memcpy(pkt, skb->data, len);
733
734 memset(&rxbd, 0, sizeof(rxbd));
735 rxbd.rx_pkt_index = uctrl->hw_rx_prod;
736 rxbd.rx_pkt_len = len;
737 rxbd.vlan_id = vlan_id;
738
739 uctrl->hw_rx_bd_prod = (uctrl->hw_rx_bd_prod + 1) % QEDI_NUM_RX_BD;
740 rx_bd_prod = uctrl->hw_rx_bd_prod;
741 p_rxbd = (struct qedi_rx_bd *)udev->ll2_ring;
742 p_rxbd += rx_bd_prod;
743
744 memcpy(p_rxbd, &rxbd, sizeof(rxbd));
745
746 /* notify the iscsiuio about new packet */
747 uio_event_notify(&udev->qedi_uinfo);
748
749 return 0;
750}
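
qedi_ll2_process_skb() copies each frame into a fixed-size slot of the region that iscsiuio has mmapped, addressed by the hardware producer index, then posts a matching BD. A simplified userspace model of the slot arithmetic, using a single producer index instead of the driver's separate packet/BD indices and illustrative sizes:

#include <stdint.h>
#include <string.h>

#define NUM_RX_BD	32	/* illustrative; the driver uses QEDI_NUM_RX_BD */
#define BUF_SIZE	1024	/* illustrative; the driver uses LL2_SINGLE_BUF_SIZE */

static uint8_t rx_area[NUM_RX_BD * BUF_SIZE];	/* stands in for the mmapped region */
static uint32_t prod;

static void post_frame(const void *data, size_t len)
{
	if (len > BUF_SIZE)
		len = BUF_SIZE;	/* the driver clamps with min_t() the same way */
	memcpy(rx_area + (size_t)prod * BUF_SIZE, data, len);
	prod = (prod + 1) % NUM_RX_BD;	/* advance modulo the ring size */
}

int main(void)
{
	post_frame("hello", 5);
	return 0;
}
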
751
752static void qedi_ll2_free_skbs(struct qedi_ctx *qedi)
753{
754 struct skb_work_list *work, *work_tmp;
755
756 spin_lock_bh(&qedi->ll2_lock);
757 list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) {
758 list_del(&work->list);
759 if (work->skb)
760 kfree_skb(work->skb);
761 kfree(work);
762 }
763 spin_unlock_bh(&qedi->ll2_lock);
764}
765
766static int qedi_ll2_recv_thread(void *arg)
767{
768 struct qedi_ctx *qedi = (struct qedi_ctx *)arg;
769 struct skb_work_list *work, *work_tmp;
770
771 set_user_nice(current, -20);
772
773 while (!kthread_should_stop()) {
774 spin_lock_bh(&qedi->ll2_lock);
775 list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list,
776 list) {
777 list_del(&work->list);
778 qedi_ll2_process_skb(qedi, work->skb, work->vlan_id);
779 kfree_skb(work->skb);
780 kfree(work);
781 }
782 set_current_state(TASK_INTERRUPTIBLE);
783 spin_unlock_bh(&qedi->ll2_lock);
784 schedule();
785 }
786
787 __set_current_state(TASK_RUNNING);
788 return 0;
789}
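
The loop ordering in qedi_ll2_recv_thread() is the standard lost-wakeup guard:

/*
 * The thread marks itself TASK_INTERRUPTIBLE while still holding
 * ll2_lock, and only then drops the lock and calls schedule(). Since
 * qedi_ll2_rx() queues work under the same lock before calling
 * wake_up_process(), a wakeup that lands between the unlock and
 * schedule() is not lost: schedule() returns promptly for a task that
 * has already been set back to TASK_RUNNING.
 */
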
790
791static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
792{
793 u8 num_sq_pages;
794 u32 log_page_size;
795 int rval = 0;
796
797 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "Min number of MSIX %d\n",
798 MIN_NUM_CPUS_MSIX(qedi));
799
800 num_sq_pages = (MAX_OUSTANDING_TASKS_PER_CON * 8) / PAGE_SIZE;
801
802 qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi);
803
804 memset(&qedi->pf_params.iscsi_pf_params, 0,
805 sizeof(qedi->pf_params.iscsi_pf_params));
806
807 qedi->p_cpuq = pci_alloc_consistent(qedi->pdev,
808 qedi->num_queues * sizeof(struct qedi_glbl_q_params),
809 &qedi->hw_p_cpuq);
810 if (!qedi->p_cpuq) {
811 QEDI_ERR(&qedi->dbg_ctx, "pci_alloc_consistent fail\n");
812 rval = -1;
813 goto err_alloc_mem;
814 }
815
816 rval = qedi_alloc_global_queues(qedi);
817 if (rval) {
818 QEDI_ERR(&qedi->dbg_ctx, "Global queue allocation failed.\n");
819 rval = -1;
820 goto err_alloc_mem;
821 }
822
823 qedi->pf_params.iscsi_pf_params.num_cons = QEDI_MAX_ISCSI_CONNS_PER_HBA;
824 qedi->pf_params.iscsi_pf_params.num_tasks = QEDI_MAX_ISCSI_TASK;
825 qedi->pf_params.iscsi_pf_params.half_way_close_timeout = 10;
826 qedi->pf_params.iscsi_pf_params.num_sq_pages_in_ring = num_sq_pages;
827 qedi->pf_params.iscsi_pf_params.num_r2tq_pages_in_ring = num_sq_pages;
828 qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
829 qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
830 qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;
831
832 for (log_page_size = 0; log_page_size < 32; log_page_size++) {
833 if ((1 << log_page_size) == PAGE_SIZE)
834 break;
835 }
836 qedi->pf_params.iscsi_pf_params.log_page_size = log_page_size;
837
838 qedi->pf_params.iscsi_pf_params.glbl_q_params_addr =
839 (u64)qedi->hw_p_cpuq;
840
841 /* RQ BDQ initializations.
842 * rq_num_entries: suggested value for Initiator is 16 (4KB RQ)
843 * rqe_log_size: 8 for 256B RQE
844 */
845 qedi->pf_params.iscsi_pf_params.rqe_log_size = 8;
846 /* BDQ address and size */
847 qedi->pf_params.iscsi_pf_params.bdq_pbl_base_addr[BDQ_ID_RQ] =
848 qedi->bdq_pbl_list_dma;
849 qedi->pf_params.iscsi_pf_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
850 qedi->bdq_pbl_list_num_entries;
851 qedi->pf_params.iscsi_pf_params.rq_buffer_size = QEDI_BDQ_BUF_SIZE;
852
853 /* cq_num_entries: num_tasks + rq_num_entries */
854 qedi->pf_params.iscsi_pf_params.cq_num_entries = 2048;
855
856 qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
857 qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;
858 qedi->pf_params.iscsi_pf_params.ooo_enable = 1;
859
860err_alloc_mem:
861 return rval;
862}
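
The log_page_size loop above just derives log2(PAGE_SIZE) for the firmware (in-kernel, ilog2() would give the same answer for a power-of-two size). The equivalent computation in isolation:

#include <stdio.h>

/* Find n such that (1 << n) == size; a 4 KiB page yields 12. */
static unsigned int log2_pow2(unsigned int size)
{
	unsigned int n;

	for (n = 0; n < 32; n++)
		if ((1U << n) == size)
			break;
	return n;	/* like the driver's loop, ends at 32 if no match */
}

int main(void)
{
	printf("%u\n", log2_pow2(4096));	/* prints 12 */
	return 0;
}
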
863
864/* Free DMA coherent memory for array of queue pointers we pass to qed */
865static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi)
866{
867 size_t size = 0;
868
869 if (qedi->p_cpuq) {
870 size = qedi->num_queues * sizeof(struct qedi_glbl_q_params);
871 pci_free_consistent(qedi->pdev, size, qedi->p_cpuq,
872 qedi->hw_p_cpuq);
873 }
874
875 qedi_free_global_queues(qedi);
876
877 kfree(qedi->global_queues);
878}
879
880static void qedi_link_update(void *dev, struct qed_link_output *link)
881{
882 struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
883
884 if (link->link_up) {
885 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Link Up event.\n");
886 atomic_set(&qedi->link_state, QEDI_LINK_UP);
887 } else {
888 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
889 "Link Down event.\n");
890 atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
891 }
892}
893
894static struct qed_iscsi_cb_ops qedi_cb_ops = {
895 {
896 .link_update = qedi_link_update,
897 }
898};
899
900static int qedi_queue_cqe(struct qedi_ctx *qedi, union iscsi_cqe *cqe,
901 u16 que_idx, struct qedi_percpu_s *p)
902{
903 struct qedi_work *qedi_work;
904 struct qedi_conn *q_conn;
905 struct iscsi_conn *conn;
906 struct qedi_cmd *qedi_cmd;
907 u32 iscsi_cid;
908 int rc = 0;
909
910 iscsi_cid = cqe->cqe_common.conn_id;
911 q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
912 if (!q_conn) {
913 QEDI_WARN(&qedi->dbg_ctx,
914 "Session no longer exists for cid=0x%x!!\n",
915 iscsi_cid);
916 return -1;
917 }
918 conn = q_conn->cls_conn->dd_data;
919
920 switch (cqe->cqe_common.cqe_type) {
921 case ISCSI_CQE_TYPE_SOLICITED:
922 case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
923 qedi_cmd = qedi_get_cmd_from_tid(qedi, cqe->cqe_solicited.itid);
924 if (!qedi_cmd) {
925 rc = -1;
926 break;
927 }
928 INIT_LIST_HEAD(&qedi_cmd->cqe_work.list);
929 qedi_cmd->cqe_work.qedi = qedi;
930 memcpy(&qedi_cmd->cqe_work.cqe, cqe, sizeof(union iscsi_cqe));
931 qedi_cmd->cqe_work.que_idx = que_idx;
932 qedi_cmd->cqe_work.is_solicited = true;
933 list_add_tail(&qedi_cmd->cqe_work.list, &p->work_list);
934 break;
935 case ISCSI_CQE_TYPE_UNSOLICITED:
936 case ISCSI_CQE_TYPE_DUMMY:
937 case ISCSI_CQE_TYPE_TASK_CLEANUP:
938 qedi_work = kzalloc(sizeof(*qedi_work), GFP_ATOMIC);
939 if (!qedi_work) {
940 rc = -1;
941 break;
942 }
943 INIT_LIST_HEAD(&qedi_work->list);
944 qedi_work->qedi = qedi;
945 memcpy(&qedi_work->cqe, cqe, sizeof(union iscsi_cqe));
946 qedi_work->que_idx = que_idx;
947 qedi_work->is_solicited = false;
948 list_add_tail(&qedi_work->list, &p->work_list);
949 break;
950 default:
951 rc = -1;
952 QEDI_ERR(&qedi->dbg_ctx, "FW Error cqe.\n");
953 }
954 return rc;
955}
956
957static bool qedi_process_completions(struct qedi_fastpath *fp)
958{
959 struct qedi_ctx *qedi = fp->qedi;
960 struct qed_sb_info *sb_info = fp->sb_info;
961 struct status_block *sb = sb_info->sb_virt;
962 struct qedi_percpu_s *p = NULL;
963 struct global_queue *que;
964 u16 prod_idx;
965 unsigned long flags;
966 union iscsi_cqe *cqe;
967 int cpu;
968 int ret;
969
970 /* Get the current firmware producer index */
971 prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];
972
973 if (prod_idx >= QEDI_CQ_SIZE)
974 prod_idx = prod_idx % QEDI_CQ_SIZE;
975
976 que = qedi->global_queues[fp->sb_id];
977 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
978 "Before: global queue=%p prod_idx=%d cons_idx=%d, sb_id=%d\n",
979 que, prod_idx, que->cq_cons_idx, fp->sb_id);
980
981 qedi->intr_cpu = fp->sb_id;
982 cpu = smp_processor_id();
983 p = &per_cpu(qedi_percpu, cpu);
984
985 if (unlikely(!p->iothread))
986 WARN_ON(1);
987
988 spin_lock_irqsave(&p->p_work_lock, flags);
989 while (que->cq_cons_idx != prod_idx) {
990 cqe = &que->cq[que->cq_cons_idx];
991
992 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
993 "cqe=%p prod_idx=%d cons_idx=%d.\n",
994 cqe, prod_idx, que->cq_cons_idx);
995
996 ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p);
997 if (ret)
998 break; /* cq_cons_idx was not advanced; avoid spinning forever */
999
1000 que->cq_cons_idx++;
1001 if (que->cq_cons_idx == QEDI_CQ_SIZE)
1002 que->cq_cons_idx = 0;
1003 }
1004 wake_up_process(p->iothread);
1005 spin_unlock_irqrestore(&p->p_work_lock, flags);
1006
1007 return true;
1008}
1009
1010static bool qedi_fp_has_work(struct qedi_fastpath *fp)
1011{
1012 struct qedi_ctx *qedi = fp->qedi;
1013 struct global_queue *que;
1014 struct qed_sb_info *sb_info = fp->sb_info;
1015 struct status_block *sb = sb_info->sb_virt;
1016 u16 prod_idx;
1017
1018 barrier();
1019
1020 /* Get the current firmware producer index */
1021 prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];
1022
1023 /* Get the pointer to the global CQ this completion is on */
1024 que = qedi->global_queues[fp->sb_id];
1025
1026 /* prod idx wrap around uint16 */
1027 if (prod_idx >= QEDI_CQ_SIZE)
1028 prod_idx = prod_idx % QEDI_CQ_SIZE;
1029
1030 return (que->cq_cons_idx != prod_idx);
1031}
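
Both qedi_process_completions() and qedi_fp_has_work() fold the firmware's free-running 16-bit producer index into the CQ ring before comparing it with the software consumer index. A self-contained model of that wrap-and-drain loop, with an illustrative ring size and no locking or CQE handling:

#include <stdint.h>
#include <stdio.h>

#define CQ_SIZE	2048	/* illustrative; the driver uses QEDI_CQ_SIZE */

static unsigned int drain(uint16_t fw_prod_idx, uint16_t *cons_idx)
{
	uint16_t prod = fw_prod_idx % CQ_SIZE;	/* fold the free-running index */
	unsigned int handled = 0;

	while (*cons_idx != prod) {
		/* a real consumer would process cq[*cons_idx] here */
		handled++;
		if (++*cons_idx == CQ_SIZE)
			*cons_idx = 0;		/* wrap the consumer index */
	}
	return handled;
}

int main(void)
{
	uint16_t cons = 2040;

	/* producer has wrapped past the end of the ring: 8 + 5 entries */
	printf("%u entries\n", drain(5, &cons));	/* prints "13 entries" */
	return 0;
}
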
1032
1033/* MSI-X fastpath handler code */
1034static irqreturn_t qedi_msix_handler(int irq, void *dev_id)
1035{
1036 struct qedi_fastpath *fp = dev_id;
1037 struct qedi_ctx *qedi = fp->qedi;
1038 bool wake_io_thread = true;
1039
1040 qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
1041
1042process_again:
1043 wake_io_thread = qedi_process_completions(fp);
1044 if (wake_io_thread) {
1045 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
1046 "process already running\n");
1047 }
1048
1049 if (qedi_fp_has_work(fp) == 0)
1050 qed_sb_update_sb_idx(fp->sb_info);
1051
1052 /* Check for more work */
1053 rmb();
1054
1055 if (qedi_fp_has_work(fp) == 0)
1056 qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
1057 else
1058 goto process_again;
1059
1060 return IRQ_HANDLED;
1061}
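
/*
 * qedi_msix_handler() above follows the standard "disable, drain, re-check,
 * re-enable" MSI-X pattern: re-checking qedi_fp_has_work() after the rmb()
 * closes the window in which a completion could land between the last drain
 * and interrupt re-arming. A generic sketch of the shape, with hypothetical
 * helpers standing in for the qed_sb_*() calls:
 */
#include <stdbool.h>

void hw_irq_disable(void);	/* stand-in for qed_sb_ack(IGU_INT_DISABLE) */
void hw_irq_enable(void);	/* stand-in for qed_sb_ack(IGU_INT_ENABLE) */
void drain_completions(void);	/* stand-in for qedi_process_completions() */
bool hw_more_work(void);	/* stand-in for qedi_fp_has_work() */

static void irq_drain_pattern(void)
{
	hw_irq_disable();
	do
		drain_completions();
	while (hw_more_work());		/* re-check before re-arming */
	hw_irq_enable();
}
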
1062
1063/* simd handler for MSI/INTa */
1064static void qedi_simd_int_handler(void *cookie)
1065{
1066 /* Cookie is qedi_ctx struct */
1067 struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
1068
1069 QEDI_WARN(&qedi->dbg_ctx, "qedi=%p.\n", qedi);
1070}
1071
1072#define QEDI_SIMD_HANDLER_NUM 0
1073static void qedi_sync_free_irqs(struct qedi_ctx *qedi)
1074{
1075 int i;
1076
1077 if (qedi->int_info.msix_cnt) {
1078 for (i = 0; i < qedi->int_info.used_cnt; i++) {
1079 synchronize_irq(qedi->int_info.msix[i].vector);
1080 irq_set_affinity_hint(qedi->int_info.msix[i].vector,
1081 NULL);
1082 free_irq(qedi->int_info.msix[i].vector,
1083 &qedi->fp_array[i]);
1084 }
1085 } else {
1086 qedi_ops->common->simd_handler_clean(qedi->cdev,
1087 QEDI_SIMD_HANDLER_NUM);
1088 }
1089
1090 qedi->int_info.used_cnt = 0;
1091 qedi_ops->common->set_fp_int(qedi->cdev, 0);
1092}
1093
1094static int qedi_request_msix_irq(struct qedi_ctx *qedi)
1095{
1096 int i, rc, cpu;
1097
1098 cpu = cpumask_first(cpu_online_mask);
1099 for (i = 0; i < MIN_NUM_CPUS_MSIX(qedi); i++) {
1100 rc = request_irq(qedi->int_info.msix[i].vector,
1101 qedi_msix_handler, 0, "qedi",
1102 &qedi->fp_array[i]);
1103
1104 if (rc) {
1105 QEDI_WARN(&qedi->dbg_ctx, "request_irq failed.\n");
1106 qedi_sync_free_irqs(qedi);
1107 return rc;
1108 }
1109 qedi->int_info.used_cnt++;
1110 rc = irq_set_affinity_hint(qedi->int_info.msix[i].vector,
1111 get_cpu_mask(cpu));
1112 cpu = cpumask_next(cpu, cpu_online_mask);
1113 }
1114
1115 return 0;
1116}
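
/*
 * Note: the loop above stripes MSI-X vectors across online CPUs with
 * cpumask_first()/cpumask_next(). Assuming MIN_NUM_CPUS_MSIX() caps the
 * count at the number of online CPUs (as the name suggests), the walk
 * cannot run past the online mask, so no explicit wrap back to
 * cpumask_first() is needed; the irq_set_affinity_hint() result is
 * advisory and deliberately ignored here.
 */
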
1117
1118static int qedi_setup_int(struct qedi_ctx *qedi)
1119{
1120 int rc = 0;
1121
1122 rc = qedi_ops->common->set_fp_int(qedi->cdev, num_online_cpus());
1123 rc = qedi_ops->common->get_fp_int(qedi->cdev, &qedi->int_info);
1124 if (rc)
1125 goto exit_setup_int;
1126
1127 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
1128 "Number of msix_cnt = 0x%x num of cpus = 0x%x\n",
1129 qedi->int_info.msix_cnt, num_online_cpus());
1130
1131 if (qedi->int_info.msix_cnt) {
1132 rc = qedi_request_msix_irq(qedi);
1133 goto exit_setup_int;
1134 } else {
1135 qedi_ops->common->simd_handler_config(qedi->cdev, &qedi,
1136 QEDI_SIMD_HANDLER_NUM,
1137 qedi_simd_int_handler);
1138 qedi->int_info.used_cnt = 1;
1139 }
1140
1141exit_setup_int:
1142 return rc;
1143}
1144
1145static void qedi_free_bdq(struct qedi_ctx *qedi)
1146{
1147 int i;
1148
1149 if (qedi->bdq_pbl_list)
1150 dma_free_coherent(&qedi->pdev->dev, PAGE_SIZE,
1151 qedi->bdq_pbl_list, qedi->bdq_pbl_list_dma);
1152
1153 if (qedi->bdq_pbl)
1154 dma_free_coherent(&qedi->pdev->dev, qedi->bdq_pbl_mem_size,
1155 qedi->bdq_pbl, qedi->bdq_pbl_dma);
1156
1157 for (i = 0; i < QEDI_BDQ_NUM; i++) {
1158 if (qedi->bdq[i].buf_addr) {
1159 dma_free_coherent(&qedi->pdev->dev, QEDI_BDQ_BUF_SIZE,
1160 qedi->bdq[i].buf_addr,
1161 qedi->bdq[i].buf_dma);
1162 }
1163 }
1164}
1165
1166static void qedi_free_global_queues(struct qedi_ctx *qedi)
1167{
1168 int i;
1169 struct global_queue **gl = qedi->global_queues;
1170
1171 for (i = 0; i < qedi->num_queues; i++) {
1172 if (!gl[i])
1173 continue;
1174
1175 if (gl[i]->cq)
1176 dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_mem_size,
1177 gl[i]->cq, gl[i]->cq_dma);
1178 if (gl[i]->cq_pbl)
1179 dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_pbl_size,
1180 gl[i]->cq_pbl, gl[i]->cq_pbl_dma);
1181
1182 kfree(gl[i]);
1183 }
1184 qedi_free_bdq(qedi);
1185}
1186
1187static int qedi_alloc_bdq(struct qedi_ctx *qedi)
1188{
1189 int i;
1190 struct scsi_bd *pbl;
1191 u64 *list;
1192 dma_addr_t page;
1193
1194 /* Alloc dma memory for BDQ buffers */
1195 for (i = 0; i < QEDI_BDQ_NUM; i++) {
1196 qedi->bdq[i].buf_addr =
1197 dma_alloc_coherent(&qedi->pdev->dev,
1198 QEDI_BDQ_BUF_SIZE,
1199 &qedi->bdq[i].buf_dma,
1200 GFP_KERNEL);
1201 if (!qedi->bdq[i].buf_addr) {
1202 QEDI_ERR(&qedi->dbg_ctx,
1203 "Could not allocate BDQ buffer %d.\n", i);
1204 return -ENOMEM;
1205 }
1206 }
1207
1208 /* Alloc dma memory for BDQ page buffer list */
1209 qedi->bdq_pbl_mem_size = QEDI_BDQ_NUM * sizeof(struct scsi_bd);
1210 qedi->bdq_pbl_mem_size = ALIGN(qedi->bdq_pbl_mem_size, PAGE_SIZE);
1211 qedi->rq_num_entries = qedi->bdq_pbl_mem_size / sizeof(struct scsi_bd);
1212
1213 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "rq_num_entries = %d.\n",
1214 qedi->rq_num_entries);
1215
1216 qedi->bdq_pbl = dma_alloc_coherent(&qedi->pdev->dev,
1217 qedi->bdq_pbl_mem_size,
1218 &qedi->bdq_pbl_dma, GFP_KERNEL);
1219 if (!qedi->bdq_pbl) {
1220 QEDI_ERR(&qedi->dbg_ctx, "Could not allocate BDQ PBL.\n");
1221 return -ENOMEM;
1222 }
1223
1224 /*
1225 * Populate BDQ PBL with physical and virtual address of individual
1226 * BDQ buffers
1227 */
1228 pbl = (struct scsi_bd *)qedi->bdq_pbl;
1229 for (i = 0; i < QEDI_BDQ_NUM; i++) {
1230 pbl->address.hi =
1231 cpu_to_le32(QEDI_U64_HI(qedi->bdq[i].buf_dma));
1232 pbl->address.lo =
1233 cpu_to_le32(QEDI_U64_LO(qedi->bdq[i].buf_dma));
1234 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
1235 "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
1236 pbl, pbl->address.hi, pbl->address.lo, i);
1237 pbl->opaque.hi = 0;
1238 pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i));
1239 pbl++;
1240 }
1241
1242 /* Allocate list of PBL pages */
1243 qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
1244 PAGE_SIZE,
1245 &qedi->bdq_pbl_list_dma,
1246 GFP_KERNEL);
1247 if (!qedi->bdq_pbl_list) {
1248 QEDI_ERR(&qedi->dbg_ctx,
1249 "Could not allocate list of PBL pages.\n");
1250 return -ENOMEM;
1251 }
1252 memset(qedi->bdq_pbl_list, 0, PAGE_SIZE);
1253
1254 /*
1255 * Now populate PBL list with pages that contain pointers to the
1256 * individual buffers.
1257 */
1258 qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size / PAGE_SIZE;
1259 list = (u64 *)qedi->bdq_pbl_list;
1260 page = qedi->bdq_pbl_list_dma;
1261 for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) {
1262 *list = qedi->bdq_pbl_dma;
1263 list++;
1264 page += PAGE_SIZE;
1265 }
1266
1267 return 0;
1268}
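
/*
 * The PBL population above stores each 64-bit DMA address as two
 * little-endian 32-bit halves via QEDI_U64_HI()/QEDI_U64_LO(). A
 * self-contained illustration of the same split (split_u64() is an
 * illustrative name):
 */
#include <stdint.h>

static void split_u64(uint64_t addr, uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)(addr >> 32);		/* upper 32 bits */
	*lo = (uint32_t)(addr & 0xffffffffu);	/* lower 32 bits */
}
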
1269
1270static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
1271{
1272 u32 *list;
1273 int i;
1274 int status = 0, rc;
1275 u32 *pbl;
1276 dma_addr_t page;
1277 int num_pages;
1278
1279 /*
1280 * Number of global queues (CQ / RQ). This should
1281 * be <= number of available MSIX vectors for the PF
1282 */
1283 if (!qedi->num_queues) {
1284 QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n");
1285 return 1;
1286 }
1287
1288 /* Make sure we allocated the PBL that will contain the physical
1289 * addresses of our queues
1290 */
1291 if (!qedi->p_cpuq) {
1292 status = 1;
1293 goto mem_alloc_failure;
1294 }
1295
1296 qedi->global_queues = kzalloc((sizeof(struct global_queue *) *
1297 qedi->num_queues), GFP_KERNEL);
1298 if (!qedi->global_queues) {
1299 QEDI_ERR(&qedi->dbg_ctx,
1300 "Unable to allocate global queues array ptr memory\n");
1301 return -ENOMEM;
1302 }
1303 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
1304 "qedi->global_queues=%p.\n", qedi->global_queues);
1305
1306 /* Allocate DMA coherent buffers for BDQ */
1307 rc = qedi_alloc_bdq(qedi);
1308 if (rc)
1309 goto mem_alloc_failure;
1310
1311 /* Allocate a CQ and an associated PBL for each MSI-X
1312 * vector.
1313 */
1314 for (i = 0; i < qedi->num_queues; i++) {
1315 qedi->global_queues[i] =
1316 kzalloc(sizeof(*qedi->global_queues[0]),
1317 GFP_KERNEL);
1318 if (!qedi->global_queues[i]) {
1319 QEDI_ERR(&qedi->dbg_ctx,
1320 "Unable to allocate global queue %d.\n", i);
1321 goto mem_alloc_failure;
1322 }
1323
1324 qedi->global_queues[i]->cq_mem_size =
1325 (QEDI_CQ_SIZE + 8) * sizeof(union iscsi_cqe);
1326 qedi->global_queues[i]->cq_mem_size =
1327 (qedi->global_queues[i]->cq_mem_size +
1328 (QEDI_PAGE_SIZE - 1));
1329
1330 qedi->global_queues[i]->cq_pbl_size =
1331 (qedi->global_queues[i]->cq_mem_size /
1332 QEDI_PAGE_SIZE) * sizeof(void *);
1333 qedi->global_queues[i]->cq_pbl_size =
1334 (qedi->global_queues[i]->cq_pbl_size +
1335 (QEDI_PAGE_SIZE - 1));
1336
1337 qedi->global_queues[i]->cq =
1338 dma_alloc_coherent(&qedi->pdev->dev,
1339 qedi->global_queues[i]->cq_mem_size,
1340 &qedi->global_queues[i]->cq_dma,
1341 GFP_KERNEL);
1342
1343 if (!qedi->global_queues[i]->cq) {
1344 QEDI_WARN(&qedi->dbg_ctx,
1345 "Could not allocate cq.\n");
1346 status = -ENOMEM;
1347 goto mem_alloc_failure;
1348 }
1349 memset(qedi->global_queues[i]->cq, 0,
1350 qedi->global_queues[i]->cq_mem_size);
1351
1352 qedi->global_queues[i]->cq_pbl =
1353 dma_alloc_coherent(&qedi->pdev->dev,
1354 qedi->global_queues[i]->cq_pbl_size,
1355 &qedi->global_queues[i]->cq_pbl_dma,
1356 GFP_KERNEL);
1357
1358 if (!qedi->global_queues[i]->cq_pbl) {
1359 QEDI_WARN(&qedi->dbg_ctx,
1360 "Could not allocate cq PBL.\n");
1361 status = -ENOMEM;
1362 goto mem_alloc_failure;
1363 }
1364 memset(qedi->global_queues[i]->cq_pbl, 0,
1365 qedi->global_queues[i]->cq_pbl_size);
1366
1367 /* Create PBL */
1368 num_pages = qedi->global_queues[i]->cq_mem_size /
1369 QEDI_PAGE_SIZE;
1370 page = qedi->global_queues[i]->cq_dma;
1371 pbl = (u32 *)qedi->global_queues[i]->cq_pbl;
1372
1373 while (num_pages--) {
1374 *pbl = (u32)page;
1375 pbl++;
1376 *pbl = (u32)((u64)page >> 32);
1377 pbl++;
1378 page += QEDI_PAGE_SIZE;
1379 }
1380 }
1381
1382 list = (u32 *)qedi->p_cpuq;
1383
1384 /*
1385 * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
1386 * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points
1387 * to the physical address which contains an array of pointers to the
1388 * physical addresses of the specific queue pages.
1389 */
1390 for (i = 0; i < qedi->num_queues; i++) {
1391 *list = (u32)qedi->global_queues[i]->cq_pbl_dma;
1392 list++;
1393 *list = (u32)((u64)qedi->global_queues[i]->cq_pbl_dma >> 32);
1394 list++;
1395
1396 *list = (u32)0;
1397 list++;
1398 *list = (u32)((u64)0 >> 32);
1399 list++;
1400 }
1401
1402 return 0;
1403
1404mem_alloc_failure:
1405 qedi_free_global_queues(qedi);
1406 return status;
1407}
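
/*
 * A compact sketch of the p_cpuq layout built at the end of
 * qedi_alloc_global_queues() above: per queue, the CQ PBL address is written
 * as a lo/hi pair of 32-bit words, followed by a zeroed lo/hi pair reserved
 * for the (unused) RQ PBL. pack_queue_list() is an illustrative name.
 */
#include <stdint.h>

static void pack_queue_list(uint32_t *list, const uint64_t *cq_pbl_dma,
			    int num_queues)
{
	int i;

	for (i = 0; i < num_queues; i++) {
		*list++ = (uint32_t)cq_pbl_dma[i];		/* CQ PBL lo */
		*list++ = (uint32_t)(cq_pbl_dma[i] >> 32);	/* CQ PBL hi */
		*list++ = 0;					/* RQ PBL lo (unused) */
		*list++ = 0;					/* RQ PBL hi (unused) */
	}
}
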
1408
1409int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
1410{
1411 int rval = 0;
1412 u32 *pbl;
1413 dma_addr_t page;
1414 int num_pages;
1415
1416 if (!ep)
1417 return -EIO;
1418
1419 /* Calculate appropriate queue and PBL sizes */
1420 ep->sq_mem_size = QEDI_SQ_SIZE * sizeof(struct iscsi_wqe);
1421 ep->sq_mem_size += QEDI_PAGE_SIZE - 1;
1422
1423 ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
1424 ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;
1425
1426 ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
1427 &ep->sq_dma, GFP_KERNEL);
1428 if (!ep->sq) {
1429 QEDI_WARN(&qedi->dbg_ctx,
1430 "Could not allocate send queue.\n");
1431 rval = -ENOMEM;
1432 goto out;
1433 }
1434 memset(ep->sq, 0, ep->sq_mem_size);
1435
1436 ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
1437 &ep->sq_pbl_dma, GFP_KERNEL);
1438 if (!ep->sq_pbl) {
1439 QEDI_WARN(&qedi->dbg_ctx,
1440 "Could not allocate send queue PBL.\n");
1441 rval = -ENOMEM;
1442 goto out_free_sq;
1443 }
1444 memset(ep->sq_pbl, 0, ep->sq_pbl_size);
1445
1446 /* Create PBL */
1447 num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE;
1448 page = ep->sq_dma;
1449 pbl = (u32 *)ep->sq_pbl;
1450
1451 while (num_pages--) {
1452 *pbl = (u32)page;
1453 pbl++;
1454 *pbl = (u32)((u64)page >> 32);
1455 pbl++;
1456 page += QEDI_PAGE_SIZE;
1457 }
1458
1459 return rval;
1460
1461out_free_sq:
1462 dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
1463 ep->sq_dma);
1464out:
1465 return rval;
1466}
1467
1468void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
1469{
1470 if (ep->sq_pbl)
1471 dma_free_coherent(&qedi->pdev->dev, ep->sq_pbl_size, ep->sq_pbl,
1472 ep->sq_pbl_dma);
1473 if (ep->sq)
1474 dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
1475 ep->sq_dma);
1476}
1477
1478int qedi_get_task_idx(struct qedi_ctx *qedi)
1479{
1480 s16 tmp_idx;
1481
1482again:
1483 tmp_idx = find_first_zero_bit(qedi->task_idx_map,
1484 MAX_ISCSI_TASK_ENTRIES);
1485
1486 if (tmp_idx >= MAX_ISCSI_TASK_ENTRIES) {
1487 QEDI_ERR(&qedi->dbg_ctx, "FW task context pool is full.\n");
1488 tmp_idx = -1;
1489 goto err_idx;
1490 }
1491
1492 if (test_and_set_bit(tmp_idx, qedi->task_idx_map))
1493 goto again;
1494
1495err_idx:
1496 return tmp_idx;
1497}
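
/*
 * qedi_get_task_idx() above is lock-free: find_first_zero_bit() only hints
 * at a candidate, and the atomic test_and_set_bit() arbitrates; a loser in
 * a race simply rescans. The same idea in portable C11 atomics, reduced to
 * a single-word bitmap (alloc_id() and NUM_IDS are illustrative):
 */
#include <stdatomic.h>

#define NUM_IDS 64

static _Atomic unsigned long id_map;	/* one bit per task id */

static int alloc_id(void)
{
	unsigned long map, bit;
	int i;

	for (;;) {
		map = atomic_load(&id_map);
		for (i = 0; i < NUM_IDS; i++)
			if (!(map & (1UL << i)))
				break;
		if (i == NUM_IDS)
			return -1;	/* pool exhausted */
		bit = 1UL << i;
		if (!(atomic_fetch_or(&id_map, bit) & bit))
			return i;	/* we set the bit first */
		/* lost the race for this bit; rescan */
	}
}
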
1498
1499void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx)
1500{
1501 if (!test_and_clear_bit(idx, qedi->task_idx_map)) {
1502 QEDI_ERR(&qedi->dbg_ctx,
1503 "FW task context, already cleared, tid=0x%x\n", idx);
1504 WARN_ON(1);
1505 }
1506}
1507
1508void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
1509 struct qedi_cmd *cmd)
1510{
1511 qedi->itt_map[tid].itt = proto_itt;
1512 qedi->itt_map[tid].p_cmd = cmd;
1513
1514 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
1515 "update itt map tid=0x%x, with proto itt=0x%x\n", tid,
1516 qedi->itt_map[tid].itt);
1517}
1518
1519void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, s16 *tid)
1520{
1521 u16 i;
1522
1523 for (i = 0; i < MAX_ISCSI_TASK_ENTRIES; i++) {
1524 if (qedi->itt_map[i].itt == itt) {
1525 *tid = i;
1526 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
1527 "Ref itt=0x%x, found at tid=0x%x\n",
1528 itt, *tid);
1529 return;
1530 }
1531 }
1532
1533 WARN_ON(1);
1534}
1535
1536void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt)
1537{
1538 *proto_itt = qedi->itt_map[tid].itt;
1539 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
1540 "Get itt map tid [0x%x with proto itt[0x%x]",
1541 tid, *proto_itt);
1542}
1543
1544struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid)
1545{
1546 struct qedi_cmd *cmd = NULL;
1547
1548 if (tid >= MAX_ISCSI_TASK_ENTRIES)
1549 return NULL;
1550
1551 cmd = qedi->itt_map[tid].p_cmd;
1552 if (!cmd || cmd->task_id != tid)
1553 return NULL;
1554
1555 qedi->itt_map[tid].p_cmd = NULL;
1556
1557 return cmd;
1558}
1559
1560static int qedi_alloc_itt(struct qedi_ctx *qedi)
1561{
1562 qedi->itt_map = kcalloc(MAX_ISCSI_TASK_ENTRIES,
1563 sizeof(struct qedi_itt_map), GFP_KERNEL);
1564 if (!qedi->itt_map) {
1565 QEDI_ERR(&qedi->dbg_ctx,
1566 "Unable to allocate itt map array memory\n");
1567 return -ENOMEM;
1568 }
1569 return 0;
1570}
1571
1572static void qedi_free_itt(struct qedi_ctx *qedi)
1573{
1574 kfree(qedi->itt_map);
1575}
1576
1577static struct qed_ll2_cb_ops qedi_ll2_cb_ops = {
1578 .rx_cb = qedi_ll2_rx,
1579 .tx_cb = NULL,
1580};
1581
1582static int qedi_percpu_io_thread(void *arg)
1583{
1584 struct qedi_percpu_s *p = arg;
1585 struct qedi_work *work, *tmp;
1586 unsigned long flags;
1587 LIST_HEAD(work_list);
1588
1589 set_user_nice(current, -20);
1590
1591 while (!kthread_should_stop()) {
1592 spin_lock_irqsave(&p->p_work_lock, flags);
1593 while (!list_empty(&p->work_list)) {
1594 list_splice_init(&p->work_list, &work_list);
1595 spin_unlock_irqrestore(&p->p_work_lock, flags);
1596
1597 list_for_each_entry_safe(work, tmp, &work_list, list) {
1598 list_del_init(&work->list);
1599 qedi_fp_process_cqes(work);
1600 if (!work->is_solicited)
1601 kfree(work);
1602 }
1603 cond_resched();
1604 spin_lock_irqsave(&p->p_work_lock, flags);
1605 }
1606 set_current_state(TASK_INTERRUPTIBLE);
1607 spin_unlock_irqrestore(&p->p_work_lock, flags);
1608 schedule();
1609 }
1610 __set_current_state(TASK_RUNNING);
1611
1612 return 0;
1613}
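
/*
 * The io thread above uses the classic splice-under-lock pattern: the
 * shared work_list is moved wholesale onto a private list while holding
 * p_work_lock, and the (potentially slow) CQE processing then runs with the
 * lock dropped, so producers in interrupt context never wait behind it.
 * Note also that TASK_INTERRUPTIBLE is set before the lock is released,
 * which keeps a concurrent wake_up_process() from being lost between the
 * final emptiness check and schedule().
 */
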
1614
1615static void qedi_percpu_thread_create(unsigned int cpu)
1616{
1617 struct qedi_percpu_s *p;
1618 struct task_struct *thread;
1619
1620 p = &per_cpu(qedi_percpu, cpu);
1621
1622 thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p,
1623 cpu_to_node(cpu),
1624 "qedi_thread/%d", cpu);
1625 if (likely(!IS_ERR(thread))) {
1626 kthread_bind(thread, cpu);
1627 p->iothread = thread;
1628 wake_up_process(thread);
1629 }
1630}
1631
1632static void qedi_percpu_thread_destroy(unsigned int cpu)
1633{
1634 struct qedi_percpu_s *p;
1635 struct task_struct *thread;
1636 struct qedi_work *work, *tmp;
1637
1638 p = &per_cpu(qedi_percpu, cpu);
1639 spin_lock_bh(&p->p_work_lock);
1640 thread = p->iothread;
1641 p->iothread = NULL;
1642
1643 list_for_each_entry_safe(work, tmp, &p->work_list, list) {
1644 list_del_init(&work->list);
1645 qedi_fp_process_cqes(work);
1646 if (!work->is_solicited)
1647 kfree(work);
1648 }
1649
1650 spin_unlock_bh(&p->p_work_lock);
1651 if (thread)
1652 kthread_stop(thread);
1653}
1654
1655static int qedi_cpu_callback(struct notifier_block *nfb,
1656 unsigned long action, void *hcpu)
1657{
1658 unsigned int cpu = (unsigned long)hcpu;
1659
1660 switch (action) {
1661 case CPU_ONLINE:
1662 case CPU_ONLINE_FROZEN:
1663 QEDI_ERR(NULL, "CPU %d online.\n", cpu);
1664 qedi_percpu_thread_create(cpu);
1665 break;
1666 case CPU_DEAD:
1667 case CPU_DEAD_FROZEN:
1668 QEDI_ERR(NULL, "CPU %d offline.\n", cpu);
1669 qedi_percpu_thread_destroy(cpu);
1670 break;
1671 default:
1672 break;
1673 }
1674
1675 return NOTIFY_OK;
1676}
1677
1678static struct notifier_block qedi_cpu_notifier = {
1679 .notifier_call = qedi_cpu_callback,
1680};
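
/*
 * Historical note: register_hotcpu_notifier()/CPU_ONLINE-style callbacks
 * were the hotplug interface at the time of this submission; later kernels
 * moved such users to the cpuhp state machine (cpuhp_setup_state() with
 * explicit online/offline callbacks), so this block reads differently in
 * modern trees.
 */
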
1681
1682void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu)
1683{
1684 struct qed_ll2_params params;
1685
1686 qedi_recover_all_conns(qedi);
1687
1688 qedi_ops->ll2->stop(qedi->cdev);
1689 qedi_ll2_free_skbs(qedi);
1690
1691 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "old MTU %u, new MTU %u\n",
1692 qedi->ll2_mtu, mtu);
1693 memset(&params, 0, sizeof(params));
1694 qedi->ll2_mtu = mtu;
1695 params.mtu = qedi->ll2_mtu + IPV6_HDR_LEN + TCP_HDR_LEN;
1696 params.drop_ttl0_packets = 0;
1697 params.rx_vlan_stripping = 1;
1698 ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);
1699 qedi_ops->ll2->start(qedi->cdev, &params);
1700}
1701
1702static void __qedi_remove(struct pci_dev *pdev, int mode)
1703{
1704 struct qedi_ctx *qedi = pci_get_drvdata(pdev);
1705
1706 if (qedi->tmf_thread) {
1707 flush_workqueue(qedi->tmf_thread);
1708 destroy_workqueue(qedi->tmf_thread);
1709 qedi->tmf_thread = NULL;
1710 }
1711
1712 if (qedi->offload_thread) {
1713 flush_workqueue(qedi->offload_thread);
1714 destroy_workqueue(qedi->offload_thread);
1715 qedi->offload_thread = NULL;
1716 }
1717
1718#ifdef CONFIG_DEBUG_FS
1719 qedi_dbg_host_exit(&qedi->dbg_ctx);
1720#endif
1721 if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags))
1722 qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);
1723
1724 qedi_sync_free_irqs(qedi);
1725
1726 if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
1727 qedi_ops->stop(qedi->cdev);
1728 qedi_ops->ll2->stop(qedi->cdev);
1729 }
1730
1731 if (mode == QEDI_MODE_NORMAL)
1732 qedi_free_iscsi_pf_param(qedi);
1733
1734 if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
1735 qedi_ops->common->slowpath_stop(qedi->cdev);
1736 qedi_ops->common->remove(qedi->cdev);
1737 }
1738
1739 qedi_destroy_fp(qedi);
1740
1741 if (mode == QEDI_MODE_NORMAL) {
1742 qedi_release_cid_que(qedi);
1743 qedi_cm_free_mem(qedi);
1744 qedi_free_uio(qedi->udev);
1745 qedi_free_itt(qedi);
1746
1747 iscsi_host_remove(qedi->shost);
1748 iscsi_host_free(qedi->shost);
1749
1750 if (qedi->ll2_recv_thread) {
1751 kthread_stop(qedi->ll2_recv_thread);
1752 qedi->ll2_recv_thread = NULL;
1753 }
1754 qedi_ll2_free_skbs(qedi);
1755 }
1756}
1757
1758static int __qedi_probe(struct pci_dev *pdev, int mode)
1759{
1760 struct qedi_ctx *qedi;
1761 struct qed_ll2_params params;
1762 u32 dp_module = 0;
1763 u8 dp_level = 0;
1764 bool is_vf = false;
1765 char host_buf[16];
1766 struct qed_link_params link_params;
1767 struct qed_slowpath_params sp_params;
1768 struct qed_probe_params qed_params;
1769 void *task_start, *task_end;
1770 int rc;
1771 u16 tmp;
1772
1773 if (mode != QEDI_MODE_RECOVERY) {
1774 qedi = qedi_host_alloc(pdev);
1775 if (!qedi) {
1776 rc = -ENOMEM;
1777 goto exit_probe;
1778 }
1779 } else {
1780 qedi = pci_get_drvdata(pdev);
1781 }
1782
1783 memset(&qed_params, 0, sizeof(qed_params));
1784 qed_params.protocol = QED_PROTOCOL_ISCSI;
1785 qed_params.dp_module = dp_module;
1786 qed_params.dp_level = dp_level;
1787 qed_params.is_vf = is_vf;
1788 qedi->cdev = qedi_ops->common->probe(pdev, &qed_params);
1789 if (!qedi->cdev) {
1790 rc = -ENODEV;
1791 QEDI_ERR(&qedi->dbg_ctx, "Cannot initialize hardware\n");
1792 goto free_host;
1793 }
1794
1795 qedi->msix_count = MAX_NUM_MSIX_PF;
1796 atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
1797
1798 if (mode != QEDI_MODE_RECOVERY) {
1799 rc = qedi_set_iscsi_pf_param(qedi);
1800 if (rc) {
1801 rc = -ENOMEM;
1802 QEDI_ERR(&qedi->dbg_ctx,
1803 "Set iSCSI pf param fail\n");
1804 goto free_host;
1805 }
1806 }
1807
1808 qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
1809
1810 rc = qedi_prepare_fp(qedi);
1811 if (rc) {
1812 QEDI_ERR(&qedi->dbg_ctx, "Cannot prepare fastpath.\n");
1813 goto free_pf_params;
1814 }
1815
1816 /* Start the Slowpath-process */
1817 memset(&sp_params, 0, sizeof(struct qed_slowpath_params));
1818 sp_params.int_mode = QED_INT_MODE_MSIX;
1819 sp_params.drv_major = QEDI_DRIVER_MAJOR_VER;
1820 sp_params.drv_minor = QEDI_DRIVER_MINOR_VER;
1821 sp_params.drv_rev = QEDI_DRIVER_REV_VER;
1822 sp_params.drv_eng = QEDI_DRIVER_ENG_VER;
1823 strlcpy(sp_params.name, "qedi iSCSI", QED_DRV_VER_STR_SIZE);
1824 rc = qedi_ops->common->slowpath_start(qedi->cdev, &sp_params);
1825 if (rc) {
1826 QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath\n");
1827 goto stop_hw;
1828 }
1829
1830 /* update_pf_params needs to be called before and after slowpath
1831 * start
1832 */
1833 qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
1834
1835 rc = qedi_setup_int(qedi);
1836 if (rc)
1837 goto stop_iscsi_func;
1838
1839 qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);
1840
1841 /* Learn information crucial for qedi to progress */
1842 rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info);
1843 if (rc)
1844 goto stop_iscsi_func;
1845
1846 /* Record BDQ producer doorbell addresses */
1847 qedi->bdq_primary_prod = qedi->dev_info.primary_dbq_rq_addr;
1848 qedi->bdq_secondary_prod = qedi->dev_info.secondary_bdq_rq_addr;
1849 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
1850 "BDQ primary_prod=%p secondary_prod=%p.\n",
1851 qedi->bdq_primary_prod,
1852 qedi->bdq_secondary_prod);
1853
1854 /*
1855 * We need to write the number of BDs in the BDQ we've preallocated so
1856 * the f/w will do a prefetch and we'll get an unsolicited CQE when a
1857 * packet arrives.
1858 */
1859 qedi->bdq_prod_idx = QEDI_BDQ_NUM;
1860 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
1861 "Writing %d to primary and secondary BDQ doorbell registers.\n",
1862 qedi->bdq_prod_idx);
1863 writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
1864 tmp = readw(qedi->bdq_primary_prod);
1865 writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
1866 tmp = readw(qedi->bdq_secondary_prod);
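	/*
	 * The readw() calls above are not dead code despite the unused
	 * return value: reading back from the device flushes the posted
	 * doorbell writes so the firmware sees both BDQ producer updates
	 * before the driver proceeds.
	 */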
1867
1868 ether_addr_copy(qedi->mac, qedi->dev_info.common.hw_mac);
1869 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "MAC address is %pM.\n",
1870 qedi->mac);
1871
1872 sprintf(host_buf, "host_%d", qedi->shost->host_no);
1873 qedi_ops->common->set_id(qedi->cdev, host_buf, QEDI_MODULE_VERSION);
1874
1875 qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi);
1876
1877 memset(&params, 0, sizeof(params));
1878 params.mtu = DEF_PATH_MTU + IPV6_HDR_LEN + TCP_HDR_LEN;
1879 qedi->ll2_mtu = DEF_PATH_MTU;
1880 params.drop_ttl0_packets = 0;
1881 params.rx_vlan_stripping = 1;
1882 ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);
1883
1884 if (mode != QEDI_MODE_RECOVERY) {
1885 /* set up rx path */
1886 INIT_LIST_HEAD(&qedi->ll2_skb_list);
1887 spin_lock_init(&qedi->ll2_lock);
1888 /* start qedi context */
1889 spin_lock_init(&qedi->hba_lock);
1890 spin_lock_init(&qedi->task_idx_lock);
1891 }
1892 qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi);
1893 qedi_ops->ll2->start(qedi->cdev, &params);
1894
1895 if (mode != QEDI_MODE_RECOVERY) {
1896 qedi->ll2_recv_thread = kthread_run(qedi_ll2_recv_thread,
1897 (void *)qedi,
1898 "qedi_ll2_thread");
1899 }
1900
1901 rc = qedi_ops->start(qedi->cdev, &qedi->tasks,
1902 qedi, qedi_iscsi_event_cb);
1903 if (rc) {
1904 rc = -ENODEV;
1905 QEDI_ERR(&qedi->dbg_ctx, "Cannot start iSCSI function\n");
1906 goto stop_slowpath;
1907 }
1908
1909 task_start = qedi_get_task_mem(&qedi->tasks, 0);
1910 task_end = qedi_get_task_mem(&qedi->tasks, MAX_TID_BLOCKS_ISCSI - 1);
1911 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
1912 "Task context start=%p, end=%p block_size=%u.\n",
1913 task_start, task_end, qedi->tasks.size);
1914
1915 memset(&link_params, 0, sizeof(link_params));
1916 link_params.link_up = true;
1917 rc = qedi_ops->common->set_link(qedi->cdev, &link_params);
1918 if (rc) {
1919 QEDI_WARN(&qedi->dbg_ctx, "Link set up failed.\n");
1920 atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
1921 }
1922
1923#ifdef CONFIG_DEBUG_FS
1924 qedi_dbg_host_init(&qedi->dbg_ctx, &qedi_debugfs_ops,
1925 &qedi_dbg_fops);
1926#endif
1927 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1928 "QLogic FastLinQ iSCSI Module qedi %s, FW %d.%d.%d.%d\n",
1929 QEDI_MODULE_VERSION, FW_MAJOR_VERSION, FW_MINOR_VERSION,
1930 FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
1931
1932 if (mode == QEDI_MODE_NORMAL) {
1933 if (iscsi_host_add(qedi->shost, &pdev->dev)) {
1934 QEDI_ERR(&qedi->dbg_ctx,
1935 "Could not add iscsi host\n");
1936 rc = -ENOMEM;
1937 goto remove_host;
1938 }
1939
1940 /* Allocate uio buffers */
1941 rc = qedi_alloc_uio_rings(qedi);
1942 if (rc) {
1943 QEDI_ERR(&qedi->dbg_ctx,
1944 "UIO alloc ring failed err=%d\n", rc);
1945 goto remove_host;
1946 }
1947
1948 rc = qedi_init_uio(qedi);
1949 if (rc) {
1950 QEDI_ERR(&qedi->dbg_ctx,
1951 "UIO init failed, err=%d\n", rc);
1952 goto free_uio;
1953 }
1954
1955 /* host the array on iscsi_conn */
1956 rc = qedi_setup_cid_que(qedi);
1957 if (rc) {
1958 QEDI_ERR(&qedi->dbg_ctx,
1959 "Could not setup cid que\n");
1960 goto free_uio;
1961 }
1962
1963 rc = qedi_cm_alloc_mem(qedi);
1964 if (rc) {
1965 QEDI_ERR(&qedi->dbg_ctx,
1966 "Could not alloc cm memory\n");
1967 goto free_cid_que;
1968 }
1969
1970 rc = qedi_alloc_itt(qedi);
1971 if (rc) {
1972 QEDI_ERR(&qedi->dbg_ctx,
1973 "Could not alloc itt memory\n");
1974 goto free_cid_que;
1975 }
1976
1977 sprintf(host_buf, "host_%d", qedi->shost->host_no);
1978 qedi->tmf_thread = create_singlethread_workqueue(host_buf);
1979 if (!qedi->tmf_thread) {
1980 QEDI_ERR(&qedi->dbg_ctx,
1981 "Unable to start tmf thread!\n");
1982 rc = -ENODEV;
1983 goto free_cid_que;
1984 }
1985
1986 sprintf(host_buf, "qedi_ofld%d", qedi->shost->host_no);
1987 qedi->offload_thread = create_workqueue(host_buf);
1988 if (!qedi->offload_thread) {
1989 QEDI_ERR(&qedi->dbg_ctx,
1990 "Unable to start offload thread!\n");
1991 rc = -ENODEV;
1992 goto free_cid_que;
1993 }
1994
1995 /* F/w needs 1st task context memory entry for performance */
1996 set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);
1997 atomic_set(&qedi->num_offloads, 0);
1998 }
1999
2000 return 0;
2001
2002free_cid_que:
2003 qedi_release_cid_que(qedi);
2004free_uio:
2005 qedi_free_uio(qedi->udev);
2006remove_host:
2007#ifdef CONFIG_DEBUG_FS
2008 qedi_dbg_host_exit(&qedi->dbg_ctx);
2009#endif
2010 iscsi_host_remove(qedi->shost);
2011stop_iscsi_func:
2012 qedi_ops->stop(qedi->cdev);
2013stop_slowpath:
2014 qedi_ops->common->slowpath_stop(qedi->cdev);
2015stop_hw:
2016 qedi_ops->common->remove(qedi->cdev);
2017free_pf_params:
2018 qedi_free_iscsi_pf_param(qedi);
2019free_host:
2020 iscsi_host_free(qedi->shost);
2021exit_probe:
2022 return rc;
2023}
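
/*
 * The error ladder above unwinds in reverse order of acquisition, the usual
 * kernel goto-cleanup convention: each label releases exactly the resources
 * obtained before the corresponding failure point, so a mid-probe error can
 * bail out without duplicating teardown code.
 */
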
2024
2025static int qedi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2026{
2027 return __qedi_probe(pdev, QEDI_MODE_NORMAL);
2028}
2029
2030static void qedi_remove(struct pci_dev *pdev)
2031{
2032 __qedi_remove(pdev, QEDI_MODE_NORMAL);
2033}
2034
2035static struct pci_device_id qedi_pci_tbl[] = {
2036 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
2037 { 0 },
2038};
2039MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
2040
2041static struct pci_driver qedi_pci_driver = {
2042 .name = QEDI_MODULE_NAME,
2043 .id_table = qedi_pci_tbl,
2044 .probe = qedi_probe,
2045 .remove = qedi_remove,
2046};
2047
2048static int __init qedi_init(void)
2049{
2050 int rc = 0;
2051 int ret;
2052 struct qedi_percpu_s *p;
2053 unsigned int cpu = 0;
2054
2055 qedi_ops = qed_get_iscsi_ops();
2056 if (!qedi_ops) {
2057 QEDI_ERR(NULL, "Failed to get qed iSCSI operations\n");
2058 rc = -EINVAL;
2059 goto exit_qedi_init_0;
2060 }
2061
2062#ifdef CONFIG_DEBUG_FS
2063 qedi_dbg_init("qedi");
2064#endif
2065
2066 qedi_scsi_transport = iscsi_register_transport(&qedi_iscsi_transport);
2067 if (!qedi_scsi_transport) {
2068 QEDI_ERR(NULL, "Could not register qedi transport");
2069 rc = -ENOMEM;
2070 goto exit_qedi_init_1;
2071 }
2072
2073 register_hotcpu_notifier(&qedi_cpu_notifier);
2074
2075 ret = pci_register_driver(&qedi_pci_driver);
2076 if (ret) {
2077 QEDI_ERR(NULL, "Failed to register driver\n");
2078 rc = -EINVAL;
2079 goto exit_qedi_init_2;
2080 }
2081
2082 for_each_possible_cpu(cpu) {
2083 p = &per_cpu(qedi_percpu, cpu);
2084 INIT_LIST_HEAD(&p->work_list);
2085 spin_lock_init(&p->p_work_lock);
2086 p->iothread = NULL;
2087 }
2088
2089 for_each_online_cpu(cpu)
2090 qedi_percpu_thread_create(cpu);
2091
2092 return rc;
2093
2094exit_qedi_init_2:
2095 iscsi_unregister_transport(&qedi_iscsi_transport);
2096exit_qedi_init_1:
2097#ifdef CONFIG_DEBUG_FS
2098 qedi_dbg_exit();
2099#endif
2100 qed_put_iscsi_ops();
2101exit_qedi_init_0:
2102 return rc;
2103}
2104
2105static void __exit qedi_cleanup(void)
2106{
2107 unsigned int cpu = 0;
2108
2109 for_each_online_cpu(cpu)
2110 qedi_percpu_thread_destroy(cpu);
2111
2112 pci_unregister_driver(&qedi_pci_driver);
2113 unregister_hotcpu_notifier(&qedi_cpu_notifier);
2114 iscsi_unregister_transport(&qedi_iscsi_transport);
2115
2116#ifdef CONFIG_DEBUG_FS
2117 qedi_dbg_exit();
2118#endif
2119 qed_put_iscsi_ops();
2120}
2121
2122MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx iSCSI Module");
2123MODULE_LICENSE("GPL");
2124MODULE_AUTHOR("QLogic Corporation");
2125MODULE_VERSION(QEDI_MODULE_VERSION);
2126module_init(qedi_init);
2127module_exit(qedi_cleanup);
diff --git a/drivers/scsi/qedi/qedi_sysfs.c b/drivers/scsi/qedi/qedi_sysfs.c
new file mode 100644
index 000000000000..b10c48bd1428
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_sysfs.c
@@ -0,0 +1,52 @@
1/*
2 * QLogic iSCSI Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#include "qedi.h"
11#include "qedi_gbl.h"
12#include "qedi_iscsi.h"
13#include "qedi_dbg.h"
14
15static inline struct qedi_ctx *qedi_dev_to_hba(struct device *dev)
16{
17 struct Scsi_Host *shost = class_to_shost(dev);
18
19 return iscsi_host_priv(shost);
20}
21
22static ssize_t qedi_show_port_state(struct device *dev,
23 struct device_attribute *attr,
24 char *buf)
25{
26 struct qedi_ctx *qedi = qedi_dev_to_hba(dev);
27
28 if (atomic_read(&qedi->link_state) == QEDI_LINK_UP)
29 return sprintf(buf, "Online\n");
30 else
31 return sprintf(buf, "Linkdown\n");
32}
33
34static ssize_t qedi_show_speed(struct device *dev,
35 struct device_attribute *attr, char *buf)
36{
37 struct qedi_ctx *qedi = qedi_dev_to_hba(dev);
38 struct qed_link_output if_link;
39
40 qedi_ops->common->get_link(qedi->cdev, &if_link);
41
42 return sprintf(buf, "%d Gbit\n", if_link.speed / 1000);
43}
44
45static DEVICE_ATTR(port_state, 0444, qedi_show_port_state, NULL);
46static DEVICE_ATTR(speed, 0444, qedi_show_speed, NULL);
47
48struct device_attribute *qedi_shost_attrs[] = {
49 &dev_attr_port_state,
50 &dev_attr_speed,
51 NULL
52};
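
/*
 * These attributes are exported through the iSCSI host template's
 * shost_attrs hook, so once the host is registered they would typically
 * surface as /sys/class/scsi_host/hostN/port_state and .../speed (paths
 * shown for illustration; the exact location depends on how the SCSI host
 * is registered).
 */
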
diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h
new file mode 100644
index 000000000000..9543a1b139d4
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_version.h
@@ -0,0 +1,14 @@
1/*
2 * QLogic iSCSI Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#define QEDI_MODULE_VERSION "8.10.3.0"
11#define QEDI_DRIVER_MAJOR_VER 8
12#define QEDI_DRIVER_MINOR_VER 10
13#define QEDI_DRIVER_REV_VER 3
14#define QEDI_DRIVER_ENG_VER 0
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index fe7469c901f7..47eb4d545d13 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1988,9 +1988,9 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
 	scsi_qla_host_t *vha = NULL;
 	struct qla_hw_data *ha = base_vha->hw;
-	uint16_t options = 0;
 	int	cnt;
 	struct req_que *req = ha->req_q_map[0];
+	struct qla_qpair *qpair;
 
 	ret = qla24xx_vport_create_req_sanity_check(fc_vport);
 	if (ret) {
@@ -2075,15 +2075,9 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 	qlt_vport_create(vha, ha);
 	qla24xx_vport_disable(fc_vport, disable);
 
-	if (ha->flags.cpu_affinity_enabled) {
-		req = ha->req_q_map[1];
-		ql_dbg(ql_dbg_multiq, vha, 0xc000,
-		    "Request queue %p attached with "
-		    "VP[%d], cpu affinity =%d\n",
-		    req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
-		goto vport_queue;
-	} else if (ql2xmaxqueues == 1 || !ha->npiv_info)
+	if (!ql2xmqsupport || !ha->npiv_info)
 		goto vport_queue;
+
 	/* Create a request queue in QoS mode for the vport */
 	for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
 		if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
@@ -2095,20 +2089,20 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 	}
 
 	if (qos) {
-		ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
-		    qos);
-		if (!ret)
+		qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx);
+		if (!qpair)
 			ql_log(ql_log_warn, vha, 0x7084,
-			    "Can't create request queue for VP[%d]\n",
+			    "Can't create qpair for VP[%d]\n",
 			    vha->vp_idx);
 		else {
 			ql_dbg(ql_dbg_multiq, vha, 0xc001,
-			    "Request Que:%d Q0s: %d) created for VP[%d]\n",
-			    ret, qos, vha->vp_idx);
+			    "Queue pair: %d Qos: %d) created for VP[%d]\n",
+			    qpair->id, qos, vha->vp_idx);
 			ql_dbg(ql_dbg_user, vha, 0x7085,
-			    "Request Que:%d Q0s: %d) created for VP[%d]\n",
-			    ret, qos, vha->vp_idx);
-			req = ha->req_q_map[ret];
+			    "Queue Pair: %d Qos: %d) created for VP[%d]\n",
+			    qpair->id, qos, vha->vp_idx);
+			req = qpair->req;
+			vha->qpair = qpair;
 		}
 	}
 
@@ -2162,10 +2156,10 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 	clear_bit(vha->vp_idx, ha->vp_idx_map);
 	mutex_unlock(&ha->vport_lock);
 
-	if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
-		if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
+	if (vha->qpair->vp_idx == vha->vp_idx) {
+		if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
 			ql_log(ql_log_warn, vha, 0x7087,
-			    "Queue delete failed.\n");
+			    "Queue Pair delete failed.\n");
 	}
 
 	ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 45af34ddc432..21d9fb7fc887 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,7 +11,7 @@
 * ----------------------------------------------------------------------
 * | Level                 |   Last Value Used  |     Holes             |
 * ----------------------------------------------------------------------
- * | Module Init and Probe |       0x0191       | 0x0146                |
+ * | Module Init and Probe |       0x0193       | 0x0146                |
 * |                       |                    | 0x015b-0x0160         |
 * |                       |                    | 0x016e                |
 * | Mailbox commands      |       0x1199       | 0x1193                |
@@ -58,7 +58,7 @@
 * |                       |                    | 0xb13a,0xb142         |
 * |                       |                    | 0xb13c-0xb140         |
 * |                       |                    | 0xb149                |
- * | MultiQ                |       0xc00c       |                      |
+ * | MultiQ                |       0xc010       |                      |
 * | Misc                  |       0xd301       | 0xd031-0xd0ff         |
 * |                       |                    | 0xd101-0xd1fe         |
 * |                       |                    | 0xd214-0xd2fe         |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 5236e3f2a06a..f7df01b76714 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -401,6 +401,7 @@ typedef struct srb {
 	uint16_t type;
 	char *name;
 	int iocbs;
+	struct qla_qpair *qpair;
 	union {
 		struct srb_iocb iocb_cmd;
 		struct bsg_job *bsg_job;
@@ -2719,6 +2720,7 @@ struct isp_operations {
 
 	int (*get_flash_version) (struct scsi_qla_host *, void *);
 	int (*start_scsi) (srb_t *);
+	int (*start_scsi_mq) (srb_t *);
 	int (*abort_isp) (struct scsi_qla_host *);
 	int (*iospace_config)(struct qla_hw_data*);
 	int (*initialize_adapter)(struct scsi_qla_host *);
@@ -2730,8 +2732,10 @@ struct isp_operations {
 #define QLA_MSIX_FW_MODE(m)	(((m) & (BIT_7|BIT_8|BIT_9)) >> 7)
 #define QLA_MSIX_FW_MODE_1(m)	(QLA_MSIX_FW_MODE(m) == 1)
 
 #define QLA_MSIX_DEFAULT	0x00
 #define QLA_MSIX_RSP_Q		0x01
+#define QLA_ATIO_VECTOR		0x02
+#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q	0x03
 
 #define QLA_MIDX_DEFAULT	0
 #define QLA_MIDX_RSP_Q		1
@@ -2745,9 +2749,11 @@ struct scsi_qla_host;
 
 struct qla_msix_entry {
 	int have_irq;
+	int in_use;
 	uint32_t vector;
 	uint16_t entry;
-	struct rsp_que *rsp;
+	char name[30];
+	void *handle;
 	struct irq_affinity_notify irq_notify;
 	int cpuid;
 };
@@ -2872,7 +2878,6 @@ struct rsp_que {
 	struct qla_msix_entry *msix;
 	struct req_que *req;
 	srb_t *status_srb;	/* status continuation entry */
-	struct work_struct q_work;
 
 	dma_addr_t dma_fx00;
 	response_t *ring_fx00;
@@ -2909,6 +2914,37 @@ struct req_que {
 	uint8_t req_pkt[REQUEST_ENTRY_SIZE];
 };
 
+/*Queue pair data structure */
+struct qla_qpair {
+	spinlock_t qp_lock;
+	atomic_t ref_count;
+	/* distill these fields down to 'online=0/1'
+	 * ha->flags.eeh_busy
+	 * ha->flags.pci_channel_io_perm_failure
+	 * base_vha->loop_state
+	 */
+	uint32_t online:1;
+	/* move vha->flags.difdix_supported here */
+	uint32_t difdix_supported:1;
+	uint32_t delete_in_progress:1;
+
+	uint16_t id;			/* qp number used with FW */
+	uint16_t num_active_cmd;	/* cmds down at firmware */
+	cpumask_t cpu_mask;		/* CPU mask for cpu affinity operation */
+	uint16_t vp_idx;		/* vport ID */
+
+	mempool_t *srb_mempool;
+
+	/* to do: New driver: move queues to here instead of pointers */
+	struct req_que *req;
+	struct rsp_que *rsp;
+	struct atio_que *atio;
+	struct qla_msix_entry *msix;	/* point to &ha->msix_entries[x] */
+	struct qla_hw_data *hw;
+	struct work_struct q_work;
+	struct list_head qp_list_elem;	/* vha->qp_list */
+};
+
 /* Place holder for FW buffer parameters */
 struct qlfc_fw {
 	void *fw_buf;
@@ -3004,7 +3040,6 @@ struct qla_hw_data {
 		uint32_t	chip_reset_done		:1;
 		uint32_t	running_gold_fw		:1;
 		uint32_t	eeh_busy		:1;
-		uint32_t	cpu_affinity_enabled	:1;
 		uint32_t	disable_msix_handshake	:1;
 		uint32_t	fcp_prio_enabled	:1;
 		uint32_t	isp82xx_fw_hung:1;
@@ -3061,10 +3096,15 @@ struct qla_hw_data {
 	uint8_t		mqenable;
 	struct req_que **req_q_map;
 	struct rsp_que **rsp_q_map;
+	struct qla_qpair **queue_pair_map;
 	unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
 	unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
+	unsigned long qpair_qid_map[(QLA_MAX_QUEUES / 8)
+		/ sizeof(unsigned long)];
 	uint8_t		max_req_queues;
 	uint8_t		max_rsp_queues;
+	uint8_t		max_qpairs;
+	struct qla_qpair *base_qpair;
 	struct qla_npiv_entry *npiv_info;
 	uint16_t	nvram_npiv_size;
 
@@ -3328,6 +3368,7 @@ struct qla_hw_data {
 
 	struct mutex vport_lock;	/* Virtual port synchronization */
 	spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */
+	struct mutex mq_lock;	/* multi-queue synchronization */
 	struct completion mbx_cmd_comp; /* Serialize mbx access */
 	struct completion mbx_intr_comp;  /* Used for completion notification */
 	struct completion dcbx_comp;	/* For set port config notification */
@@ -3608,6 +3649,7 @@ typedef struct scsi_qla_host {
 
 		uint32_t	fw_tgt_reported:1;
 		uint32_t	bbcr_enable:1;
+		uint32_t	qpairs_available:1;
 	} flags;
 
 	atomic_t	loop_state;
@@ -3646,6 +3688,7 @@ typedef struct scsi_qla_host {
 #define FX00_TARGET_SCAN	24
 #define FX00_CRITEMP_RECOVERY	25
 #define FX00_HOST_INFO_RESEND	26
+#define QPAIR_ONLINE_CHECK_NEEDED	27
 
 	unsigned long	pci_flags;
 #define PFLG_DISCONNECTED	0	/* PCI device removed */
@@ -3704,10 +3747,13 @@ typedef struct scsi_qla_host {
 	/* List of pending PLOGI acks, protected by hw lock */
 	struct list_head	plogi_ack_list;
 
+	struct list_head	qp_list;
+
 	uint32_t	vp_abort_cnt;
 
 	struct fc_vport	*fc_vport;	/* holds fc_vport * for each vport */
 	uint16_t	vp_idx;		/* vport ID */
+	struct qla_qpair *qpair;	/* base qpair */
 
 	unsigned long	vp_flags;
 #define VP_IDX_ACQUIRED	0	/* bit no 0 */
@@ -3763,6 +3809,23 @@ struct qla_tgt_vp_map {
 	scsi_qla_host_t *vha;
 };
 
+struct qla2_sgx {
+	dma_addr_t	dma_addr;	/* OUT */
+	uint32_t	dma_len;	/* OUT */
+
+	uint32_t	tot_bytes;	/* IN */
+	struct scatterlist *cur_sg;	/* IN */
+
+	/* for book keeping, bzero on initial invocation */
+	uint32_t	bytes_consumed;
+	uint32_t	num_bytes;
+	uint32_t	tot_partial;
+
+	/* for debugging */
+	uint32_t	num_sg;
+	srb_t		*sp;
+};
+
 /*
 * Macros to help code, maintain, etc.
 */
@@ -3775,21 +3838,34 @@ struct qla_tgt_vp_map {
 	(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
	 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
 
 #define QLA_VHA_MARK_BUSY(__vha, __bail) do {	\
 	atomic_inc(&__vha->vref_count);		\
 	mb();					\
 	if (__vha->flags.delete_progress) {	\
 		atomic_dec(&__vha->vref_count);	\
 		__bail = 1;			\
 	} else {				\
 		__bail = 0;			\
 	}					\
 } while (0)
 
-#define QLA_VHA_MARK_NOT_BUSY(__vha) do {	\
+#define QLA_VHA_MARK_NOT_BUSY(__vha)		\
 	atomic_dec(&__vha->vref_count);		\
+
+#define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do {	\
+	atomic_inc(&__qpair->ref_count);		\
+	mb();						\
+	if (__qpair->delete_in_progress) {		\
+		atomic_dec(&__qpair->ref_count);	\
+		__bail = 1;				\
+	} else {					\
+		__bail = 0;				\
+	}						\
 } while (0)
 
+#define QLA_QPAIR_MARK_NOT_BUSY(__qpair)		\
+	atomic_dec(&__qpair->ref_count);		\
+
 /*
 * qla2x00 local function return status codes
 */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index c51d9f3359e3..afa0116a163b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -91,12 +91,17 @@ extern int
 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
 extern int qla2x00_init_rings(scsi_qla_host_t *);
 extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *);
+extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *,
+	int, int);
+extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *);
 
 /*
 * Global Data in qla_os.c source file.
 */
 extern char qla2x00_version_str[];
 
+extern struct kmem_cache *srb_cachep;
+
 extern int ql2xlogintimeout;
 extern int qlport_down_retry;
 extern int ql2xplogiabsentdevice;
@@ -105,8 +110,7 @@ extern int ql2xfdmienable;
 extern int ql2xallocfwdump;
 extern int ql2xextended_error_logging;
 extern int ql2xiidmaenable;
-extern int ql2xmaxqueues;
-extern int ql2xmultique_tag;
+extern int ql2xmqsupport;
 extern int ql2xfwloadbin;
 extern int ql2xetsenable;
 extern int ql2xshiftctondsd;
@@ -172,6 +176,9 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
 
 extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
 extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
+extern void qla2x00_sp_compl(void *, void *, int);
+extern void qla2xxx_qpair_sp_free_dma(void *, void *);
+extern void qla2xxx_qpair_sp_compl(void *, void *, int);
 
 /*
 * Global Functions in qla_mid.c source file.
@@ -220,6 +227,8 @@ extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
 extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
 extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
 extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t);
+extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *,
+	uint16_t, struct req_que *);
 extern int qla2x00_start_scsi(srb_t *sp);
 extern int qla24xx_start_scsi(srb_t *sp);
 int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
@@ -227,6 +236,7 @@ int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
 extern int qla2x00_start_sp(srb_t *);
 extern int qla24xx_dif_start_scsi(srb_t *);
 extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
+extern int qla2xxx_dif_start_scsi_mq(srb_t *);
 extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
 
 extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
@@ -237,7 +247,10 @@ extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
 	uint32_t *, uint16_t, struct qla_tgt_cmd *);
 extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
 	uint32_t *, uint16_t, struct qla_tgt_cmd *);
-
+extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
+extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
+extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *,
+	struct cmd_type_crc_2 *, uint16_t, uint16_t, uint16_t);
 
 /*
 * Global Function Prototypes in qla_mbx.c source file.
@@ -468,6 +481,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *, const char *, struct req_que *,
 extern void
 qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
 	uint32_t);
+extern irqreturn_t
+qla2xxx_msix_rsp_q(int irq, void *dev_id);
 
 /*
 * Global Function Prototypes in qla_sup.c source file.
@@ -603,15 +618,18 @@ extern int qla2x00_dfs_setup(scsi_qla_host_t *);
 extern int qla2x00_dfs_remove(scsi_qla_host_t *);
 
 /* Globa function prototypes for multi-q */
-extern int qla25xx_request_irq(struct rsp_que *);
+extern int qla25xx_request_irq(struct qla_hw_data *, struct qla_qpair *,
+	struct qla_msix_entry *, int);
 extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
 extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
 extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
 	uint16_t, int, uint8_t);
 extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
-	uint16_t, int);
+	uint16_t, struct qla_qpair *);
+
 extern void qla2x00_init_response_q_entries(struct rsp_que *);
 extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
+extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
 extern int qla25xx_delete_queues(struct scsi_qla_host *);
 extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
 extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 5b09296b46a3..632d5f30386a 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1769,8 +1769,7 @@ qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
1769 if (req->outstanding_cmds) 1769 if (req->outstanding_cmds)
1770 return QLA_SUCCESS; 1770 return QLA_SUCCESS;
1771 1771
1772 if (!IS_FWI2_CAPABLE(ha) || (ha->mqiobase && 1772 if (!IS_FWI2_CAPABLE(ha))
1773 (ql2xmultique_tag || ql2xmaxqueues > 1)))
1774 req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS; 1773 req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
1775 else { 1774 else {
1776 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count) 1775 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
@@ -4248,10 +4247,7 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
4248 struct req_que *req; 4247 struct req_que *req;
4249 struct rsp_que *rsp; 4248 struct rsp_que *rsp;
4250 4249
4251 if (vha->hw->flags.cpu_affinity_enabled) 4250 req = vha->req;
4252 req = vha->hw->req_q_map[0];
4253 else
4254 req = vha->req;
4255 rsp = req->rsp; 4251 rsp = req->rsp;
4256 4252
4257 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 4253 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
@@ -6040,10 +6036,10 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
6040 return -EINVAL; 6036 return -EINVAL;
6041 6037
6042 rval = qla2x00_fw_ready(base_vha); 6038 rval = qla2x00_fw_ready(base_vha);
6043 if (ha->flags.cpu_affinity_enabled) 6039 if (vha->qpair)
6044 req = ha->req_q_map[0]; 6040 req = vha->qpair->req;
6045 else 6041 else
6046 req = vha->req; 6042 req = ha->req_q_map[0];
6047 rsp = req->rsp; 6043 rsp = req->rsp;
6048 6044
6049 if (rval == QLA_SUCCESS) { 6045 if (rval == QLA_SUCCESS) {
@@ -6725,3 +6721,162 @@ qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
6725 6721
6726 return ret; 6722 return ret;
6727} 6723}
6724
6725struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int vp_idx)
6726{
6727 int rsp_id = 0;
6728 int req_id = 0;
6729 int i;
6730 struct qla_hw_data *ha = vha->hw;
6731 uint16_t qpair_id = 0;
6732 struct qla_qpair *qpair = NULL;
6733 struct qla_msix_entry *msix;
6734
6735 if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
6736 ql_log(ql_log_warn, vha, 0x00181,
6737 "FW/Driver is not multi-queue capable.\n");
6738 return NULL;
6739 }
6740
6741 if (ql2xmqsupport) {
6742 qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
6743 if (qpair == NULL) {
6744 ql_log(ql_log_warn, vha, 0x0182,
6745 "Failed to allocate memory for queue pair.\n");
6746 return NULL;
6747 }
6748 memset(qpair, 0, sizeof(struct qla_qpair));
6749
6750 qpair->hw = vha->hw;
6751
6752 /* Assign available que pair id */
6753 mutex_lock(&ha->mq_lock);
6754 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
6755 if (qpair_id >= ha->max_qpairs) {
6756 mutex_unlock(&ha->mq_lock);
6757 ql_log(ql_log_warn, vha, 0x0183,
6758 "No resources to create additional q pair.\n");
6759 goto fail_qid_map;
6760 }
6761 set_bit(qpair_id, ha->qpair_qid_map);
6762 ha->queue_pair_map[qpair_id] = qpair;
6763 qpair->id = qpair_id;
6764 qpair->vp_idx = vp_idx;
6765
6766 for (i = 0; i < ha->msix_count; i++) {
6767 msix = &ha->msix_entries[i];
6768 if (msix->in_use)
6769 continue;
6770 qpair->msix = msix;
6771 ql_log(ql_dbg_multiq, vha, 0xc00f,
6772 "Vector %x selected for qpair\n", msix->vector);
6773 break;
6774 }
6775 if (!qpair->msix) {
6776 ql_log(ql_log_warn, vha, 0x0184,
6777 "Out of MSI-X vectors!.\n");
6778 goto fail_msix;
6779 }
6780
6781 qpair->msix->in_use = 1;
6782 list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
6783
6784 mutex_unlock(&ha->mq_lock);
6785
6786 /* Create response queue first */
6787 rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair);
6788 if (!rsp_id) {
6789 ql_log(ql_log_warn, vha, 0x0185,
6790 "Failed to create response queue.\n");
6791 goto fail_rsp;
6792 }
6793
6794 qpair->rsp = ha->rsp_q_map[rsp_id];
6795
6796 /* Create request queue */
6797 req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos);
6798 if (!req_id) {
6799 ql_log(ql_log_warn, vha, 0x0186,
6800 "Failed to create request queue.\n");
6801 goto fail_req;
6802 }
6803
6804 qpair->req = ha->req_q_map[req_id];
6805 qpair->rsp->req = qpair->req;
6806
6807 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
6808 if (ha->fw_attributes & BIT_4)
6809 qpair->difdix_supported = 1;
6810 }
6811
6812 qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
6813 if (!qpair->srb_mempool) {
6814 ql_log(ql_log_warn, vha, 0x0191,
6815 "Failed to create srb mempool for qpair %d\n",
6816 qpair->id);
6817 goto fail_mempool;
6818 }
6819
6820 /* Mark as online */
6821 qpair->online = 1;
6822
6823 if (!vha->flags.qpairs_available)
6824 vha->flags.qpairs_available = 1;
6825
6826 ql_dbg(ql_dbg_multiq, vha, 0xc00d,
6827 "Request/Response queue pair created, id %d\n",
6828 qpair->id);
6829 ql_dbg(ql_dbg_init, vha, 0x0187,
6830 "Request/Response queue pair created, id %d\n",
6831 qpair->id);
6832 }
6833 return qpair;
6834
6835fail_mempool:
6836fail_req:
6837 qla25xx_delete_rsp_que(vha, qpair->rsp);
6838fail_rsp:
6839 mutex_lock(&ha->mq_lock);
6840 qpair->msix->in_use = 0;
6841 list_del(&qpair->qp_list_elem);
6842 if (list_empty(&vha->qp_list))
6843 vha->flags.qpairs_available = 0;
6844fail_msix:
6845 ha->queue_pair_map[qpair_id] = NULL;
6846 clear_bit(qpair_id, ha->qpair_qid_map);
6847 mutex_unlock(&ha->mq_lock);
6848fail_qid_map:
6849 kfree(qpair);
6850 return NULL;
6851}
6852
6853int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
6854{
6855 int ret;
6856 struct qla_hw_data *ha = qpair->hw;
6857
6858 qpair->delete_in_progress = 1;
6859 while (atomic_read(&qpair->ref_count))
6860 msleep(500);
6861
6862 ret = qla25xx_delete_req_que(vha, qpair->req);
6863 if (ret != QLA_SUCCESS)
6864 goto fail;
6865 ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
6866 if (ret != QLA_SUCCESS)
6867 goto fail;
6868
6869 mutex_lock(&ha->mq_lock);
6870 ha->queue_pair_map[qpair->id] = NULL;
6871 clear_bit(qpair->id, ha->qpair_qid_map);
6872 list_del(&qpair->qp_list_elem);
6873 if (list_empty(&vha->qp_list))
6874 vha->flags.qpairs_available = 0;
6875 mempool_destroy(qpair->srb_mempool);
6876 kfree(qpair);
6877 mutex_unlock(&ha->mq_lock);
6878
6879 return QLA_SUCCESS;
6880fail:
6881 return ret;
6882}
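qla2xxx_create_qpair() and qla2xxx_delete_qpair() above hand out queue-pair IDs from a bitmap guarded by ha->mq_lock. The same find_first_zero_bit()/set_bit()/clear_bit() pattern, reduced to a self-contained sketch (MAX_IDS and the lock are placeholders, not driver names):

    #include <linux/bitmap.h>
    #include <linux/bitops.h>
    #include <linux/errno.h>
    #include <linux/mutex.h>

    #define MAX_IDS 32

    static DEFINE_MUTEX(id_lock);
    static DECLARE_BITMAP(id_map, MAX_IDS);

    static int alloc_id(void)
    {
        int id;

        mutex_lock(&id_lock);
        id = find_first_zero_bit(id_map, MAX_IDS);
        if (id >= MAX_IDS) {
            mutex_unlock(&id_lock);
            return -ENOSPC;         /* no free slot, like the 0x0183 path */
        }
        set_bit(id, id_map);
        mutex_unlock(&id_lock);
        return id;
    }

    static void free_id(int id)
    {
        mutex_lock(&id_lock);
        clear_bit(id, id_map);
        mutex_unlock(&id_lock);
    }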
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index edc48f3b8230..44e404583c86 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -216,6 +216,36 @@ qla2x00_reset_active(scsi_qla_host_t *vha)
216} 216}
217 217
218static inline srb_t * 218static inline srb_t *
219qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
220{
221 srb_t *sp = NULL;
222 uint8_t bail;
223
224 QLA_QPAIR_MARK_BUSY(qpair, bail);
225 if (unlikely(bail))
226 return NULL;
227
228 sp = mempool_alloc(qpair->srb_mempool, flag);
229 if (!sp)
230 goto done;
231
232 memset(sp, 0, sizeof(*sp));
233 sp->fcport = fcport;
234 sp->iocbs = 1;
235done:
236 if (!sp)
237 QLA_QPAIR_MARK_NOT_BUSY(qpair);
238 return sp;
239}
240
241static inline void
242qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
243{
244 mempool_free(sp, qpair->srb_mempool);
245 QLA_QPAIR_MARK_NOT_BUSY(qpair);
246}
247
248static inline srb_t *
219qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag) 249qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
220{ 250{
221 srb_t *sp = NULL; 251 srb_t *sp = NULL;
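The new qpair SRB helpers pair a mempool allocation with a busy marker so qla2xxx_delete_qpair() can wait for qpair->ref_count to drain before tearing the pair down. A hedged sketch of how a submit path might use them, assuming QLA_QPAIR_MARK_BUSY()/QLA_QPAIR_MARK_NOT_BUSY() bump and drop that reference count (the error mapping is illustrative):

    srb_t *sp;

    sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
    if (!sp)
        return SCSI_MLQUEUE_HOST_BUSY;  /* qpair deleting or pool exhausted */

    sp->qpair = qpair;
    /* ... build and fire the command; on any early failure: */
    qla2xxx_rel_qpair_sp(qpair, sp);    /* frees the SRB and drops the ref */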
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 221ad8907893..58e49a3e1de8 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -12,7 +12,6 @@
12 12
13#include <scsi/scsi_tcq.h> 13#include <scsi/scsi_tcq.h>
14 14
15static void qla25xx_set_que(srb_t *, struct rsp_que **);
16/** 15/**
17 * qla2x00_get_cmd_direction() - Determine control_flag data direction. 16 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
18 * @cmd: SCSI command 17 * @cmd: SCSI command
@@ -143,7 +142,7 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
143 return (cont_pkt); 142 return (cont_pkt);
144} 143}
145 144
146static inline int 145inline int
147qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts) 146qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
148{ 147{
149 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 148 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
@@ -693,10 +692,11 @@ qla24xx_calc_dsd_lists(uint16_t dsds)
693 * @sp: SRB command to process 692 * @sp: SRB command to process
694 * @cmd_pkt: Command type 3 IOCB 693 * @cmd_pkt: Command type 3 IOCB
695 * @tot_dsds: Total number of segments to transfer 694 * @tot_dsds: Total number of segments to transfer
695 * @req: pointer to request queue
696 */ 696 */
697static inline void 697inline void
698qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, 698qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
699 uint16_t tot_dsds) 699 uint16_t tot_dsds, struct req_que *req)
700{ 700{
701 uint16_t avail_dsds; 701 uint16_t avail_dsds;
702 uint32_t *cur_dsd; 702 uint32_t *cur_dsd;
@@ -745,7 +745,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
745 * Five DSDs are available in the Continuation 745 * Five DSDs are available in the Continuation
746 * Type 1 IOCB. 746 * Type 1 IOCB.
747 */ 747 */
748 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); 748 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
749 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 749 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
750 avail_dsds = 5; 750 avail_dsds = 5;
751 } 751 }
@@ -845,24 +845,7 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
845 } 845 }
846} 846}
847 847
848struct qla2_sgx { 848int
849 dma_addr_t dma_addr; /* OUT */
850 uint32_t dma_len; /* OUT */
851
852 uint32_t tot_bytes; /* IN */
853 struct scatterlist *cur_sg; /* IN */
854
855 /* for book keeping, bzero on initial invocation */
856 uint32_t bytes_consumed;
857 uint32_t num_bytes;
858 uint32_t tot_partial;
859
860 /* for debugging */
861 uint32_t num_sg;
862 srb_t *sp;
863};
864
865static int
866qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, 849qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
867 uint32_t *partial) 850 uint32_t *partial)
868{ 851{
@@ -1207,7 +1190,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1207 * @cmd_pkt: Command type 3 IOCB 1190 * @cmd_pkt: Command type 3 IOCB
1208 * @tot_dsds: Total number of segments to transfer 1191 * @tot_dsds: Total number of segments to transfer
1209 */ 1192 */
1210static inline int 1193inline int
1211qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, 1194qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1212 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts) 1195 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1213{ 1196{
@@ -1436,8 +1419,8 @@ qla24xx_start_scsi(srb_t *sp)
1436 struct qla_hw_data *ha = vha->hw; 1419 struct qla_hw_data *ha = vha->hw;
1437 1420
1438 /* Setup device pointers. */ 1421 /* Setup device pointers. */
1439 qla25xx_set_que(sp, &rsp);
1440 req = vha->req; 1422 req = vha->req;
1423 rsp = req->rsp;
1441 1424
1442 /* So we know we haven't pci_map'ed anything yet */ 1425 /* So we know we haven't pci_map'ed anything yet */
1443 tot_dsds = 0; 1426 tot_dsds = 0;
@@ -1523,12 +1506,10 @@ qla24xx_start_scsi(srb_t *sp)
1523 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 1506 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1524 1507
1525 /* Build IOCB segments */ 1508 /* Build IOCB segments */
1526 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds); 1509 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1527 1510
1528 /* Set total data segment count. */ 1511 /* Set total data segment count. */
1529 cmd_pkt->entry_count = (uint8_t)req_cnt; 1512 cmd_pkt->entry_count = (uint8_t)req_cnt;
1530 /* Specify response queue number where completion should happen */
1531 cmd_pkt->entry_status = (uint8_t) rsp->id;
1532 wmb(); 1513 wmb();
1533 /* Adjust ring index. */ 1514 /* Adjust ring index. */
1534 req->ring_index++; 1515 req->ring_index++;
@@ -1597,9 +1578,8 @@ qla24xx_dif_start_scsi(srb_t *sp)
1597 } 1578 }
1598 1579
1599 /* Setup device pointers. */ 1580 /* Setup device pointers. */
1600
1601 qla25xx_set_que(sp, &rsp);
1602 req = vha->req; 1581 req = vha->req;
1582 rsp = req->rsp;
1603 1583
1604 /* So we know we haven't pci_map'ed anything yet */ 1584 /* So we know we haven't pci_map'ed anything yet */
1605 tot_dsds = 0; 1585 tot_dsds = 0;
@@ -1764,18 +1744,365 @@ queuing_error:
1764 return QLA_FUNCTION_FAILED; 1744 return QLA_FUNCTION_FAILED;
1765} 1745}
1766 1746
1767 1747/**
1768static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp) 1748 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1749 * @sp: command to send to the ISP
1750 *
1751 * Returns non-zero if a failure occurred, else zero.
1752 */
1753static int
1754qla2xxx_start_scsi_mq(srb_t *sp)
1769{ 1755{
1756 int nseg;
1757 unsigned long flags;
1758 uint32_t *clr_ptr;
1759 uint32_t index;
1760 uint32_t handle;
1761 struct cmd_type_7 *cmd_pkt;
1762 uint16_t cnt;
1763 uint16_t req_cnt;
1764 uint16_t tot_dsds;
1765 struct req_que *req = NULL;
1766 struct rsp_que *rsp = NULL;
1770 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1767 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1771 struct qla_hw_data *ha = sp->fcport->vha->hw; 1768 struct scsi_qla_host *vha = sp->fcport->vha;
1772 int affinity = cmd->request->cpu; 1769 struct qla_hw_data *ha = vha->hw;
1770 struct qla_qpair *qpair = sp->qpair;
1771
1772 /* Setup qpair pointers */
1773 rsp = qpair->rsp;
1774 req = qpair->req;
1775
1776 /* So we know we haven't pci_map'ed anything yet */
1777 tot_dsds = 0;
1778
1779 /* Send marker if required */
1780 if (vha->marker_needed != 0) {
1781 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1782 QLA_SUCCESS)
1783 return QLA_FUNCTION_FAILED;
1784 vha->marker_needed = 0;
1785 }
1786
1787 /* Acquire qpair specific lock */
1788 spin_lock_irqsave(&qpair->qp_lock, flags);
1789
1790 /* Check for room in outstanding command list. */
1791 handle = req->current_outstanding_cmd;
1792 for (index = 1; index < req->num_outstanding_cmds; index++) {
1793 handle++;
1794 if (handle == req->num_outstanding_cmds)
1795 handle = 1;
1796 if (!req->outstanding_cmds[handle])
1797 break;
1798 }
1799 if (index == req->num_outstanding_cmds)
1800 goto queuing_error;
1801
1802 /* Map the sg table so we have an accurate count of sg entries needed */
1803 if (scsi_sg_count(cmd)) {
1804 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1805 scsi_sg_count(cmd), cmd->sc_data_direction);
1806 if (unlikely(!nseg))
1807 goto queuing_error;
1808 } else
1809 nseg = 0;
1810
1811 tot_dsds = nseg;
1812 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1813 if (req->cnt < (req_cnt + 2)) {
1814 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1815 RD_REG_DWORD_RELAXED(req->req_q_out);
1816 if (req->ring_index < cnt)
1817 req->cnt = cnt - req->ring_index;
1818 else
1819 req->cnt = req->length -
1820 (req->ring_index - cnt);
1821 if (req->cnt < (req_cnt + 2))
1822 goto queuing_error;
1823 }
1824
1825 /* Build command packet. */
1826 req->current_outstanding_cmd = handle;
1827 req->outstanding_cmds[handle] = sp;
1828 sp->handle = handle;
1829 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1830 req->cnt -= req_cnt;
1831
1832 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1833 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1834
1835 /* Zero out remaining portion of packet. */
1836 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1837 clr_ptr = (uint32_t *)cmd_pkt + 2;
1838 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1839 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1840
1841 /* Set NPORT-ID and LUN number */
1842 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1843 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1844 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1845 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1846 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1847
1848 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1849 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1850
1851 cmd_pkt->task = TSK_SIMPLE;
1852
1853 /* Load SCSI command packet. */
1854 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1855 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1856
1857 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1858
1859 /* Build IOCB segments */
1860 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1861
1862 /* Set total data segment count. */
1863 cmd_pkt->entry_count = (uint8_t)req_cnt;
1864 wmb();
1865 /* Adjust ring index. */
1866 req->ring_index++;
1867 if (req->ring_index == req->length) {
1868 req->ring_index = 0;
1869 req->ring_ptr = req->ring;
1870 } else
1871 req->ring_ptr++;
1872
1873 sp->flags |= SRB_DMA_VALID;
1874
1875 /* Set chip new ring index. */
1876 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1877
1878 /* Manage unprocessed RIO/ZIO commands in response queue. */
1879 if (vha->flags.process_response_queue &&
1880 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1881 qla24xx_process_response_queue(vha, rsp);
1882
1883 spin_unlock_irqrestore(&qpair->qp_lock, flags);
1884 return QLA_SUCCESS;
1885
1886queuing_error:
1887 if (tot_dsds)
1888 scsi_dma_unmap(cmd);
1889
1890 spin_unlock_irqrestore(&qpair->qp_lock, flags);
1891
1892 return QLA_FUNCTION_FAILED;
1893}
1894
1895
1896/**
1897 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
1898 * @sp: command to send to the ISP
1899 *
1900 * Returns non-zero if a failure occurred, else zero.
1901 */
1902int
1903qla2xxx_dif_start_scsi_mq(srb_t *sp)
1904{
1905 int nseg;
1906 unsigned long flags;
1907 uint32_t *clr_ptr;
1908 uint32_t index;
1909 uint32_t handle;
1910 uint16_t cnt;
1911 uint16_t req_cnt = 0;
1912 uint16_t tot_dsds;
1913 uint16_t tot_prot_dsds;
1914 uint16_t fw_prot_opts = 0;
1915 struct req_que *req = NULL;
1916 struct rsp_que *rsp = NULL;
1917 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1918 struct scsi_qla_host *vha = sp->fcport->vha;
1919 struct qla_hw_data *ha = vha->hw;
1920 struct cmd_type_crc_2 *cmd_pkt;
1921 uint32_t status = 0;
1922 struct qla_qpair *qpair = sp->qpair;
1923
1924#define QDSS_GOT_Q_SPACE BIT_0
1925
1926 /* Check for host side state */
1927 if (!qpair->online) {
1928 cmd->result = DID_NO_CONNECT << 16;
1929 return QLA_INTERFACE_ERROR;
1930 }
1931
1932 if (!qpair->difdix_supported &&
1933 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1934 cmd->result = DID_NO_CONNECT << 16;
1935 return QLA_INTERFACE_ERROR;
1936 }
1937
1938 /* Only process protection or >16 cdb in this routine */
1939 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1940 if (cmd->cmd_len <= 16)
1941 return qla2xxx_start_scsi_mq(sp);
1942 }
1943
1944 /* Setup qpair pointers */
1945 rsp = qpair->rsp;
1946 req = qpair->req;
1947
1948 /* So we know we haven't pci_map'ed anything yet */
1949 tot_dsds = 0;
1950
1951 /* Send marker if required */
1952 if (vha->marker_needed != 0) {
1953 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1954 QLA_SUCCESS)
1955 return QLA_FUNCTION_FAILED;
1956 vha->marker_needed = 0;
1957 }
1958
1959 /* Acquire ring specific lock */
1960 spin_lock_irqsave(&qpair->qp_lock, flags);
1961
1962 /* Check for room in outstanding command list. */
1963 handle = req->current_outstanding_cmd;
1964 for (index = 1; index < req->num_outstanding_cmds; index++) {
1965 handle++;
1966 if (handle == req->num_outstanding_cmds)
1967 handle = 1;
1968 if (!req->outstanding_cmds[handle])
1969 break;
1970 }
1971
1972 if (index == req->num_outstanding_cmds)
1973 goto queuing_error;
1974
1975 /* Compute number of required data segments */
1976 /* Map the sg table so we have an accurate count of sg entries needed */
1977 if (scsi_sg_count(cmd)) {
1978 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1979 scsi_sg_count(cmd), cmd->sc_data_direction);
1980 if (unlikely(!nseg))
1981 goto queuing_error;
1982 else
1983 sp->flags |= SRB_DMA_VALID;
1984
1985 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1986 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1987 struct qla2_sgx sgx;
1988 uint32_t partial;
1989
1990 memset(&sgx, 0, sizeof(struct qla2_sgx));
1991 sgx.tot_bytes = scsi_bufflen(cmd);
1992 sgx.cur_sg = scsi_sglist(cmd);
1993 sgx.sp = sp;
1994
1995 nseg = 0;
1996 while (qla24xx_get_one_block_sg(
1997 cmd->device->sector_size, &sgx, &partial))
1998 nseg++;
1999 }
2000 } else
2001 nseg = 0;
2002
2003 /* number of required data segments */
2004 tot_dsds = nseg;
2005
2006 /* Compute number of required protection segments */
2007 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2008 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2009 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2010 if (unlikely(!nseg))
2011 goto queuing_error;
2012 else
2013 sp->flags |= SRB_CRC_PROT_DMA_VALID;
2014
2015 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2016 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2017 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2018 }
2019 } else {
2020 nseg = 0;
2021 }
2022
2023 req_cnt = 1;
2024 /* Total Data and protection sg segment(s) */
2025 tot_prot_dsds = nseg;
2026 tot_dsds += nseg;
2027 if (req->cnt < (req_cnt + 2)) {
2028 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2029 RD_REG_DWORD_RELAXED(req->req_q_out);
2030 if (req->ring_index < cnt)
2031 req->cnt = cnt - req->ring_index;
2032 else
2033 req->cnt = req->length -
2034 (req->ring_index - cnt);
2035 if (req->cnt < (req_cnt + 2))
2036 goto queuing_error;
2037 }
2038
2039 status |= QDSS_GOT_Q_SPACE;
2040
2041 /* Build header part of command packet (excluding the OPCODE). */
2042 req->current_outstanding_cmd = handle;
2043 req->outstanding_cmds[handle] = sp;
2044 sp->handle = handle;
2045 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2046 req->cnt -= req_cnt;
2047
2048 /* Fill-in common area */
2049 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2050 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2051
2052 clr_ptr = (uint32_t *)cmd_pkt + 2;
2053 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2054
2055 /* Set NPORT-ID and LUN number */
2056 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2057 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2058 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2059 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1773 2060
1774 if (ha->flags.cpu_affinity_enabled && affinity >= 0 && 2061 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1775 affinity < ha->max_rsp_queues - 1) 2062 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1776 *rsp = ha->rsp_q_map[affinity + 1]; 2063
1777 else 2064 /* Total Data and protection segment(s) */
1778 *rsp = ha->rsp_q_map[0]; 2065 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2066
2067 /* Build IOCB segments and adjust for data protection segments */
2068 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2069 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2070 QLA_SUCCESS)
2071 goto queuing_error;
2072
2073 cmd_pkt->entry_count = (uint8_t)req_cnt;
2074 cmd_pkt->timeout = cpu_to_le16(0);
2075 wmb();
2076
2077 /* Adjust ring index. */
2078 req->ring_index++;
2079 if (req->ring_index == req->length) {
2080 req->ring_index = 0;
2081 req->ring_ptr = req->ring;
2082 } else
2083 req->ring_ptr++;
2084
2085 /* Set chip new ring index. */
2086 WRT_REG_DWORD(req->req_q_in, req->ring_index);
2087
2088 /* Manage unprocessed RIO/ZIO commands in response queue. */
2089 if (vha->flags.process_response_queue &&
2090 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2091 qla24xx_process_response_queue(vha, rsp);
2092
2093 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2094
2095 return QLA_SUCCESS;
2096
2097queuing_error:
2098 if (status & QDSS_GOT_Q_SPACE) {
2099 req->outstanding_cmds[handle] = NULL;
2100 req->cnt += req_cnt;
2101 }
2102 /* Cleanup will be performed by the caller (queuecommand) */
2103
2104 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2105 return QLA_FUNCTION_FAILED;
1779} 2106}
1780 2107
1781/* Generic Control-SRB manipulation functions. */ 2108/* Generic Control-SRB manipulation functions. */
@@ -2664,7 +2991,7 @@ sufficient_dsds:
2664 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 2991 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2665 2992
2666 /* Build IOCB segments */ 2993 /* Build IOCB segments */
2667 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds); 2994 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2668 2995
2669 /* Set total data segment count. */ 2996 /* Set total data segment count. */
2670 cmd_pkt->entry_count = (uint8_t)req_cnt; 2997 cmd_pkt->entry_count = (uint8_t)req_cnt;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d8efddf6f312..5093ca9b02ec 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2872,41 +2872,6 @@ out:
2872} 2872}
2873 2873
2874static irqreturn_t 2874static irqreturn_t
2875qla25xx_msix_rsp_q(int irq, void *dev_id)
2876{
2877 struct qla_hw_data *ha;
2878 scsi_qla_host_t *vha;
2879 struct rsp_que *rsp;
2880 struct device_reg_24xx __iomem *reg;
2881 unsigned long flags;
2882 uint32_t hccr = 0;
2883
2884 rsp = (struct rsp_que *) dev_id;
2885 if (!rsp) {
2886 ql_log(ql_log_info, NULL, 0x505b,
2887 "%s: NULL response queue pointer.\n", __func__);
2888 return IRQ_NONE;
2889 }
2890 ha = rsp->hw;
2891 vha = pci_get_drvdata(ha->pdev);
2892
2893 /* Clear the interrupt, if enabled, for this response queue */
2894 if (!ha->flags.disable_msix_handshake) {
2895 reg = &ha->iobase->isp24;
2896 spin_lock_irqsave(&ha->hardware_lock, flags);
2897 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2898 hccr = RD_REG_DWORD_RELAXED(&reg->hccr);
2899 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2900 }
2901 if (qla2x00_check_reg32_for_disconnect(vha, hccr))
2902 goto out;
2903 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2904
2905out:
2906 return IRQ_HANDLED;
2907}
2908
2909static irqreturn_t
2910qla24xx_msix_default(int irq, void *dev_id) 2875qla24xx_msix_default(int irq, void *dev_id)
2911{ 2876{
2912 scsi_qla_host_t *vha; 2877 scsi_qla_host_t *vha;
@@ -3002,6 +2967,35 @@ qla24xx_msix_default(int irq, void *dev_id)
3002 return IRQ_HANDLED; 2967 return IRQ_HANDLED;
3003} 2968}
3004 2969
2970irqreturn_t
2971qla2xxx_msix_rsp_q(int irq, void *dev_id)
2972{
2973 struct qla_hw_data *ha;
2974 struct qla_qpair *qpair;
2975 struct device_reg_24xx __iomem *reg;
2976 unsigned long flags;
2977
2978 qpair = dev_id;
2979 if (!qpair) {
2980 ql_log(ql_log_info, NULL, 0x505b,
2981 "%s: NULL response queue pointer.\n", __func__);
2982 return IRQ_NONE;
2983 }
2984 ha = qpair->hw;
2985
2986 /* Clear the interrupt, if enabled, for this response queue */
2987 if (unlikely(!ha->flags.disable_msix_handshake)) {
2988 reg = &ha->iobase->isp24;
2989 spin_lock_irqsave(&ha->hardware_lock, flags);
2990 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2991 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2992 }
2993
2994 queue_work(ha->wq, &qpair->q_work);
2995
2996 return IRQ_HANDLED;
2997}
2998
3005/* Interrupt handling helpers. */ 2999/* Interrupt handling helpers. */
3006 3000
3007struct qla_init_msix_entry { 3001struct qla_init_msix_entry {
@@ -3009,69 +3003,28 @@ struct qla_init_msix_entry {
3009 irq_handler_t handler; 3003 irq_handler_t handler;
3010}; 3004};
3011 3005
3012static struct qla_init_msix_entry msix_entries[3] = { 3006static struct qla_init_msix_entry msix_entries[] = {
3013 { "qla2xxx (default)", qla24xx_msix_default }, 3007 { "qla2xxx (default)", qla24xx_msix_default },
3014 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, 3008 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
3015 { "qla2xxx (multiq)", qla25xx_msix_rsp_q }, 3009 { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
3010 { "qla2xxx (qpair_multiq)", qla2xxx_msix_rsp_q },
3016}; 3011};
3017 3012
3018static struct qla_init_msix_entry qla82xx_msix_entries[2] = { 3013static struct qla_init_msix_entry qla82xx_msix_entries[] = {
3019 { "qla2xxx (default)", qla82xx_msix_default }, 3014 { "qla2xxx (default)", qla82xx_msix_default },
3020 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, 3015 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
3021}; 3016};
3022 3017
3023static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
3024 { "qla2xxx (default)", qla24xx_msix_default },
3025 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
3026 { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
3027};
3028
3029static void
3030qla24xx_disable_msix(struct qla_hw_data *ha)
3031{
3032 int i;
3033 struct qla_msix_entry *qentry;
3034 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3035
3036 for (i = 0; i < ha->msix_count; i++) {
3037 qentry = &ha->msix_entries[i];
3038 if (qentry->have_irq) {
3039 /* un-register irq cpu affinity notification */
3040 irq_set_affinity_notifier(qentry->vector, NULL);
3041 free_irq(qentry->vector, qentry->rsp);
3042 }
3043 }
3044 pci_disable_msix(ha->pdev);
3045 kfree(ha->msix_entries);
3046 ha->msix_entries = NULL;
3047 ha->flags.msix_enabled = 0;
3048 ql_dbg(ql_dbg_init, vha, 0x0042,
3049 "Disabled the MSI.\n");
3050}
3051
3052static int 3018static int
3053qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) 3019qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3054{ 3020{
3055#define MIN_MSIX_COUNT 2 3021#define MIN_MSIX_COUNT 2
3056#define ATIO_VECTOR 2
3057 int i, ret; 3022 int i, ret;
3058 struct msix_entry *entries;
3059 struct qla_msix_entry *qentry; 3023 struct qla_msix_entry *qentry;
3060 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3024 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3061 3025
3062 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, 3026 ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count,
3063 GFP_KERNEL); 3027 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
3064 if (!entries) {
3065 ql_log(ql_log_warn, vha, 0x00bc,
3066 "Failed to allocate memory for msix_entry.\n");
3067 return -ENOMEM;
3068 }
3069
3070 for (i = 0; i < ha->msix_count; i++)
3071 entries[i].entry = i;
3072
3073 ret = pci_enable_msix_range(ha->pdev,
3074 entries, MIN_MSIX_COUNT, ha->msix_count);
3075 if (ret < 0) { 3028 if (ret < 0) {
3076 ql_log(ql_log_fatal, vha, 0x00c7, 3029 ql_log(ql_log_fatal, vha, 0x00c7,
3077 "MSI-X: Failed to enable support, " 3030 "MSI-X: Failed to enable support, "
@@ -3081,10 +3034,23 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3081 } else if (ret < ha->msix_count) { 3034 } else if (ret < ha->msix_count) {
3082 ql_log(ql_log_warn, vha, 0x00c6, 3035 ql_log(ql_log_warn, vha, 0x00c6,
3083 "MSI-X: Failed to enable support " 3036 "MSI-X: Failed to enable support "
3084 "-- %d/%d\n Retry with %d vectors.\n", 3037 "with %d vectors, using %d vectors.\n",
3085 ha->msix_count, ret, ret); 3038 ha->msix_count, ret);
3086 ha->msix_count = ret; 3039 ha->msix_count = ret;
3087 ha->max_rsp_queues = ha->msix_count - 1; 3040 /* Recalculate queue values */
3041 if (ha->mqiobase && ql2xmqsupport) {
3042 ha->max_req_queues = ha->msix_count - 1;
3043
3044 /* ATIOQ needs 1 vector. That's 1 less QPair */
3045 if (QLA_TGT_MODE_ENABLED())
3046 ha->max_req_queues--;
3047
3048 ha->max_rsp_queues = ha->max_req_queues;
3049
3050 ha->max_qpairs = ha->max_req_queues - 1;
3051 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
3052 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
3053 }
3088 } 3054 }
3089 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * 3055 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
3090 ha->msix_count, GFP_KERNEL); 3056 ha->msix_count, GFP_KERNEL);
@@ -3098,20 +3064,23 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3098 3064
3099 for (i = 0; i < ha->msix_count; i++) { 3065 for (i = 0; i < ha->msix_count; i++) {
3100 qentry = &ha->msix_entries[i]; 3066 qentry = &ha->msix_entries[i];
3101 qentry->vector = entries[i].vector; 3067 qentry->vector = pci_irq_vector(ha->pdev, i);
3102 qentry->entry = entries[i].entry; 3068 qentry->entry = i;
3103 qentry->have_irq = 0; 3069 qentry->have_irq = 0;
3104 qentry->rsp = NULL; 3070 qentry->in_use = 0;
3071 qentry->handle = NULL;
3105 qentry->irq_notify.notify = qla_irq_affinity_notify; 3072 qentry->irq_notify.notify = qla_irq_affinity_notify;
3106 qentry->irq_notify.release = qla_irq_affinity_release; 3073 qentry->irq_notify.release = qla_irq_affinity_release;
3107 qentry->cpuid = -1; 3074 qentry->cpuid = -1;
3108 } 3075 }
3109 3076
3110 /* Enable MSI-X vectors for the base queue */ 3077 /* Enable MSI-X vectors for the base queue */
3111 for (i = 0; i < 2; i++) { 3078 for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) {
3112 qentry = &ha->msix_entries[i]; 3079 qentry = &ha->msix_entries[i];
3113 qentry->rsp = rsp; 3080 qentry->handle = rsp;
3114 rsp->msix = qentry; 3081 rsp->msix = qentry;
3082 scnprintf(qentry->name, sizeof(qentry->name),
3083 msix_entries[i].name);
3115 if (IS_P3P_TYPE(ha)) 3084 if (IS_P3P_TYPE(ha))
3116 ret = request_irq(qentry->vector, 3085 ret = request_irq(qentry->vector,
3117 qla82xx_msix_entries[i].handler, 3086 qla82xx_msix_entries[i].handler,
@@ -3123,6 +3092,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3123 if (ret) 3092 if (ret)
3124 goto msix_register_fail; 3093 goto msix_register_fail;
3125 qentry->have_irq = 1; 3094 qentry->have_irq = 1;
3095 qentry->in_use = 1;
3126 3096
3127 /* Register for CPU affinity notification. */ 3097 /* Register for CPU affinity notification. */
3128 irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify); 3098 irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
@@ -3142,12 +3112,15 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3142 * queue. 3112 * queue.
3143 */ 3113 */
3144 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { 3114 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
3145 qentry = &ha->msix_entries[ATIO_VECTOR]; 3115 qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
3146 qentry->rsp = rsp;
3147 rsp->msix = qentry; 3116 rsp->msix = qentry;
3117 qentry->handle = rsp;
3118 scnprintf(qentry->name, sizeof(qentry->name),
3119 msix_entries[QLA_ATIO_VECTOR].name);
3120 qentry->in_use = 1;
3148 ret = request_irq(qentry->vector, 3121 ret = request_irq(qentry->vector,
3149 qla83xx_msix_entries[ATIO_VECTOR].handler, 3122 msix_entries[QLA_ATIO_VECTOR].handler,
3150 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp); 3123 0, msix_entries[QLA_ATIO_VECTOR].name, rsp);
3151 qentry->have_irq = 1; 3124 qentry->have_irq = 1;
3152 } 3125 }
3153 3126
@@ -3156,7 +3129,7 @@ msix_register_fail:
3156 ql_log(ql_log_fatal, vha, 0x00cb, 3129 ql_log(ql_log_fatal, vha, 0x00cb,
3157 "MSI-X: unable to register handler -- %x/%d.\n", 3130 "MSI-X: unable to register handler -- %x/%d.\n",
3158 qentry->vector, ret); 3131 qentry->vector, ret);
3159 qla24xx_disable_msix(ha); 3132 qla2x00_free_irqs(vha);
3160 ha->mqenable = 0; 3133 ha->mqenable = 0;
3161 goto msix_out; 3134 goto msix_out;
3162 } 3135 }
@@ -3164,11 +3137,13 @@ msix_register_fail:
3164 /* Enable MSI-X vector for response queue update for queue 0 */ 3137 /* Enable MSI-X vector for response queue update for queue 0 */
3165 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 3138 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
3166 if (ha->msixbase && ha->mqiobase && 3139 if (ha->msixbase && ha->mqiobase &&
3167 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) 3140 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
3141 ql2xmqsupport))
3168 ha->mqenable = 1; 3142 ha->mqenable = 1;
3169 } else 3143 } else
3170 if (ha->mqiobase 3144 if (ha->mqiobase &&
3171 && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) 3145 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
3146 ql2xmqsupport))
3172 ha->mqenable = 1; 3147 ha->mqenable = 1;
3173 ql_dbg(ql_dbg_multiq, vha, 0xc005, 3148 ql_dbg(ql_dbg_multiq, vha, 0xc005,
3174 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 3149 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
@@ -3178,7 +3153,6 @@ msix_register_fail:
3178 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 3153 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
3179 3154
3180msix_out: 3155msix_out:
3181 kfree(entries);
3182 return ret; 3156 return ret;
3183} 3157}
3184 3158
@@ -3231,7 +3205,7 @@ skip_msix:
3231 !IS_QLA27XX(ha)) 3205 !IS_QLA27XX(ha))
3232 goto skip_msi; 3206 goto skip_msi;
3233 3207
3234 ret = pci_enable_msi(ha->pdev); 3208 ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
3235 if (!ret) { 3209 if (!ret) {
3236 ql_dbg(ql_dbg_init, vha, 0x0038, 3210 ql_dbg(ql_dbg_init, vha, 0x0038,
3237 "MSI: Enabled.\n"); 3211 "MSI: Enabled.\n");
@@ -3276,6 +3250,8 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
3276{ 3250{
3277 struct qla_hw_data *ha = vha->hw; 3251 struct qla_hw_data *ha = vha->hw;
3278 struct rsp_que *rsp; 3252 struct rsp_que *rsp;
3253 struct qla_msix_entry *qentry;
3254 int i;
3279 3255
3280 /* 3256 /*
3281 * We need to check that ha->rsp_q_map is valid in case we are called 3257 * We need to check that ha->rsp_q_map is valid in case we are called
@@ -3285,25 +3261,36 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
3285 return; 3261 return;
3286 rsp = ha->rsp_q_map[0]; 3262 rsp = ha->rsp_q_map[0];
3287 3263
3288 if (ha->flags.msix_enabled) 3264 if (ha->flags.msix_enabled) {
3289 qla24xx_disable_msix(ha); 3265 for (i = 0; i < ha->msix_count; i++) {
3290 else if (ha->flags.msi_enabled) { 3266 qentry = &ha->msix_entries[i];
3291 free_irq(ha->pdev->irq, rsp); 3267 if (qentry->have_irq) {
3292 pci_disable_msi(ha->pdev); 3268 irq_set_affinity_notifier(qentry->vector, NULL);
3293 } else 3269 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
3294 free_irq(ha->pdev->irq, rsp); 3270 }
3295} 3271 }
3272 kfree(ha->msix_entries);
3273 ha->msix_entries = NULL;
3274 ha->flags.msix_enabled = 0;
3275 ql_dbg(ql_dbg_init, vha, 0x0042,
3276 "Disabled MSI-X.\n");
3277 } else {
3278 free_irq(pci_irq_vector(ha->pdev, 0), rsp);
3279 }
3296 3280
3281 pci_free_irq_vectors(ha->pdev);
3282}
3297 3283
3298int qla25xx_request_irq(struct rsp_que *rsp) 3284int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
3285 struct qla_msix_entry *msix, int vector_type)
3299{ 3286{
3300 struct qla_hw_data *ha = rsp->hw; 3287 struct qla_init_msix_entry *intr = &msix_entries[vector_type];
3301 struct qla_init_msix_entry *intr = &msix_entries[2];
3302 struct qla_msix_entry *msix = rsp->msix;
3303 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3288 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3304 int ret; 3289 int ret;
3305 3290
3306 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp); 3291 scnprintf(msix->name, sizeof(msix->name),
3292 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
3293 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
3307 if (ret) { 3294 if (ret) {
3308 ql_log(ql_log_fatal, vha, 0x00e6, 3295 ql_log(ql_log_fatal, vha, 0x00e6,
3309 "MSI-X: Unable to register handler -- %x/%d.\n", 3296 "MSI-X: Unable to register handler -- %x/%d.\n",
@@ -3311,7 +3298,7 @@ int qla25xx_request_irq(struct rsp_que *rsp)
3311 return ret; 3298 return ret;
3312 } 3299 }
3313 msix->have_irq = 1; 3300 msix->have_irq = 1;
3314 msix->rsp = rsp; 3301 msix->handle = qpair;
3315 return ret; 3302 return ret;
3316} 3303}
3317 3304
@@ -3324,11 +3311,12 @@ static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
3324 container_of(notify, struct qla_msix_entry, irq_notify); 3311 container_of(notify, struct qla_msix_entry, irq_notify);
3325 struct qla_hw_data *ha; 3312 struct qla_hw_data *ha;
3326 struct scsi_qla_host *base_vha; 3313 struct scsi_qla_host *base_vha;
3314 struct rsp_que *rsp = e->handle;
3327 3315
3328 /* user is recommended to set mask to just 1 cpu */ 3316 /* user is recommended to set mask to just 1 cpu */
3329 e->cpuid = cpumask_first(mask); 3317 e->cpuid = cpumask_first(mask);
3330 3318
3331 ha = e->rsp->hw; 3319 ha = rsp->hw;
3332 base_vha = pci_get_drvdata(ha->pdev); 3320 base_vha = pci_get_drvdata(ha->pdev);
3333 3321
3334 ql_dbg(ql_dbg_init, base_vha, 0xffff, 3322 ql_dbg(ql_dbg_init, base_vha, 0xffff,
@@ -3352,9 +3340,10 @@ static void qla_irq_affinity_release(struct kref *ref)
3352 container_of(ref, struct irq_affinity_notify, kref); 3340 container_of(ref, struct irq_affinity_notify, kref);
3353 struct qla_msix_entry *e = 3341 struct qla_msix_entry *e =
3354 container_of(notify, struct qla_msix_entry, irq_notify); 3342 container_of(notify, struct qla_msix_entry, irq_notify);
3355 struct scsi_qla_host *base_vha = pci_get_drvdata(e->rsp->hw->pdev); 3343 struct rsp_que *rsp = e->handle;
3344 struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev);
3356 3345
3357 ql_dbg(ql_dbg_init, base_vha, 0xffff, 3346 ql_dbg(ql_dbg_init, base_vha, 0xffff,
3358 "%s: host%ld: vector %d cpu %d \n", __func__, 3347 "%s: host%ld: vector %d cpu %d\n", __func__,
3359 base_vha->host_no, e->vector, e->cpuid); 3348 base_vha->host_no, e->vector, e->cpuid);
3360} 3349}
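The interrupt rework above drops the hand-rolled struct msix_entry array in favor of pci_alloc_irq_vectors(), pci_irq_vector() and pci_free_irq_vectors(). A minimal, self-contained sketch of that vector lifecycle using only the documented kernel calls (the "sketch" name string and error mapping are placeholders):

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    static int setup_vectors(struct pci_dev *pdev, unsigned int want,
                             irq_handler_t handler, void *data)
    {
        int i, ret, nvec;

        /* May grant anywhere from 2 up to 'want' vectors. */
        nvec = pci_alloc_irq_vectors(pdev, 2, want,
                                     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
        if (nvec < 0)
            return nvec;

        for (i = 0; i < nvec; i++) {
            ret = request_irq(pci_irq_vector(pdev, i), handler, 0,
                              "sketch", data);
            if (ret)
                goto unwind;
        }
        return nvec;

    unwind:
        while (--i >= 0)
            free_irq(pci_irq_vector(pdev, i), data);
        pci_free_irq_vectors(pdev);
        return ret;
    }

As the new qla2x00_free_irqs() shows, teardown must free_irq() every requested vector before calling pci_free_irq_vectors().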
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 23698c998699..2819ceb96041 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -10,6 +10,43 @@
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/gfp.h> 11#include <linux/gfp.h>
12 12
13struct rom_cmd {
14 uint16_t cmd;
15} rom_cmds[] = {
16 { MBC_LOAD_RAM },
17 { MBC_EXECUTE_FIRMWARE },
18 { MBC_READ_RAM_WORD },
19 { MBC_MAILBOX_REGISTER_TEST },
20 { MBC_VERIFY_CHECKSUM },
21 { MBC_GET_FIRMWARE_VERSION },
22 { MBC_LOAD_RISC_RAM },
23 { MBC_DUMP_RISC_RAM },
24 { MBC_LOAD_RISC_RAM_EXTENDED },
25 { MBC_DUMP_RISC_RAM_EXTENDED },
26 { MBC_WRITE_RAM_WORD_EXTENDED },
27 { MBC_READ_RAM_EXTENDED },
28 { MBC_GET_RESOURCE_COUNTS },
29 { MBC_SET_FIRMWARE_OPTION },
30 { MBC_MID_INITIALIZE_FIRMWARE },
31 { MBC_GET_FIRMWARE_STATE },
32 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
33 { MBC_GET_RETRY_COUNT },
34 { MBC_TRACE_CONTROL },
35};
36
37static int is_rom_cmd(uint16_t cmd)
38{
39 int i;
40 struct rom_cmd *wc;
41
42 for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
43 wc = rom_cmds + i;
44 if (wc->cmd == cmd)
45 return 1;
46 }
47
48 return 0;
49}
13 50
14/* 51/*
15 * qla2x00_mailbox_command 52 * qla2x00_mailbox_command
@@ -92,6 +129,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
92 return QLA_FUNCTION_TIMEOUT; 129 return QLA_FUNCTION_TIMEOUT;
93 } 130 }
94 131
132 /* check if ISP abort is active and return cmd with timeout */
133 if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
134 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
135 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
136 !is_rom_cmd(mcp->mb[0])) {
137 ql_log(ql_log_info, vha, 0x1005,
138 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
139 mcp->mb[0]);
140 return QLA_FUNCTION_TIMEOUT;
141 }
142
95 /* 143 /*
96 * Wait for active mailbox commands to finish by waiting at most tov 144 * Wait for active mailbox commands to finish by waiting at most tov
97 * seconds. This is to serialize actual issuing of mailbox cmds during 145 * seconds. This is to serialize actual issuing of mailbox cmds during
@@ -178,6 +226,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
178 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT); 226 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
179 spin_unlock_irqrestore(&ha->hardware_lock, flags); 227 spin_unlock_irqrestore(&ha->hardware_lock, flags);
180 228
229 wait_time = jiffies;
181 if (!wait_for_completion_timeout(&ha->mbx_intr_comp, 230 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
182 mcp->tov * HZ)) { 231 mcp->tov * HZ)) {
183 ql_dbg(ql_dbg_mbx, vha, 0x117a, 232 ql_dbg(ql_dbg_mbx, vha, 0x117a,
@@ -186,6 +235,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
186 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 235 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
187 spin_unlock_irqrestore(&ha->hardware_lock, flags); 236 spin_unlock_irqrestore(&ha->hardware_lock, flags);
188 } 237 }
238 if (time_after(jiffies, wait_time + 5 * HZ))
239 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
240 command, jiffies_to_msecs(jiffies - wait_time));
189 } else { 241 } else {
190 ql_dbg(ql_dbg_mbx, vha, 0x1011, 242 ql_dbg(ql_dbg_mbx, vha, 0x1011,
191 "Cmd=%x Polling Mode.\n", command); 243 "Cmd=%x Polling Mode.\n", command);
@@ -1194,12 +1246,17 @@ qla2x00_abort_command(srb_t *sp)
1194 fc_port_t *fcport = sp->fcport; 1246 fc_port_t *fcport = sp->fcport;
1195 scsi_qla_host_t *vha = fcport->vha; 1247 scsi_qla_host_t *vha = fcport->vha;
1196 struct qla_hw_data *ha = vha->hw; 1248 struct qla_hw_data *ha = vha->hw;
1197 struct req_que *req = vha->req; 1249 struct req_que *req;
1198 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1250 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1199 1251
1200 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b, 1252 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1201 "Entered %s.\n", __func__); 1253 "Entered %s.\n", __func__);
1202 1254
1255 if (vha->flags.qpairs_available && sp->qpair)
1256 req = sp->qpair->req;
1257 else
1258 req = vha->req;
1259
1203 spin_lock_irqsave(&ha->hardware_lock, flags); 1260 spin_lock_irqsave(&ha->hardware_lock, flags);
1204 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 1261 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1205 if (req->outstanding_cmds[handle] == sp) 1262 if (req->outstanding_cmds[handle] == sp)
@@ -2152,10 +2209,10 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2152 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061, 2209 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2153 "Entered %s.\n", __func__); 2210 "Entered %s.\n", __func__);
2154 2211
2155 if (ha->flags.cpu_affinity_enabled) 2212 if (vha->vp_idx && vha->qpair)
2156 req = ha->req_q_map[0]; 2213 req = vha->qpair->req;
2157 else 2214 else
2158 req = vha->req; 2215 req = ha->req_q_map[0];
2159 2216
2160 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2217 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2161 if (lg == NULL) { 2218 if (lg == NULL) {
@@ -2435,10 +2492,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2435 } 2492 }
2436 memset(lg, 0, sizeof(struct logio_entry_24xx)); 2493 memset(lg, 0, sizeof(struct logio_entry_24xx));
2437 2494
2438 if (ql2xmaxqueues > 1) 2495 req = vha->req;
2439 req = ha->req_q_map[0];
2440 else
2441 req = vha->req;
2442 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2496 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2443 lg->entry_count = 1; 2497 lg->entry_count = 1;
2444 lg->handle = MAKE_HANDLE(req->id, lg->handle); 2498 lg->handle = MAKE_HANDLE(req->id, lg->handle);
@@ -2904,6 +2958,9 @@ qla24xx_abort_command(srb_t *sp)
2904 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, 2958 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
2905 "Entered %s.\n", __func__); 2959 "Entered %s.\n", __func__);
2906 2960
2961 if (vha->flags.qpairs_available && sp->qpair)
2962 req = sp->qpair->req;
2963
2907 if (ql2xasynctmfenable) 2964 if (ql2xasynctmfenable)
2908 return qla24xx_async_abort_command(sp); 2965 return qla24xx_async_abort_command(sp);
2909 2966
@@ -2984,6 +3041,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2984 struct qla_hw_data *ha; 3041 struct qla_hw_data *ha;
2985 struct req_que *req; 3042 struct req_que *req;
2986 struct rsp_que *rsp; 3043 struct rsp_que *rsp;
3044 struct qla_qpair *qpair;
2987 3045
2988 vha = fcport->vha; 3046 vha = fcport->vha;
2989 ha = vha->hw; 3047 ha = vha->hw;
@@ -2992,10 +3050,15 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2992 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092, 3050 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
2993 "Entered %s.\n", __func__); 3051 "Entered %s.\n", __func__);
2994 3052
2995 if (ha->flags.cpu_affinity_enabled) 3053 if (vha->vp_idx && vha->qpair) {
2996 rsp = ha->rsp_q_map[tag + 1]; 3054 /* NPIV port */
2997 else 3055 qpair = vha->qpair;
3056 rsp = qpair->rsp;
3057 req = qpair->req;
3058 } else {
2998 rsp = req->rsp; 3059 rsp = req->rsp;
3060 }
3061
2999 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 3062 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3000 if (tsk == NULL) { 3063 if (tsk == NULL) {
3001 ql_log(ql_log_warn, vha, 0x1093, 3064 ql_log(ql_log_warn, vha, 0x1093,
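qla2x00_mailbox_command() now timestamps the interrupt-mode wait and warns when completion took more than five seconds. The jiffies pattern in isolation, as a sketch (done, tov and cmd are placeholders for the mailbox state):

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>
    #include <linux/types.h>

    static void wait_and_warn(struct completion *done, unsigned long tov,
                              u16 cmd)
    {
        unsigned long start = jiffies;

        wait_for_completion_timeout(done, tov * HZ);

        /* time_after() is the overflow-safe jiffies comparison */
        if (time_after(jiffies, start + 5 * HZ))
            pr_warn("cmd=0x%x, waited %u msecs\n", cmd,
                    jiffies_to_msecs(jiffies - start));
    }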
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index cf7ba52bae66..c6d6f0d912ff 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -540,9 +540,10 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
540 uint16_t que_id = rsp->id; 540 uint16_t que_id = rsp->id;
541 541
542 if (rsp->msix && rsp->msix->have_irq) { 542 if (rsp->msix && rsp->msix->have_irq) {
543 free_irq(rsp->msix->vector, rsp); 543 free_irq(rsp->msix->vector, rsp->msix->handle);
544 rsp->msix->have_irq = 0; 544 rsp->msix->have_irq = 0;
545 rsp->msix->rsp = NULL; 545 rsp->msix->in_use = 0;
546 rsp->msix->handle = NULL;
546 } 547 }
547 dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) * 548 dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
548 sizeof(response_t), rsp->ring, rsp->dma); 549 sizeof(response_t), rsp->ring, rsp->dma);
@@ -573,7 +574,7 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
573 return ret; 574 return ret;
574} 575}
575 576
576static int 577int
577qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) 578qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
578{ 579{
579 int ret = -1; 580 int ret = -1;
@@ -596,34 +597,42 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
596 struct req_que *req = NULL; 597 struct req_que *req = NULL;
597 struct rsp_que *rsp = NULL; 598 struct rsp_que *rsp = NULL;
598 struct qla_hw_data *ha = vha->hw; 599 struct qla_hw_data *ha = vha->hw;
600 struct qla_qpair *qpair, *tqpair;
599 601
600 /* Delete request queues */ 602 if (ql2xmqsupport) {
601 for (cnt = 1; cnt < ha->max_req_queues; cnt++) { 603 list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
602 req = ha->req_q_map[cnt]; 604 qp_list_elem)
603 if (req && test_bit(cnt, ha->req_qid_map)) { 605 qla2xxx_delete_qpair(vha, qpair);
604 ret = qla25xx_delete_req_que(vha, req); 606 } else {
605 if (ret != QLA_SUCCESS) { 607 /* Delete request queues */
606 ql_log(ql_log_warn, vha, 0x00ea, 608 for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
607 "Couldn't delete req que %d.\n", 609 req = ha->req_q_map[cnt];
608 req->id); 610 if (req && test_bit(cnt, ha->req_qid_map)) {
609 return ret; 611 ret = qla25xx_delete_req_que(vha, req);
612 if (ret != QLA_SUCCESS) {
613 ql_log(ql_log_warn, vha, 0x00ea,
614 "Couldn't delete req que %d.\n",
615 req->id);
616 return ret;
617 }
610 } 618 }
611 } 619 }
612 }
613 620
614 /* Delete response queues */ 621 /* Delete response queues */
615 for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) { 622 for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
616 rsp = ha->rsp_q_map[cnt]; 623 rsp = ha->rsp_q_map[cnt];
617 if (rsp && test_bit(cnt, ha->rsp_qid_map)) { 624 if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
618 ret = qla25xx_delete_rsp_que(vha, rsp); 625 ret = qla25xx_delete_rsp_que(vha, rsp);
619 if (ret != QLA_SUCCESS) { 626 if (ret != QLA_SUCCESS) {
620 ql_log(ql_log_warn, vha, 0x00eb, 627 ql_log(ql_log_warn, vha, 0x00eb,
621 "Couldn't delete rsp que %d.\n", 628 "Couldn't delete rsp que %d.\n",
622 rsp->id); 629 rsp->id);
623 return ret; 630 return ret;
631 }
624 } 632 }
625 } 633 }
626 } 634 }
635
627 return ret; 636 return ret;
628} 637}
629 638
@@ -659,10 +668,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
659 if (ret != QLA_SUCCESS) 668 if (ret != QLA_SUCCESS)
660 goto que_failed; 669 goto que_failed;
661 670
662 mutex_lock(&ha->vport_lock); 671 mutex_lock(&ha->mq_lock);
663 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues); 672 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
664 if (que_id >= ha->max_req_queues) { 673 if (que_id >= ha->max_req_queues) {
665 mutex_unlock(&ha->vport_lock); 674 mutex_unlock(&ha->mq_lock);
666 ql_log(ql_log_warn, base_vha, 0x00db, 675 ql_log(ql_log_warn, base_vha, 0x00db,
667 "No resources to create additional request queue.\n"); 676 "No resources to create additional request queue.\n");
668 goto que_failed; 677 goto que_failed;
@@ -708,7 +717,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
708 req->req_q_out = &reg->isp25mq.req_q_out; 717 req->req_q_out = &reg->isp25mq.req_q_out;
709 req->max_q_depth = ha->req_q_map[0]->max_q_depth; 718 req->max_q_depth = ha->req_q_map[0]->max_q_depth;
710 req->out_ptr = (void *)(req->ring + req->length); 719 req->out_ptr = (void *)(req->ring + req->length);
711 mutex_unlock(&ha->vport_lock); 720 mutex_unlock(&ha->mq_lock);
712 ql_dbg(ql_dbg_multiq, base_vha, 0xc004, 721 ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
713 "ring_ptr=%p ring_index=%d, " 722 "ring_ptr=%p ring_index=%d, "
714 "cnt=%d id=%d max_q_depth=%d.\n", 723 "cnt=%d id=%d max_q_depth=%d.\n",
@@ -724,9 +733,9 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
724 if (ret != QLA_SUCCESS) { 733 if (ret != QLA_SUCCESS) {
725 ql_log(ql_log_fatal, base_vha, 0x00df, 734 ql_log(ql_log_fatal, base_vha, 0x00df,
726 "%s failed.\n", __func__); 735 "%s failed.\n", __func__);
727 mutex_lock(&ha->vport_lock); 736 mutex_lock(&ha->mq_lock);
728 clear_bit(que_id, ha->req_qid_map); 737 clear_bit(que_id, ha->req_qid_map);
729 mutex_unlock(&ha->vport_lock); 738 mutex_unlock(&ha->mq_lock);
730 goto que_failed; 739 goto que_failed;
731 } 740 }
732 741
@@ -741,20 +750,20 @@ failed:
741static void qla_do_work(struct work_struct *work) 750static void qla_do_work(struct work_struct *work)
742{ 751{
743 unsigned long flags; 752 unsigned long flags;
744 struct rsp_que *rsp = container_of(work, struct rsp_que, q_work); 753 struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
745 struct scsi_qla_host *vha; 754 struct scsi_qla_host *vha;
746 struct qla_hw_data *ha = rsp->hw; 755 struct qla_hw_data *ha = qpair->hw;
747 756
748 spin_lock_irqsave(&rsp->hw->hardware_lock, flags); 757 spin_lock_irqsave(&qpair->qp_lock, flags);
749 vha = pci_get_drvdata(ha->pdev); 758 vha = pci_get_drvdata(ha->pdev);
750 qla24xx_process_response_queue(vha, rsp); 759 qla24xx_process_response_queue(vha, qpair->rsp);
751 spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags); 760 spin_unlock_irqrestore(&qpair->qp_lock, flags);
752} 761}
753 762
754/* create response queue */ 763/* create response queue */
755int 764int
756qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, 765qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
757 uint8_t vp_idx, uint16_t rid, int req) 766 uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair)
758{ 767{
759 int ret = 0; 768 int ret = 0;
760 struct rsp_que *rsp = NULL; 769 struct rsp_que *rsp = NULL;
@@ -779,28 +788,24 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
779 goto que_failed; 788 goto que_failed;
780 } 789 }
781 790
782 mutex_lock(&ha->vport_lock); 791 mutex_lock(&ha->mq_lock);
783 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues); 792 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
784 if (que_id >= ha->max_rsp_queues) { 793 if (que_id >= ha->max_rsp_queues) {
785 mutex_unlock(&ha->vport_lock); 794 mutex_unlock(&ha->mq_lock);
786 ql_log(ql_log_warn, base_vha, 0x00e2, 795 ql_log(ql_log_warn, base_vha, 0x00e2,
787 "No resources to create additional request queue.\n"); 796 "No resources to create additional request queue.\n");
788 goto que_failed; 797 goto que_failed;
789 } 798 }
790 set_bit(que_id, ha->rsp_qid_map); 799 set_bit(que_id, ha->rsp_qid_map);
791 800
792 if (ha->flags.msix_enabled) 801 rsp->msix = qpair->msix;
793 rsp->msix = &ha->msix_entries[que_id + 1];
794 else
795 ql_log(ql_log_warn, base_vha, 0x00e3,
796 "MSIX not enabled.\n");
797 802
798 ha->rsp_q_map[que_id] = rsp; 803 ha->rsp_q_map[que_id] = rsp;
799 rsp->rid = rid; 804 rsp->rid = rid;
800 rsp->vp_idx = vp_idx; 805 rsp->vp_idx = vp_idx;
801 rsp->hw = ha; 806 rsp->hw = ha;
802 ql_dbg(ql_dbg_init, base_vha, 0x00e4, 807 ql_dbg(ql_dbg_init, base_vha, 0x00e4,
803 "queue_id=%d rid=%d vp_idx=%d hw=%p.\n", 808 "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
804 que_id, rsp->rid, rsp->vp_idx, rsp->hw); 809 que_id, rsp->rid, rsp->vp_idx, rsp->hw);
805 /* Use alternate PCI bus number */ 810 /* Use alternate PCI bus number */
806 if (MSB(rsp->rid)) 811 if (MSB(rsp->rid))
@@ -812,23 +817,27 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
812 if (!IS_MSIX_NACK_CAPABLE(ha)) 817 if (!IS_MSIX_NACK_CAPABLE(ha))
813 options |= BIT_6; 818 options |= BIT_6;
814 819
820 /* Set option to indicate response queue creation */
821 options |= BIT_1;
822
815 rsp->options = options; 823 rsp->options = options;
816 rsp->id = que_id; 824 rsp->id = que_id;
817 reg = ISP_QUE_REG(ha, que_id); 825 reg = ISP_QUE_REG(ha, que_id);
818 rsp->rsp_q_in = &reg->isp25mq.rsp_q_in; 826 rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
819 rsp->rsp_q_out = &reg->isp25mq.rsp_q_out; 827 rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
820 rsp->in_ptr = (void *)(rsp->ring + rsp->length); 828 rsp->in_ptr = (void *)(rsp->ring + rsp->length);
821 mutex_unlock(&ha->vport_lock); 829 mutex_unlock(&ha->mq_lock);
822 ql_dbg(ql_dbg_multiq, base_vha, 0xc00b, 830 ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
823 "options=%x id=%d rsp_q_in=%p rsp_q_out=%p", 831 "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
824 rsp->options, rsp->id, rsp->rsp_q_in, 832 rsp->options, rsp->id, rsp->rsp_q_in,
825 rsp->rsp_q_out); 833 rsp->rsp_q_out);
826 ql_dbg(ql_dbg_init, base_vha, 0x00e5, 834 ql_dbg(ql_dbg_init, base_vha, 0x00e5,
827 "options=%x id=%d rsp_q_in=%p rsp_q_out=%p", 835 "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
828 rsp->options, rsp->id, rsp->rsp_q_in, 836 rsp->options, rsp->id, rsp->rsp_q_in,
829 rsp->rsp_q_out); 837 rsp->rsp_q_out);
830 838
831 ret = qla25xx_request_irq(rsp); 839 ret = qla25xx_request_irq(ha, qpair, qpair->msix,
840 QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
832 if (ret) 841 if (ret)
833 goto que_failed; 842 goto que_failed;
834 843
@@ -836,19 +845,16 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
 	if (ret != QLA_SUCCESS) {
 		ql_log(ql_log_fatal, base_vha, 0x00e7,
 		    "%s failed.\n", __func__);
-		mutex_lock(&ha->vport_lock);
+		mutex_lock(&ha->mq_lock);
 		clear_bit(que_id, ha->rsp_qid_map);
-		mutex_unlock(&ha->vport_lock);
+		mutex_unlock(&ha->mq_lock);
 		goto que_failed;
 	}
-	if (req >= 0)
-		rsp->req = ha->req_q_map[req];
-	else
-		rsp->req = NULL;
+	rsp->req = NULL;
 
 	qla2x00_init_response_q_entries(rsp);
-	if (rsp->hw->wq)
-		INIT_WORK(&rsp->q_work, qla_do_work);
+	if (qpair->hw->wq)
+		INIT_WORK(&qpair->q_work, qla_do_work);
 	return rsp->id;
 
 que_failed:
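The hunk above swaps the shared vport_lock for a dedicated mq_lock around queue-id bitmap allocation: take the mutex, find the lowest free bit, set it, and clear it under the same mutex on the failure path. A minimal userspace sketch of that id-allocation pattern (a pthread mutex stands in for the kernel mutex; names such as qid_alloc are ours, not driver code):

    #include <pthread.h>
    #include <stdint.h>

    #define MAX_QUEUES 32

    static pthread_mutex_t mq_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t qid_map;          /* bit set => queue id in use */

    /* Allocate the lowest free queue id, or -1 if the map is full. */
    static int qid_alloc(void)
    {
        int id = -1;

        pthread_mutex_lock(&mq_lock);
        for (int i = 0; i < MAX_QUEUES; i++) {
            if (!(qid_map & (1u << i))) {
                qid_map |= 1u << i;   /* set_bit() equivalent */
                id = i;
                break;
            }
        }
        pthread_mutex_unlock(&mq_lock);
        return id;
    }

On failure the caller clears the bit under the same mutex, which mirrors the clear_bit() in the error path of qla25xx_create_rsp_que().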
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 56d6142852a5..8521cfe302e9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -13,6 +13,7 @@
 #include <linux/mutex.h>
 #include <linux/kobject.h>
 #include <linux/slab.h>
+#include <linux/blk-mq-pci.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
 #include <scsi/scsi_transport.h>
@@ -30,7 +31,7 @@ static int apidev_major;
 /*
  * SRB allocation cache
  */
-static struct kmem_cache *srb_cachep;
+struct kmem_cache *srb_cachep;
 
 /*
  * CT6 CTX allocation cache
@@ -143,19 +144,12 @@ MODULE_PARM_DESC(ql2xiidmaenable,
143 "Enables iIDMA settings " 144 "Enables iIDMA settings "
144 "Default is 1 - perform iIDMA. 0 - no iIDMA."); 145 "Default is 1 - perform iIDMA. 0 - no iIDMA.");
145 146
146int ql2xmaxqueues = 1; 147int ql2xmqsupport = 1;
147module_param(ql2xmaxqueues, int, S_IRUGO); 148module_param(ql2xmqsupport, int, S_IRUGO);
148MODULE_PARM_DESC(ql2xmaxqueues, 149MODULE_PARM_DESC(ql2xmqsupport,
149 "Enables MQ settings " 150 "Enable on demand multiple queue pairs support "
150 "Default is 1 for single queue. Set it to number " 151 "Default is 1 for supported. "
151 "of queues in MQ mode."); 152 "Set it to 0 to turn off mq qpair support.");
152
153int ql2xmultique_tag;
154module_param(ql2xmultique_tag, int, S_IRUGO);
155MODULE_PARM_DESC(ql2xmultique_tag,
156 "Enables CPU affinity settings for the driver "
157 "Default is 0 for no affinity of request and response IO. "
158 "Set it to 1 to turn on the cpu affinity.");
159 153
160int ql2xfwloadbin; 154int ql2xfwloadbin;
161module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR); 155module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
@@ -261,6 +255,7 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
 static void qla2x00_clear_drv_active(struct qla_hw_data *);
 static void qla2x00_free_device(scsi_qla_host_t *);
 static void qla83xx_disable_laser(scsi_qla_host_t *vha);
+static int qla2xxx_map_queues(struct Scsi_Host *shost);
 
 struct scsi_host_template qla2xxx_driver_template = {
 	.module = THIS_MODULE,
@@ -280,6 +275,7 @@ struct scsi_host_template qla2xxx_driver_template = {
 	.scan_finished = qla2xxx_scan_finished,
 	.scan_start = qla2xxx_scan_start,
 	.change_queue_depth = scsi_change_queue_depth,
+	.map_queues = qla2xxx_map_queues,
 	.this_id = -1,
 	.cmd_per_lun = 3,
 	.use_clustering = ENABLE_CLUSTERING,
@@ -339,6 +335,8 @@ static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
 	struct req_que **, struct rsp_que **);
 static void qla2x00_free_fw_dump(struct qla_hw_data *);
 static void qla2x00_mem_free(struct qla_hw_data *);
+int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
+	struct qla_qpair *qpair);
 
 /* -------------------------------------------------------------------------- */
 static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
@@ -360,6 +358,25 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
360 "Unable to allocate memory for response queue ptrs.\n"); 358 "Unable to allocate memory for response queue ptrs.\n");
361 goto fail_rsp_map; 359 goto fail_rsp_map;
362 } 360 }
361
362 if (ql2xmqsupport && ha->max_qpairs) {
363 ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
364 GFP_KERNEL);
365 if (!ha->queue_pair_map) {
366 ql_log(ql_log_fatal, vha, 0x0180,
367 "Unable to allocate memory for queue pair ptrs.\n");
368 goto fail_qpair_map;
369 }
370 ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
371 if (ha->base_qpair == NULL) {
372 ql_log(ql_log_warn, vha, 0x0182,
373 "Failed to allocate base queue pair memory.\n");
374 goto fail_base_qpair;
375 }
376 ha->base_qpair->req = req;
377 ha->base_qpair->rsp = rsp;
378 }
379
363 /* 380 /*
364 * Make sure we record at least the request and response queue zero in 381 * Make sure we record at least the request and response queue zero in
365 * case we need to free them if part of the probe fails. 382 * case we need to free them if part of the probe fails.
@@ -370,6 +387,11 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
 	set_bit(0, ha->req_qid_map);
 	return 1;
 
+fail_base_qpair:
+	kfree(ha->queue_pair_map);
+fail_qpair_map:
+	kfree(ha->rsp_q_map);
+	ha->rsp_q_map = NULL;
 fail_rsp_map:
 	kfree(ha->req_q_map);
 	ha->req_q_map = NULL;
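The new labels extend the function's existing goto-unwind chain: each later allocation failure jumps to a label that frees exactly what was allocated before it, in reverse order. A generic, self-contained sketch of the idiom (plain C, hypothetical names):

    #include <stdlib.h>

    struct ctx { void *a, *b, *c; };

    /* Returns 0 on success; on failure, frees exactly what was allocated. */
    static int ctx_init(struct ctx *ctx)
    {
        ctx->a = malloc(64);
        if (!ctx->a)
            goto fail_a;
        ctx->b = malloc(64);
        if (!ctx->b)
            goto fail_b;
        ctx->c = malloc(64);
        if (!ctx->c)
            goto fail_c;
        return 0;

    fail_c:                 /* unwind in reverse order of allocation */
        free(ctx->b);
    fail_b:
        free(ctx->a);
    fail_a:
        return -1;
    }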
@@ -417,82 +439,43 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
 	struct req_que *req;
 	struct rsp_que *rsp;
 	int cnt;
+	unsigned long flags;
 
+	spin_lock_irqsave(&ha->hardware_lock, flags);
 	for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
 		if (!test_bit(cnt, ha->req_qid_map))
 			continue;
 
 		req = ha->req_q_map[cnt];
+		clear_bit(cnt, ha->req_qid_map);
+		ha->req_q_map[cnt] = NULL;
+
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 		qla2x00_free_req_que(ha, req);
+		spin_lock_irqsave(&ha->hardware_lock, flags);
 	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
 	kfree(ha->req_q_map);
 	ha->req_q_map = NULL;
 
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
 	for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
 		if (!test_bit(cnt, ha->rsp_qid_map))
 			continue;
 
 		rsp = ha->rsp_q_map[cnt];
+		clear_bit(cnt, ha->req_qid_map);
+		ha->rsp_q_map[cnt] = NULL;
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 		qla2x00_free_rsp_que(ha, rsp);
+		spin_lock_irqsave(&ha->hardware_lock, flags);
 	}
-	kfree(ha->rsp_q_map);
-	ha->rsp_q_map = NULL;
-}
-
-static int qla25xx_setup_mode(struct scsi_qla_host *vha)
-{
-	uint16_t options = 0;
-	int ques, req, ret;
-	struct qla_hw_data *ha = vha->hw;
-
-	if (!(ha->fw_attributes & BIT_6)) {
-		ql_log(ql_log_warn, vha, 0x00d8,
-		    "Firmware is not multi-queue capable.\n");
-		goto fail;
-	}
-	if (ql2xmultique_tag) {
-		/* create a request queue for IO */
-		options |= BIT_7;
-		req = qla25xx_create_req_que(ha, options, 0, 0, -1,
-		    QLA_DEFAULT_QUE_QOS);
-		if (!req) {
-			ql_log(ql_log_warn, vha, 0x00e0,
-			    "Failed to create request queue.\n");
-			goto fail;
-		}
-		ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
-		vha->req = ha->req_q_map[req];
-		options |= BIT_1;
-		for (ques = 1; ques < ha->max_rsp_queues; ques++) {
-			ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
-			if (!ret) {
-				ql_log(ql_log_warn, vha, 0x00e8,
-				    "Failed to create response queue.\n");
-				goto fail2;
-			}
-		}
-		ha->flags.cpu_affinity_enabled = 1;
-		ql_dbg(ql_dbg_multiq, vha, 0xc007,
-		    "CPU affinity mode enabled, "
-		    "no. of response queues:%d no. of request queues:%d.\n",
-		    ha->max_rsp_queues, ha->max_req_queues);
-		ql_dbg(ql_dbg_init, vha, 0x00e9,
-		    "CPU affinity mode enabled, "
-		    "no. of response queues:%d no. of request queues:%d.\n",
-		    ha->max_rsp_queues, ha->max_req_queues);
-	}
-	return 0;
-fail2:
-	qla25xx_delete_queues(vha);
-	destroy_workqueue(ha->wq);
-	ha->wq = NULL;
-	vha->req = ha->req_q_map[0];
-fail:
-	ha->mqenable = 0;
-	kfree(ha->req_q_map);
-	kfree(ha->rsp_q_map);
-	ha->max_req_queues = ha->max_rsp_queues = 1;
-	return 1;
-}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	kfree(ha->rsp_q_map);
+	ha->rsp_q_map = NULL;
+}
 
 static char *
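Worth noting in the free path above: each queue is detached from the map under the spinlock, then the lock is dropped before the potentially blocking free and re-taken before the next iteration, so lookups never see a half-freed queue. A minimal userspace sketch of the idiom (a pthread mutex stands in for the irq-safe spinlock; names are ours):

    #include <pthread.h>
    #include <stdlib.h>

    #define NQ 8

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static void *q_map[NQ];

    static void free_all_queues(void)
    {
        pthread_mutex_lock(&lock);
        for (int i = 0; i < NQ; i++) {
            void *q = q_map[i];

            if (!q)
                continue;
            q_map[i] = NULL;          /* detach under the lock */

            pthread_mutex_unlock(&lock);
            free(q);                  /* blocking work, lock dropped */
            pthread_mutex_lock(&lock);
        }
        pthread_mutex_unlock(&lock);
    }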
@@ -669,7 +652,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
 	qla2x00_rel_sp(sp->fcport->vha, sp);
 }
 
-static void
+void
 qla2x00_sp_compl(void *data, void *ptr, int res)
 {
 	struct qla_hw_data *ha = (struct qla_hw_data *)data;
@@ -693,6 +676,75 @@ qla2x00_sp_compl(void *data, void *ptr, int res)
 	cmd->scsi_done(cmd);
 }
 
+void
+qla2xxx_qpair_sp_free_dma(void *vha, void *ptr)
+{
+	srb_t *sp = (srb_t *)ptr;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+	struct qla_hw_data *ha = sp->fcport->vha->hw;
+	void *ctx = GET_CMD_CTX_SP(sp);
+
+	if (sp->flags & SRB_DMA_VALID) {
+		scsi_dma_unmap(cmd);
+		sp->flags &= ~SRB_DMA_VALID;
+	}
+
+	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
+		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
+	}
+
+	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
+		/* List assured to be having elements */
+		qla2x00_clean_dsd_pool(ha, sp, NULL);
+		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
+	}
+
+	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
+		dma_pool_free(ha->dl_dma_pool, ctx,
+		    ((struct crc_context *)ctx)->crc_ctx_dma);
+		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
+	}
+
+	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
+		struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
+
+		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
+		    ctx1->fcp_cmnd_dma);
+		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
+		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
+		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+		mempool_free(ctx1, ha->ctx_mempool);
+	}
+
+	CMD_SP(cmd) = NULL;
+	qla2xxx_rel_qpair_sp(sp->qpair, sp);
+}
+
+void
+qla2xxx_qpair_sp_compl(void *data, void *ptr, int res)
+{
+	srb_t *sp = (srb_t *)ptr;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
+	cmd->result = res;
+
+	if (atomic_read(&sp->ref_count) == 0) {
+		ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3079,
+		    "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
+		    sp, GET_CMD_SP(sp));
+		if (ql2xextended_error_logging & ql_dbg_io)
+			WARN_ON(atomic_read(&sp->ref_count) == 0);
+		return;
+	}
+	if (!atomic_dec_and_test(&sp->ref_count))
+		return;
+
+	qla2xxx_qpair_sp_free_dma(sp->fcport->vha, sp);
+	cmd->scsi_done(cmd);
+}
+
 /* If we are SP1 here, we need to still take and release the host_lock as SP1
  * does not have the changes necessary to avoid taking host->host_lock.
  */
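qla2xxx_qpair_sp_compl() completes the command only when the last reference to the SRB is dropped, so the normal completion path and an abort path can race safely and the completion fires exactly once. A stripped-down sketch of that pattern with C11 atomics (userspace, hypothetical names):

    #include <stdatomic.h>
    #include <stdio.h>

    struct sp {
        atomic_int ref_count;
    };

    static void sp_finish(struct sp *sp)
    {
        /* Free resources and complete the command here. */
        printf("completing sp %p\n", (void *)sp);
    }

    /* Called by each path that holds a reference (I/O completion, abort). */
    static void sp_put(struct sp *sp)
    {
        /* Like atomic_dec_and_test(): only the caller that drops the
         * final reference performs the completion. */
        if (atomic_fetch_sub(&sp->ref_count, 1) == 1)
            sp_finish(sp);
    }

    int main(void)
    {
        struct sp sp = { .ref_count = 2 };   /* I/O path + abort path */

        sp_put(&sp);   /* first put: no completion yet */
        sp_put(&sp);   /* last put: completes exactly once */
        return 0;
    }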
@@ -706,12 +758,28 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 	srb_t *sp;
 	int rval;
+	struct qla_qpair *qpair = NULL;
+	uint32_t tag;
+	uint16_t hwq;
 
 	if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) {
 		cmd->result = DID_NO_CONNECT << 16;
 		goto qc24_fail_command;
 	}
 
+	if (ha->mqenable) {
+		if (shost_use_blk_mq(vha->host)) {
+			tag = blk_mq_unique_tag(cmd->request);
+			hwq = blk_mq_unique_tag_to_hwq(tag);
+			qpair = ha->queue_pair_map[hwq];
+		} else if (vha->vp_idx && vha->qpair) {
+			qpair = vha->qpair;
+		}
+
+		if (qpair)
+			return qla2xxx_mqueuecommand(host, cmd, qpair);
+	}
+
 	if (ha->flags.eeh_busy) {
 		if (ha->flags.pci_channel_io_perm_failure) {
 			ql_dbg(ql_dbg_aer, vha, 0x9010,
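The dispatch added above leans on blk-mq's unique tags, which pack the hardware-queue index into the upper 16 bits of the tag. A self-contained sketch of the encoding (the constants mirror BLK_MQ_UNIQUE_TAG_BITS/MASK from include/linux/blk-mq.h; the helper names here are ours):

    #include <assert.h>
    #include <stdint.h>

    #define UNIQUE_TAG_BITS 16
    #define UNIQUE_TAG_MASK ((1u << UNIQUE_TAG_BITS) - 1)

    /* blk_mq_unique_tag(): fold the hw queue index into the tag. */
    static uint32_t unique_tag(uint16_t hwq, uint16_t tag)
    {
        return ((uint32_t)hwq << UNIQUE_TAG_BITS) | tag;
    }

    /* blk_mq_unique_tag_to_hwq() */
    static uint16_t unique_tag_to_hwq(uint32_t utag)
    {
        return utag >> UNIQUE_TAG_BITS;
    }

    /* blk_mq_unique_tag_to_tag() */
    static uint16_t unique_tag_to_tag(uint32_t utag)
    {
        return utag & UNIQUE_TAG_MASK;
    }

    int main(void)
    {
        uint32_t utag = unique_tag(3, 42);

        assert(unique_tag_to_hwq(utag) == 3);   /* selects queue_pair_map[3] */
        assert(unique_tag_to_tag(utag) == 42);
        return 0;
    }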
@@ -808,6 +876,95 @@ qc24_fail_command:
 	return 0;
 }
 
+/* For MQ supported I/O */
+int
+qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
+    struct qla_qpair *qpair)
+{
+	scsi_qla_host_t *vha = shost_priv(host);
+	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+	srb_t *sp;
+	int rval;
+
+	rval = fc_remote_port_chkready(rport);
+	if (rval) {
+		cmd->result = rval;
+		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
+		    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
+		    cmd, rval);
+		goto qc24_fail_command;
+	}
+
+	if (!fcport) {
+		cmd->result = DID_NO_CONNECT << 16;
+		goto qc24_fail_command;
+	}
+
+	if (atomic_read(&fcport->state) != FCS_ONLINE) {
+		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
+		    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
+			ql_dbg(ql_dbg_io, vha, 0x3077,
+			    "Returning DNC, fcport_state=%d loop_state=%d.\n",
+			    atomic_read(&fcport->state),
+			    atomic_read(&base_vha->loop_state));
+			cmd->result = DID_NO_CONNECT << 16;
+			goto qc24_fail_command;
+		}
+		goto qc24_target_busy;
+	}
+
+	/*
+	 * Return target busy if we've received a non-zero retry_delay_timer
+	 * in a FCP_RSP.
+	 */
+	if (fcport->retry_delay_timestamp == 0) {
+		/* retry delay not set */
+	} else if (time_after(jiffies, fcport->retry_delay_timestamp))
+		fcport->retry_delay_timestamp = 0;
+	else
+		goto qc24_target_busy;
+
+	sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
+	if (!sp)
+		goto qc24_host_busy;
+
+	sp->u.scmd.cmd = cmd;
+	sp->type = SRB_SCSI_CMD;
+	atomic_set(&sp->ref_count, 1);
+	CMD_SP(cmd) = (void *)sp;
+	sp->free = qla2xxx_qpair_sp_free_dma;
+	sp->done = qla2xxx_qpair_sp_compl;
+	sp->qpair = qpair;
+
+	rval = ha->isp_ops->start_scsi_mq(sp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
+		    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
+		if (rval == QLA_INTERFACE_ERROR)
+			goto qc24_fail_command;
+		goto qc24_host_busy_free_sp;
+	}
+
+	return 0;
+
+qc24_host_busy_free_sp:
+	qla2xxx_qpair_sp_free_dma(vha, sp);
+
+qc24_host_busy:
+	return SCSI_MLQUEUE_HOST_BUSY;
+
+qc24_target_busy:
+	return SCSI_MLQUEUE_TARGET_BUSY;
+
+qc24_fail_command:
+	cmd->scsi_done(cmd);
+
+	return 0;
+}
+
 /*
  * qla2x00_eh_wait_on_command
  *	Waits for the command to be returned by the Firmware for some
@@ -1601,7 +1758,6 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
 {
 	resource_size_t pio;
 	uint16_t msix;
-	int cpus;
 
 	if (pci_request_selected_regions(ha->pdev, ha->bars,
 	    QLA2XXX_DRIVER_NAME)) {
@@ -1658,9 +1814,7 @@ skip_pio:
 
 	/* Determine queue resources */
 	ha->max_req_queues = ha->max_rsp_queues = 1;
-	if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
-	    (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
-	    (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+	if (!ql2xmqsupport || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
 		goto mqiobase_exit;
 
 	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
@@ -1670,26 +1824,18 @@ skip_pio:
1670 "MQIO Base=%p.\n", ha->mqiobase); 1824 "MQIO Base=%p.\n", ha->mqiobase);
1671 /* Read MSIX vector size of the board */ 1825 /* Read MSIX vector size of the board */
1672 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix); 1826 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
1673 ha->msix_count = msix; 1827 ha->msix_count = msix + 1;
1674 /* Max queues are bounded by available msix vectors */ 1828 /* Max queues are bounded by available msix vectors */
1675 /* queue 0 uses two msix vectors */ 1829 /* MB interrupt uses 1 vector */
1676 if (ql2xmultique_tag) { 1830 ha->max_req_queues = ha->msix_count - 1;
1677 cpus = num_online_cpus(); 1831 ha->max_rsp_queues = ha->max_req_queues;
1678 ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ? 1832 /* Queue pairs is the max value minus the base queue pair */
1679 (cpus + 1) : (ha->msix_count - 1); 1833 ha->max_qpairs = ha->max_rsp_queues - 1;
1680 ha->max_req_queues = 2; 1834 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188,
1681 } else if (ql2xmaxqueues > 1) { 1835 "Max no of queues pairs: %d.\n", ha->max_qpairs);
1682 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ? 1836
1683 QLA_MQ_SIZE : ql2xmaxqueues;
1684 ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
1685 "QoS mode set, max no of request queues:%d.\n",
1686 ha->max_req_queues);
1687 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
1688 "QoS mode set, max no of request queues:%d.\n",
1689 ha->max_req_queues);
1690 }
1691 ql_log_pci(ql_log_info, ha->pdev, 0x001a, 1837 ql_log_pci(ql_log_info, ha->pdev, 0x001a,
1692 "MSI-X vector count: %d.\n", msix); 1838 "MSI-X vector count: %d.\n", ha->msix_count);
1693 } else 1839 } else
1694 ql_log_pci(ql_log_info, ha->pdev, 0x001b, 1840 ql_log_pci(ql_log_info, ha->pdev, 0x001b,
1695 "BAR 3 not enabled.\n"); 1841 "BAR 3 not enabled.\n");
@@ -1709,7 +1855,6 @@ static int
 qla83xx_iospace_config(struct qla_hw_data *ha)
 {
 	uint16_t msix;
-	int cpus;
 
 	if (pci_request_selected_regions(ha->pdev, ha->bars,
 	    QLA2XXX_DRIVER_NAME)) {
@@ -1761,32 +1906,36 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
 		/* Read MSIX vector size of the board */
 		pci_read_config_word(ha->pdev,
 		    QLA_83XX_PCI_MSIX_CONTROL, &msix);
-		ha->msix_count = msix;
-		/* Max queues are bounded by available msix vectors */
-		/* queue 0 uses two msix vectors */
-		if (ql2xmultique_tag) {
-			cpus = num_online_cpus();
-			ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
-				(cpus + 1) : (ha->msix_count - 1);
-			ha->max_req_queues = 2;
-		} else if (ql2xmaxqueues > 1) {
-			ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
-				QLA_MQ_SIZE : ql2xmaxqueues;
-			ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc00c,
-			    "QoS mode set, max no of request queues:%d.\n",
-			    ha->max_req_queues);
-			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
-			    "QoS mode set, max no of request queues:%d.\n",
-			    ha->max_req_queues);
+		ha->msix_count = msix + 1;
+		/*
+		 * By default, driver uses at least two msix vectors
+		 * (default & rspq)
+		 */
+		if (ql2xmqsupport) {
+			/* MB interrupt uses 1 vector */
+			ha->max_req_queues = ha->msix_count - 1;
+			ha->max_rsp_queues = ha->max_req_queues;
+
+			/* ATIOQ needs 1 vector. That's 1 less QPair */
+			if (QLA_TGT_MODE_ENABLED())
+				ha->max_req_queues--;
+
+			/* Queue pairs is the max value minus
+			 * the base queue pair */
+			ha->max_qpairs = ha->max_req_queues - 1;
+			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
+			    "Max no of queues pairs: %d.\n", ha->max_qpairs);
 		}
 		ql_log_pci(ql_log_info, ha->pdev, 0x011c,
-		    "MSI-X vector count: %d.\n", msix);
+		    "MSI-X vector count: %d.\n", ha->msix_count);
 	} else
 		ql_log_pci(ql_log_info, ha->pdev, 0x011e,
 		    "BAR 1 not enabled.\n");
 
 mqiobase_exit:
 	ha->msix_count = ha->max_rsp_queues + 1;
+	if (QLA_TGT_MODE_ENABLED())
+		ha->msix_count++;
 
 	qlt_83xx_iospace_config(ha);
 
@@ -1831,6 +1980,7 @@ static struct isp_operations qla2100_isp_ops = {
 	.write_optrom = qla2x00_write_optrom_data,
 	.get_flash_version = qla2x00_get_flash_version,
 	.start_scsi = qla2x00_start_scsi,
+	.start_scsi_mq = NULL,
 	.abort_isp = qla2x00_abort_isp,
 	.iospace_config = qla2x00_iospace_config,
 	.initialize_adapter = qla2x00_initialize_adapter,
@@ -1869,6 +2019,7 @@ static struct isp_operations qla2300_isp_ops = {
 	.write_optrom = qla2x00_write_optrom_data,
 	.get_flash_version = qla2x00_get_flash_version,
 	.start_scsi = qla2x00_start_scsi,
+	.start_scsi_mq = NULL,
 	.abort_isp = qla2x00_abort_isp,
 	.iospace_config = qla2x00_iospace_config,
 	.initialize_adapter = qla2x00_initialize_adapter,
@@ -1907,6 +2058,7 @@ static struct isp_operations qla24xx_isp_ops = {
 	.write_optrom = qla24xx_write_optrom_data,
 	.get_flash_version = qla24xx_get_flash_version,
 	.start_scsi = qla24xx_start_scsi,
+	.start_scsi_mq = NULL,
 	.abort_isp = qla2x00_abort_isp,
 	.iospace_config = qla2x00_iospace_config,
 	.initialize_adapter = qla2x00_initialize_adapter,
@@ -1945,6 +2097,7 @@ static struct isp_operations qla25xx_isp_ops = {
 	.write_optrom = qla24xx_write_optrom_data,
 	.get_flash_version = qla24xx_get_flash_version,
 	.start_scsi = qla24xx_dif_start_scsi,
+	.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
 	.abort_isp = qla2x00_abort_isp,
 	.iospace_config = qla2x00_iospace_config,
 	.initialize_adapter = qla2x00_initialize_adapter,
@@ -1983,6 +2136,7 @@ static struct isp_operations qla81xx_isp_ops = {
 	.write_optrom = qla24xx_write_optrom_data,
 	.get_flash_version = qla24xx_get_flash_version,
 	.start_scsi = qla24xx_dif_start_scsi,
+	.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
 	.abort_isp = qla2x00_abort_isp,
 	.iospace_config = qla2x00_iospace_config,
 	.initialize_adapter = qla2x00_initialize_adapter,
@@ -2021,6 +2175,7 @@ static struct isp_operations qla82xx_isp_ops = {
 	.write_optrom = qla82xx_write_optrom_data,
 	.get_flash_version = qla82xx_get_flash_version,
 	.start_scsi = qla82xx_start_scsi,
+	.start_scsi_mq = NULL,
 	.abort_isp = qla82xx_abort_isp,
 	.iospace_config = qla82xx_iospace_config,
 	.initialize_adapter = qla2x00_initialize_adapter,
@@ -2059,6 +2214,7 @@ static struct isp_operations qla8044_isp_ops = {
 	.write_optrom = qla8044_write_optrom_data,
 	.get_flash_version = qla82xx_get_flash_version,
 	.start_scsi = qla82xx_start_scsi,
+	.start_scsi_mq = NULL,
 	.abort_isp = qla8044_abort_isp,
 	.iospace_config = qla82xx_iospace_config,
 	.initialize_adapter = qla2x00_initialize_adapter,
@@ -2097,6 +2253,7 @@ static struct isp_operations qla83xx_isp_ops = {
 	.write_optrom = qla24xx_write_optrom_data,
 	.get_flash_version = qla24xx_get_flash_version,
 	.start_scsi = qla24xx_dif_start_scsi,
+	.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
 	.abort_isp = qla2x00_abort_isp,
 	.iospace_config = qla83xx_iospace_config,
 	.initialize_adapter = qla2x00_initialize_adapter,
@@ -2135,6 +2292,7 @@ static struct isp_operations qlafx00_isp_ops = {
 	.write_optrom = qla24xx_write_optrom_data,
 	.get_flash_version = qla24xx_get_flash_version,
 	.start_scsi = qlafx00_start_scsi,
+	.start_scsi_mq = NULL,
 	.abort_isp = qlafx00_abort_isp,
 	.iospace_config = qlafx00_iospace_config,
 	.initialize_adapter = qlafx00_initialize_adapter,
@@ -2173,6 +2331,7 @@ static struct isp_operations qla27xx_isp_ops = {
 	.write_optrom = qla24xx_write_optrom_data,
 	.get_flash_version = qla24xx_get_flash_version,
 	.start_scsi = qla24xx_dif_start_scsi,
+	.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
 	.abort_isp = qla2x00_abort_isp,
 	.iospace_config = qla83xx_iospace_config,
 	.initialize_adapter = qla2x00_initialize_adapter,
@@ -2387,6 +2546,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	uint16_t req_length = 0, rsp_length = 0;
 	struct req_que *req = NULL;
 	struct rsp_que *rsp = NULL;
+	int i;
+
 	bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
 	sht = &qla2xxx_driver_template;
 	if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
@@ -2650,6 +2811,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2650 "Found an ISP%04X irq %d iobase 0x%p.\n", 2811 "Found an ISP%04X irq %d iobase 0x%p.\n",
2651 pdev->device, pdev->irq, ha->iobase); 2812 pdev->device, pdev->irq, ha->iobase);
2652 mutex_init(&ha->vport_lock); 2813 mutex_init(&ha->vport_lock);
2814 mutex_init(&ha->mq_lock);
2653 init_completion(&ha->mbx_cmd_comp); 2815 init_completion(&ha->mbx_cmd_comp);
2654 complete(&ha->mbx_cmd_comp); 2816 complete(&ha->mbx_cmd_comp);
2655 init_completion(&ha->mbx_intr_comp); 2817 init_completion(&ha->mbx_intr_comp);
@@ -2737,7 +2899,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	    host->max_cmd_len, host->max_channel, host->max_lun,
 	    host->transportt, sht->vendor_id);
 
-que_init:
+	/* Set up the irqs */
+	ret = qla2x00_request_irqs(ha, rsp);
+	if (ret)
+		goto probe_init_failed;
+
 	/* Alloc arrays of request and response ring ptrs */
 	if (!qla2x00_alloc_queues(ha, req, rsp)) {
 		ql_log(ql_log_fatal, base_vha, 0x003d,
@@ -2746,12 +2912,17 @@ que_init:
 		goto probe_init_failed;
 	}
 
-	qlt_probe_one_stage1(base_vha, ha);
+	if (ha->mqenable && shost_use_blk_mq(host)) {
+		/* number of hardware queues supported by blk/scsi-mq*/
+		host->nr_hw_queues = ha->max_qpairs;
 
-	/* Set up the irqs */
-	ret = qla2x00_request_irqs(ha, rsp);
-	if (ret)
-		goto probe_init_failed;
+		ql_dbg(ql_dbg_init, base_vha, 0x0192,
+		    "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues);
+	} else
+		ql_dbg(ql_dbg_init, base_vha, 0x0193,
+		    "blk/scsi-mq disabled.\n");
+
+	qlt_probe_one_stage1(base_vha, ha);
 
 	pci_save_state(pdev);
 
@@ -2842,11 +3013,12 @@ que_init:
 	    host->can_queue, base_vha->req,
 	    base_vha->mgmt_svr_loop_id, host->sg_tablesize);
 
-	if (ha->mqenable) {
-		if (qla25xx_setup_mode(base_vha)) {
-			ql_log(ql_log_warn, base_vha, 0x00ec,
-			    "Failed to create queues, falling back to single queue mode.\n");
-			goto que_init;
+	if (ha->mqenable && qla_ini_mode_enabled(base_vha)) {
+		ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
+		/* Create start of day qpairs for Block MQ */
+		if (shost_use_blk_mq(host)) {
+			for (i = 0; i < ha->max_qpairs; i++)
+				qla2xxx_create_qpair(base_vha, 5, 0);
 		}
 	}
 
@@ -3115,13 +3287,6 @@ qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
 static void
 qla2x00_destroy_deferred_work(struct qla_hw_data *ha)
 {
-	/* Flush the work queue and remove it */
-	if (ha->wq) {
-		flush_workqueue(ha->wq);
-		destroy_workqueue(ha->wq);
-		ha->wq = NULL;
-	}
-
 	/* Cancel all work and destroy DPC workqueues */
 	if (ha->dpc_lp_wq) {
 		cancel_work_sync(&ha->idc_aen);
@@ -3317,9 +3482,17 @@ qla2x00_free_device(scsi_qla_host_t *vha)
 		ha->isp_ops->disable_intrs(ha);
 	}
 
+	qla2x00_free_fcports(vha);
+
 	qla2x00_free_irqs(vha);
 
-	qla2x00_free_fcports(vha);
+	/* Flush the work queue and remove it */
+	if (ha->wq) {
+		flush_workqueue(ha->wq);
+		destroy_workqueue(ha->wq);
+		ha->wq = NULL;
+	}
+
 
 	qla2x00_mem_free(ha);
 
@@ -4034,6 +4207,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
 	INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
 	INIT_LIST_HEAD(&vha->logo_list);
 	INIT_LIST_HEAD(&vha->plogi_ack_list);
+	INIT_LIST_HEAD(&vha->qp_list);
 
 	spin_lock_init(&vha->work_lock);
 	spin_lock_init(&vha->cmd_list_lock);
@@ -5038,8 +5212,8 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
 
 	base_vha->flags.init_done = 0;
 	qla25xx_delete_queues(base_vha);
-	qla2x00_free_irqs(base_vha);
 	qla2x00_free_fcports(base_vha);
+	qla2x00_free_irqs(base_vha);
 	qla2x00_mem_free(ha);
 	qla82xx_md_free(base_vha);
 	qla2x00_free_queues(ha);
@@ -5073,6 +5247,8 @@ qla2x00_do_dpc(void *data)
 {
 	scsi_qla_host_t *base_vha;
 	struct qla_hw_data *ha;
+	uint32_t online;
+	struct qla_qpair *qpair;
 
 	ha = (struct qla_hw_data *)data;
 	base_vha = pci_get_drvdata(ha->pdev);
@@ -5334,6 +5510,22 @@ intr_on_check:
 		ha->isp_ops->beacon_blink(base_vha);
 	}
 
+	/* qpair online check */
+	if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED,
+	    &base_vha->dpc_flags)) {
+		if (ha->flags.eeh_busy ||
+		    ha->flags.pci_channel_io_perm_failure)
+			online = 0;
+		else
+			online = 1;
+
+		mutex_lock(&ha->mq_lock);
+		list_for_each_entry(qpair, &base_vha->qp_list,
+		    qp_list_elem)
+			qpair->online = online;
+		mutex_unlock(&ha->mq_lock);
+	}
+
 	if (!IS_QLAFX00(ha))
 		qla2x00_do_dpc_all_vps(base_vha);
 
@@ -5676,6 +5868,10 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 	switch (state) {
 	case pci_channel_io_normal:
 		ha->flags.eeh_busy = 0;
+		if (ql2xmqsupport) {
+			set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+		}
 		return PCI_ERS_RESULT_CAN_RECOVER;
 	case pci_channel_io_frozen:
 		ha->flags.eeh_busy = 1;
@@ -5689,10 +5885,18 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 		pci_disable_device(pdev);
 		/* Return back all IOs */
 		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+		if (ql2xmqsupport) {
+			set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+		}
 		return PCI_ERS_RESULT_NEED_RESET;
 	case pci_channel_io_perm_failure:
 		ha->flags.pci_channel_io_perm_failure = 1;
 		qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+		if (ql2xmqsupport) {
+			set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+		}
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 	return PCI_ERS_RESULT_NEED_RESET;
@@ -5960,6 +6164,13 @@ qla83xx_disable_laser(scsi_qla_host_t *vha)
 	qla83xx_wr_reg(vha, reg, data);
 }
 
+static int qla2xxx_map_queues(struct Scsi_Host *shost)
+{
+	scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
+
+	return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
+}
+
 static const struct pci_error_handlers qla2xxx_err_handler = {
 	.error_detected = qla2xxx_pci_error_detected,
 	.mmio_enabled = qla2xxx_pci_mmio_enabled,
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 07349270535d..82dfe07b1d47 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1204,10 +1204,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
 	struct request_queue *rq = sdev->request_queue;
 	struct scsi_target *starget = sdev->sdev_target;
 
-	error = scsi_device_set_state(sdev, SDEV_RUNNING);
-	if (error)
-		return error;
-
 	error = scsi_target_add(starget);
 	if (error)
 		return error;
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index aa43bfea0d00..abe617372661 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -23,6 +23,7 @@
 #include "unipro.h"
 #include "ufs-qcom.h"
 #include "ufshci.h"
+#include "ufs_quirks.h"
 #define UFS_QCOM_DEFAULT_DBG_PRINT_EN \
 	(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
 
@@ -1031,6 +1032,34 @@ out:
 	return ret;
 }
 
+static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
+{
+	int err;
+	u32 pa_vs_config_reg1;
+
+	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+			     &pa_vs_config_reg1);
+	if (err)
+		goto out;
+
+	/* Allow extension of MSB bits of PA_SaveConfigTime attribute */
+	err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+			    (pa_vs_config_reg1 | (1 << 12)));
+
+out:
+	return err;
+}
+
+static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
+{
+	int err = 0;
+
+	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
+		err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
+
+	return err;
+}
+
 static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -1194,7 +1223,16 @@ static int ufs_qcom_init(struct ufs_hba *hba)
 	 */
 	host->generic_phy = devm_phy_get(dev, "ufsphy");
 
-	if (IS_ERR(host->generic_phy)) {
+	if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
+		/*
+		 * UFS driver might be probed before the phy driver does.
+		 * In that case we would like to return EPROBE_DEFER code.
+		 */
+		err = -EPROBE_DEFER;
+		dev_warn(dev, "%s: required phy device. hasn't probed yet. err = %d\n",
+			__func__, err);
+		goto out_variant_clear;
+	} else if (IS_ERR(host->generic_phy)) {
 		err = PTR_ERR(host->generic_phy);
 		dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
 		goto out_variant_clear;
@@ -1432,7 +1470,8 @@ static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
 	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
 	print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
 
-	ufshcd_writel(hba, (reg & ~UFS_BIT(17)), REG_UFS_CFG1);
+	/* clear bit 17 - UTP_DBG_RAMS_EN */
+	ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1);
 
 	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
 	print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
@@ -1609,6 +1648,7 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
 	.hce_enable_notify = ufs_qcom_hce_enable_notify,
 	.link_startup_notify = ufs_qcom_link_startup_notify,
 	.pwr_change_notify = ufs_qcom_pwr_change_notify,
+	.apply_dev_quirks = ufs_qcom_apply_dev_quirks,
 	.suspend = ufs_qcom_suspend,
 	.resume = ufs_qcom_resume,
 	.dbg_register_dump = ufs_qcom_dump_dbg_regs,
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index a19307a57ce2..fe517cd7dac3 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -142,6 +142,7 @@ enum ufs_qcom_phy_init_type {
 	UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
 
 /* QUniPro Vendor specific attributes */
+#define PA_VS_CONFIG_REG1	0x9000
 #define DME_VS_CORE_CLK_CTRL	0xD002
 /* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
 #define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT	BIT(8)
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index f7983058f3f7..08b799d4efcc 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -134,29 +134,17 @@ struct ufs_dev_fix {
  */
 #define UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE	(1 << 7)
 
+/*
+ * The max. value PA_SaveConfigTime is 250 (10us) but this is not enough for
+ * some vendors.
+ * Gear switch from PWM to HS may fail even with this max. PA_SaveConfigTime.
+ * Gear switch can be issued by host controller as an error recovery and any
+ * software delay will not help on this case so we need to increase
+ * PA_SaveConfigTime to >32us as per vendor recommendation.
+ */
+#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME	(1 << 8)
 
 struct ufs_hba;
 void ufs_advertise_fixup_device(struct ufs_hba *hba);
 
-static struct ufs_dev_fix ufs_fixups[] = {
-	/* UFS cards deviations table */
-	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
-		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
-	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
-	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
-		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
-	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
-		UFS_DEVICE_NO_FASTAUTO),
-	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
-		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
-	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
-		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
-	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
-		UFS_DEVICE_QUIRK_PA_TACTIVATE),
-	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
-		UFS_DEVICE_QUIRK_PA_TACTIVATE),
-	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
-
-	END_FIX
-};
 #endif /* UFS_QUIRKS_H_ */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index ef8548c3a423..a2c2817fc566 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -185,6 +185,30 @@ ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
 	return ufs_pm_lvl_states[lvl].link_state;
 }
 
+static struct ufs_dev_fix ufs_fixups[] = {
+	/* UFS cards deviations table */
+	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
+	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+		UFS_DEVICE_NO_FASTAUTO),
+	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
+	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
+		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
+		UFS_DEVICE_QUIRK_PA_TACTIVATE),
+	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
+		UFS_DEVICE_QUIRK_PA_TACTIVATE),
+	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
+		UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
+
+	END_FIX
+};
+
 static void ufshcd_tmc_handler(struct ufs_hba *hba);
 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
@@ -288,10 +312,24 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
  */
 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
 {
-	if (hba->ufs_version == UFSHCI_VERSION_10)
-		return INTERRUPT_MASK_ALL_VER_10;
-	else
-		return INTERRUPT_MASK_ALL_VER_11;
+	u32 intr_mask = 0;
+
+	switch (hba->ufs_version) {
+	case UFSHCI_VERSION_10:
+		intr_mask = INTERRUPT_MASK_ALL_VER_10;
+		break;
+	/* allow fall through */
+	case UFSHCI_VERSION_11:
+	case UFSHCI_VERSION_20:
+		intr_mask = INTERRUPT_MASK_ALL_VER_11;
+		break;
+	/* allow fall through */
+	case UFSHCI_VERSION_21:
+	default:
+		intr_mask = INTERRUPT_MASK_ALL_VER_21;
+	}
+
+	return intr_mask;
 }
 
 /**
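The switch above is a plain version-to-mask mapping: v1.0 gets its own mask, v1.1 and v2.0 share one, and v2.1 plus anything newer fall to the default. A compilable sketch of the same grouping (the VERSION constants and the VER_10 mask value are taken from ufshci.h as best recalled and should be treated as assumptions; the VER_11/VER_21 masks appear in this diff):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors UFSHCI_VERSION_* from ufshci.h (assumed values). */
    #define UFSHCI_VERSION_10 0x00010000
    #define UFSHCI_VERSION_11 0x00010100
    #define UFSHCI_VERSION_20 0x00000200
    #define UFSHCI_VERSION_21 0x00000210

    static uint32_t get_intr_mask(uint32_t ufs_version)
    {
        switch (ufs_version) {
        case UFSHCI_VERSION_10:
            return 0x30FFF;  /* INTERRUPT_MASK_ALL_VER_10 (assumed) */
        case UFSHCI_VERSION_11:
        case UFSHCI_VERSION_20:
            return 0x31FFF;  /* INTERRUPT_MASK_ALL_VER_11 */
        default:             /* VERSION_21 and anything newer */
            return 0x71FFF;  /* INTERRUPT_MASK_ALL_VER_21 */
        }
    }

    int main(void)
    {
        printf("v2.1 mask: 0x%x\n", get_intr_mask(UFSHCI_VERSION_21));
        return 0;
    }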
@@ -5199,6 +5237,8 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
 
 	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
 		ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+	ufshcd_vops_apply_dev_quirks(hba);
 }
 
 /**
@@ -6667,6 +6707,13 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	/* Get UFS version supported by the controller */
 	hba->ufs_version = ufshcd_get_ufs_version(hba);
 
+	if ((hba->ufs_version != UFSHCI_VERSION_10) &&
+	    (hba->ufs_version != UFSHCI_VERSION_11) &&
+	    (hba->ufs_version != UFSHCI_VERSION_20) &&
+	    (hba->ufs_version != UFSHCI_VERSION_21))
+		dev_err(hba->dev, "invalid UFS version 0x%x\n",
+			hba->ufs_version);
+
 	/* Get Interrupt bit mask per version */
 	hba->intr_mask = ufshcd_get_intr_mask(hba);
 
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 7d9ff22acfea..08cd26ed2382 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -266,7 +266,7 @@ struct ufs_pwr_mode_info {
  * @setup_task_mgmt: called before any task management request is issued
  *                   to set some things
  * @hibern8_notify: called around hibern8 enter/exit
- *                  to configure some things
+ * @apply_dev_quirks: called to apply device specific quirks
  * @suspend: called during host controller PM callback
  * @resume: called during host controller PM callback
  * @dbg_register_dump: used to dump controller debug information
@@ -293,7 +293,8 @@ struct ufs_hba_variant_ops {
 	void	(*setup_xfer_req)(struct ufs_hba *, int, bool);
 	void	(*setup_task_mgmt)(struct ufs_hba *, int, u8);
 	void    (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
 					enum ufs_notify_change_status);
+	int	(*apply_dev_quirks)(struct ufs_hba *);
 	int     (*suspend)(struct ufs_hba *, enum ufs_pm_op);
 	int     (*resume)(struct ufs_hba *, enum ufs_pm_op);
 	void	(*dbg_register_dump)(struct ufs_hba *hba);
@@ -839,6 +840,13 @@ static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
 	return hba->vops->hibern8_notify(hba, cmd, status);
 }
 
+static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
+{
+	if (hba->vops && hba->vops->apply_dev_quirks)
+		return hba->vops->apply_dev_quirks(hba);
+	return 0;
+}
+
 static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
 {
 	if (hba->vops && hba->vops->suspend)
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 5d978867be57..8c5190e2e1c9 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -72,6 +72,10 @@ enum {
 	REG_UIC_COMMAND_ARG_1			= 0x94,
 	REG_UIC_COMMAND_ARG_2			= 0x98,
 	REG_UIC_COMMAND_ARG_3			= 0x9C,
+	REG_UFS_CCAP				= 0x100,
+	REG_UFS_CRYPTOCAP			= 0x104,
+
+	UFSHCI_CRYPTO_REG_SPACE_SIZE		= 0x400,
 };
 
 /* Controller capability masks */
@@ -275,6 +279,9 @@ enum {
 
 	/* Interrupt disable mask for UFSHCI v1.1 */
 	INTERRUPT_MASK_ALL_VER_11	= 0x31FFF,
+
+	/* Interrupt disable mask for UFSHCI v2.1 */
+	INTERRUPT_MASK_ALL_VER_21	= 0x71FFF,
 };
 
 /*