author		Dupuis, Chad <chad.dupuis@cavium.com>	2017-02-15 09:28:23 -0500
committer	Martin K. Petersen <martin.petersen@oracle.com>	2017-02-22 19:10:59 -0500
commit		61d8658b4a435eac729966cc94cdda077a8df5cd (patch)
tree		38926df6bdee37f270681a9dfcdf154162555521
parent		67f2db8792f96d8f7521461635d25f9c80245d80 (diff)
scsi: qedf: Add QLogic FastLinQ offload FCoE driver framework.
The QLogic FastLinQ Driver for FCoE (qedf) is the FCoE-specific module for
the 41000 Series Converged Network Adapters by QLogic.

This patch consists of the following changes:
 - MAINTAINERS, Makefile and Kconfig changes for qedf
 - PCI driver registration
 - libfc/fcoe host level initialization
 - SCSI host template initialization and callbacks
 - Debugfs and log level infrastructure
 - Link handling
 - Firmware interface structures
 - QED core module initialization
 - Light L2 interface callbacks
 - I/O request initialization
 - Firmware I/O completion handling
 - Firmware ELS request/response handling
 - FIP request/response handled by the driver itself

Signed-off-by: Nilesh Javali <nilesh.javali@cavium.com>
Signed-off-by: Manish Rangankar <manish.rangankar@cavium.com>
Signed-off-by: Saurav Kashyap <saurav.kashyap@cavium.com>
Signed-off-by: Arun Easi <arun.easi@cavium.com>
Signed-off-by: Chad Dupuis <chad.dupuis@cavium.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
-rw-r--r--  MAINTAINERS                       |    6
-rw-r--r--  drivers/scsi/Kconfig              |    1
-rw-r--r--  drivers/scsi/Makefile             |    1
-rw-r--r--  drivers/scsi/qedf/Kconfig         |   11
-rw-r--r--  drivers/scsi/qedf/Makefile        |    5
-rw-r--r--  drivers/scsi/qedf/qedf.h          |  545
-rw-r--r--  drivers/scsi/qedf/qedf_attr.c     |  165
-rw-r--r--  drivers/scsi/qedf/qedf_dbg.c      |  195
-rw-r--r--  drivers/scsi/qedf/qedf_dbg.h      |  154
-rw-r--r--  drivers/scsi/qedf/qedf_debugfs.c  |  460
-rw-r--r--  drivers/scsi/qedf/qedf_els.c      |  949
-rw-r--r--  drivers/scsi/qedf/qedf_fip.c      |  269
-rw-r--r--  drivers/scsi/qedf/qedf_hsi.h      |  422
-rw-r--r--  drivers/scsi/qedf/qedf_io.c       | 2282
-rw-r--r--  drivers/scsi/qedf/qedf_main.c     | 3336
-rw-r--r--  drivers/scsi/qedf/qedf_version.h  |   15
16 files changed, 8816 insertions, 0 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 545633d6663d..8a0e01477043 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10242,6 +10242,12 @@ L: linux-scsi@vger.kernel.org
 S:	Supported
 F:	drivers/scsi/qedi/
 
+QLOGIC QL41xxx FCOE DRIVER
+M:	QLogic-Storage-Upstream@cavium.com
+L:	linux-scsi@vger.kernel.org
+S:	Supported
+F:	drivers/scsi/qedf/
+
 QNX4 FILESYSTEM
 M:	Anders Larsen <al@alarsen.net>
 W:	http://www.alarsen.net/linux/qnx4fs/
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 255843446e99..8aa9bd34123e 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1235,6 +1235,7 @@ config SCSI_QLOGICPTI
 source "drivers/scsi/qla2xxx/Kconfig"
 source "drivers/scsi/qla4xxx/Kconfig"
 source "drivers/scsi/qedi/Kconfig"
+source "drivers/scsi/qedf/Kconfig"
 
 config SCSI_LPFC
 	tristate "Emulex LightPulse Fibre Channel Support"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 736b77414a4b..fc2855565a51 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_FCOE) += fcoe/
 obj-$(CONFIG_FCOE_FNIC)	+= fnic/
 obj-$(CONFIG_SCSI_SNIC)	+= snic/
 obj-$(CONFIG_SCSI_BNX2X_FCOE)	+= libfc/ fcoe/ bnx2fc/
+obj-$(CONFIG_QEDF)		+= qedf/
 obj-$(CONFIG_ISCSI_TCP)	+= libiscsi.o libiscsi_tcp.o iscsi_tcp.o
 obj-$(CONFIG_INFINIBAND_ISER)	+= libiscsi.o
 obj-$(CONFIG_ISCSI_BOOT_SYSFS)	+= iscsi_boot_sysfs.o
diff --git a/drivers/scsi/qedf/Kconfig b/drivers/scsi/qedf/Kconfig
new file mode 100644
index 000000000000..943f5ee45807
--- /dev/null
+++ b/drivers/scsi/qedf/Kconfig
@@ -0,0 +1,11 @@
1config QEDF
2 tristate "QLogic QEDF 25/40/100Gb FCoE Initiator Driver Support"
3 depends on PCI && SCSI
4 depends on QED
5 depends on LIBFC
6 depends on LIBFCOE
7 select QED_LL2
8 select QED_FCOE
9 ---help---
10 This driver supports FCoE offload for the QLogic FastLinQ
11 41000 Series Converged Network Adapters.
diff --git a/drivers/scsi/qedf/Makefile b/drivers/scsi/qedf/Makefile
new file mode 100644
index 000000000000..64e9f507ce32
--- /dev/null
+++ b/drivers/scsi/qedf/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_QEDF) := qedf.o
2qedf-y = qedf_dbg.o qedf_main.o qedf_io.o qedf_fip.o \
3 qedf_attr.o qedf_els.o
4
5qedf-$(CONFIG_DEBUG_FS) += qedf_debugfs.o
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
new file mode 100644
index 000000000000..96346a1b1515
--- /dev/null
+++ b/drivers/scsi/qedf/qedf.h
@@ -0,0 +1,545 @@
1/*
2 * QLogic FCoE Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9#ifndef _QEDFC_H_
10#define _QEDFC_H_
11
12#include <scsi/libfcoe.h>
13#include <scsi/libfc.h>
14#include <scsi/fc/fc_fip.h>
15#include <scsi/fc/fc_fc2.h>
16#include <scsi/scsi_tcq.h>
17#include <scsi/fc_encode.h>
18#include <linux/version.h>
19
20
 21/* qedf_hsi.h needs to be included before any qed includes */
22#include "qedf_hsi.h"
23
24#include <linux/qed/qed_if.h>
25#include <linux/qed/qed_fcoe_if.h>
26#include <linux/qed/qed_ll2_if.h>
27#include "qedf_version.h"
28#include "qedf_dbg.h"
29
30/* Helpers to extract upper and lower 32-bits of pointer */
31#define U64_HI(val) ((u32)(((u64)(val)) >> 32))
32#define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
33
34#define QEDF_DESCR "QLogic FCoE Offload Driver"
35#define QEDF_MODULE_NAME "qedf"
36
37#define QEDF_MIN_XID 0
38#define QEDF_MAX_SCSI_XID (NUM_TASKS_PER_CONNECTION - 1)
39#define QEDF_MAX_ELS_XID 4095
40#define QEDF_FLOGI_RETRY_CNT 3
41#define QEDF_RPORT_RETRY_CNT 255
42#define QEDF_MAX_SESSIONS 1024
43#define QEDF_MAX_PAYLOAD 2048
44#define QEDF_MAX_BDS_PER_CMD 256
45#define QEDF_MAX_BD_LEN 0xffff
46#define QEDF_BD_SPLIT_SZ 0x1000
47#define QEDF_PAGE_SIZE 4096
48#define QED_HW_DMA_BOUNDARY 0xfff
49#define QEDF_MAX_SGLEN_FOR_CACHESGL ((1U << 16) - 1)
50#define QEDF_MFS (QEDF_MAX_PAYLOAD + \
51 sizeof(struct fc_frame_header))
52#define QEDF_MAX_NPIV 64
53#define QEDF_TM_TIMEOUT 10
54#define QEDF_ABORT_TIMEOUT 10
55#define QEDF_CLEANUP_TIMEOUT 10
56#define QEDF_MAX_CDB_LEN 16
57
58#define UPSTREAM_REMOVE 1
59#define UPSTREAM_KEEP 1
60
61struct qedf_mp_req {
62 uint8_t tm_flags;
63
64 uint32_t req_len;
65 void *req_buf;
66 dma_addr_t req_buf_dma;
67 struct fcoe_sge *mp_req_bd;
68 dma_addr_t mp_req_bd_dma;
69 struct fc_frame_header req_fc_hdr;
70
71 uint32_t resp_len;
72 void *resp_buf;
73 dma_addr_t resp_buf_dma;
74 struct fcoe_sge *mp_resp_bd;
75 dma_addr_t mp_resp_bd_dma;
76 struct fc_frame_header resp_fc_hdr;
77};
78
79struct qedf_els_cb_arg {
80 struct qedf_ioreq *aborted_io_req;
81 struct qedf_ioreq *io_req;
82 u8 op; /* Used to keep track of ELS op */
83 uint16_t l2_oxid;
84 u32 offset; /* Used for sequence cleanup */
85 u8 r_ctl; /* Used for sequence cleanup */
86};
87
88enum qedf_ioreq_event {
89 QEDF_IOREQ_EV_ABORT_SUCCESS,
90 QEDF_IOREQ_EV_ABORT_FAILED,
91 QEDF_IOREQ_EV_SEND_RRQ,
92 QEDF_IOREQ_EV_ELS_TMO,
93 QEDF_IOREQ_EV_ELS_ERR_DETECT,
94 QEDF_IOREQ_EV_ELS_FLUSH,
95 QEDF_IOREQ_EV_CLEANUP_SUCCESS,
96 QEDF_IOREQ_EV_CLEANUP_FAILED,
97};
98
99#define FC_GOOD 0
100#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
101#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
102#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
103#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
104#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
105struct qedf_ioreq {
106 struct list_head link;
107 uint16_t xid;
108 struct scsi_cmnd *sc_cmd;
109 bool use_slowpath; /* Use slow SGL for this I/O */
110#define QEDF_SCSI_CMD 1
111#define QEDF_TASK_MGMT_CMD 2
112#define QEDF_ABTS 3
113#define QEDF_ELS 4
114#define QEDF_CLEANUP 5
115#define QEDF_SEQ_CLEANUP 6
116 u8 cmd_type;
117#define QEDF_CMD_OUTSTANDING 0x0
118#define QEDF_CMD_IN_ABORT 0x1
119#define QEDF_CMD_IN_CLEANUP 0x2
120#define QEDF_CMD_SRR_SENT 0x3
121 u8 io_req_flags;
122 struct qedf_rport *fcport;
123 unsigned long flags;
124 enum qedf_ioreq_event event;
125 size_t data_xfer_len;
126 struct kref refcount;
127 struct qedf_cmd_mgr *cmd_mgr;
128 struct io_bdt *bd_tbl;
129 struct delayed_work timeout_work;
130 struct completion tm_done;
131 struct completion abts_done;
132 struct fcoe_task_context *task;
133 int idx;
134/*
135 * Need to allocate enough room for both sense data and FCP response data
136 * which has a max length of 8 bytes according to spec.
137 */
138#define QEDF_SCSI_SENSE_BUFFERSIZE (SCSI_SENSE_BUFFERSIZE + 8)
139 uint8_t *sense_buffer;
140 dma_addr_t sense_buffer_dma;
141 u32 fcp_resid;
142 u32 fcp_rsp_len;
143 u32 fcp_sns_len;
144 u8 cdb_status;
145 u8 fcp_status;
146 u8 fcp_rsp_code;
147 u8 scsi_comp_flags;
148#define QEDF_MAX_REUSE 0xfff
149 u16 reuse_count;
150 struct qedf_mp_req mp_req;
151 void (*cb_func)(struct qedf_els_cb_arg *cb_arg);
152 struct qedf_els_cb_arg *cb_arg;
153 int fp_idx;
154 unsigned int cpu;
155 unsigned int int_cpu;
156#define QEDF_IOREQ_SLOW_SGE 0
157#define QEDF_IOREQ_SINGLE_SGE 1
158#define QEDF_IOREQ_FAST_SGE 2
159 u8 sge_type;
160 struct delayed_work rrq_work;
161
162 /* Used for sequence level recovery; i.e. REC/SRR */
163 uint32_t rx_buf_off;
164 uint32_t tx_buf_off;
165 uint32_t rx_id;
166 uint32_t task_retry_identifier;
167
168 /*
169 * Used to tell if we need to return a SCSI command
170 * during some form of error processing.
171 */
172 bool return_scsi_cmd_on_abts;
173};
174
175extern struct workqueue_struct *qedf_io_wq;
176
177struct qedf_rport {
178 spinlock_t rport_lock;
179#define QEDF_RPORT_SESSION_READY 1
180#define QEDF_RPORT_UPLOADING_CONNECTION 2
181 unsigned long flags;
182 unsigned long retry_delay_timestamp;
183 struct fc_rport *rport;
184 struct fc_rport_priv *rdata;
185 struct qedf_ctx *qedf;
186 u32 handle; /* Handle from qed */
187 u32 fw_cid; /* fw_cid from qed */
188 void __iomem *p_doorbell;
189 /* Send queue management */
190 atomic_t free_sqes;
191 atomic_t num_active_ios;
192 struct fcoe_wqe *sq;
193 dma_addr_t sq_dma;
194 u16 sq_prod_idx;
195 u16 fw_sq_prod_idx;
196 u16 sq_con_idx;
197 u32 sq_mem_size;
198 void *sq_pbl;
199 dma_addr_t sq_pbl_dma;
200 u32 sq_pbl_size;
201 u32 sid;
202#define QEDF_RPORT_TYPE_DISK 1
203#define QEDF_RPORT_TYPE_TAPE 2
204 uint dev_type; /* Disk or tape */
205 struct list_head peers;
206};
207
208/* Used to contain LL2 skb's in ll2_skb_list */
209struct qedf_skb_work {
210 struct work_struct work;
211 struct sk_buff *skb;
212 struct qedf_ctx *qedf;
213};
214
215struct qedf_fastpath {
216#define QEDF_SB_ID_NULL 0xffff
217 u16 sb_id;
218 struct qed_sb_info *sb_info;
219 struct qedf_ctx *qedf;
220 /* Keep track of number of completions on this fastpath */
221 unsigned long completions;
222 uint32_t cq_num_entries;
223};
224
225/* Used to pass fastpath information needed to process CQEs */
226struct qedf_io_work {
227 struct work_struct work;
228 struct fcoe_cqe cqe;
229 struct qedf_ctx *qedf;
230 struct fc_frame *fp;
231};
232
233struct qedf_glbl_q_params {
234 u64 hw_p_cq; /* Completion queue PBL */
235 u64 hw_p_rq; /* Request queue PBL */
236 u64 hw_p_cmdq; /* Command queue PBL */
237};
238
239struct global_queue {
240 struct fcoe_cqe *cq;
241 dma_addr_t cq_dma;
242 u32 cq_mem_size;
243 u32 cq_cons_idx; /* Completion queue consumer index */
244 u32 cq_prod_idx;
245
246 void *cq_pbl;
247 dma_addr_t cq_pbl_dma;
248 u32 cq_pbl_size;
249};
250
251/* I/O tracing entry */
252#define QEDF_IO_TRACE_SIZE 2048
253struct qedf_io_log {
254#define QEDF_IO_TRACE_REQ 0
255#define QEDF_IO_TRACE_RSP 1
256 uint8_t direction;
257 uint16_t task_id;
258 uint32_t port_id; /* Remote port fabric ID */
259 int lun;
260 char op; /* SCSI CDB */
261 uint8_t lba[4];
262 unsigned int bufflen; /* SCSI buffer length */
263 unsigned int sg_count; /* Number of SG elements */
264 int result; /* Result passed back to mid-layer */
265 unsigned long jiffies; /* Time stamp when I/O logged */
266 int refcount; /* Reference count for task id */
267 unsigned int req_cpu; /* CPU that the task is queued on */
268 unsigned int int_cpu; /* Interrupt CPU that the task is received on */
269 unsigned int rsp_cpu; /* CPU that task is returned on */
270 u8 sge_type; /* Did we take the slow, single or fast SGE path */
271};
272
273/* Number of entries in BDQ */
274#define QEDF_BDQ_SIZE 256
275#define QEDF_BDQ_BUF_SIZE 2072
276
277/* DMA coherent buffers for BDQ */
278struct qedf_bdq_buf {
279 void *buf_addr;
280 dma_addr_t buf_dma;
281};
282
283/* Main adapter struct */
284struct qedf_ctx {
285 struct qedf_dbg_ctx dbg_ctx;
286 struct fcoe_ctlr ctlr;
287 struct fc_lport *lport;
288 u8 data_src_addr[ETH_ALEN];
289#define QEDF_LINK_DOWN 0
290#define QEDF_LINK_UP 1
291 atomic_t link_state;
292#define QEDF_DCBX_PENDING 0
293#define QEDF_DCBX_DONE 1
294 atomic_t dcbx;
295 uint16_t max_scsi_xid;
296 uint16_t max_els_xid;
297#define QEDF_NULL_VLAN_ID -1
298#define QEDF_FALLBACK_VLAN 1002
299#define QEDF_DEFAULT_PRIO 3
300 int vlan_id;
301 uint vlan_hw_insert:1;
302 struct qed_dev *cdev;
303 struct qed_dev_fcoe_info dev_info;
304 struct qed_int_info int_info;
305 uint16_t last_command;
306 spinlock_t hba_lock;
307 struct pci_dev *pdev;
308 u64 wwnn;
309 u64 wwpn;
310 u8 __aligned(16) mac[ETH_ALEN];
311 struct list_head fcports;
312 atomic_t num_offloads;
313 unsigned int curr_conn_id;
314 struct workqueue_struct *ll2_recv_wq;
315 struct workqueue_struct *link_update_wq;
316 struct delayed_work link_update;
317 struct delayed_work link_recovery;
318 struct completion flogi_compl;
319 struct completion fipvlan_compl;
320
321 /*
322 * Used to tell if we're in the window where we are waiting for
 323 * the link to come back up before informing fcoe that the link is
 324 * down.
325 */
326 atomic_t link_down_tmo_valid;
327#define QEDF_TIMER_INTERVAL (1 * HZ)
 328 struct timer_list timer; /* One second bookkeeping timer */
329#define QEDF_DRAIN_ACTIVE 1
330#define QEDF_LL2_STARTED 2
331#define QEDF_UNLOADING 3
332#define QEDF_GRCDUMP_CAPTURE 4
333#define QEDF_IN_RECOVERY 5
334#define QEDF_DBG_STOP_IO 6
335 unsigned long flags; /* Miscellaneous state flags */
336 int fipvlan_retries;
337 u8 num_queues;
338 struct global_queue **global_queues;
339 /* Pointer to array of queue structures */
340 struct qedf_glbl_q_params *p_cpuq;
341 /* Physical address of array of queue structures */
342 dma_addr_t hw_p_cpuq;
343
344 struct qedf_bdq_buf bdq[QEDF_BDQ_SIZE];
345 void *bdq_pbl;
346 dma_addr_t bdq_pbl_dma;
347 size_t bdq_pbl_mem_size;
348 void *bdq_pbl_list;
349 dma_addr_t bdq_pbl_list_dma;
350 u8 bdq_pbl_list_num_entries;
351 void __iomem *bdq_primary_prod;
352 void __iomem *bdq_secondary_prod;
353 uint16_t bdq_prod_idx;
354
355 /* Structure for holding all the fastpath for this qedf_ctx */
356 struct qedf_fastpath *fp_array;
357 struct qed_fcoe_tid tasks;
358 struct qedf_cmd_mgr *cmd_mgr;
 359 /* Holds the PF parameters we pass to qed to start the FCoE function */
360 struct qed_pf_params pf_params;
361 /* Used to time middle path ELS and TM commands */
362 struct workqueue_struct *timer_work_queue;
363
364#define QEDF_IO_WORK_MIN 64
365 mempool_t *io_mempool;
366 struct workqueue_struct *dpc_wq;
367
368 u32 slow_sge_ios;
369 u32 fast_sge_ios;
370 u32 single_sge_ios;
371
372 uint8_t *grcdump;
373 uint32_t grcdump_size;
374
375 struct qedf_io_log io_trace_buf[QEDF_IO_TRACE_SIZE];
376 spinlock_t io_trace_lock;
377 uint16_t io_trace_idx;
378
379 bool stop_io_on_error;
380
381 u32 flogi_cnt;
382 u32 flogi_failed;
383
384 /* Used for fc statistics */
385 u64 input_requests;
386 u64 output_requests;
387 u64 control_requests;
388 u64 packet_aborts;
389 u64 alloc_failures;
390};
391
392struct io_bdt {
393 struct qedf_ioreq *io_req;
394 struct fcoe_sge *bd_tbl;
395 dma_addr_t bd_tbl_dma;
396 u16 bd_valid;
397};
398
399struct qedf_cmd_mgr {
400 struct qedf_ctx *qedf;
401 u16 idx;
402 struct io_bdt **io_bdt_pool;
403#define FCOE_PARAMS_NUM_TASKS 4096
404 struct qedf_ioreq cmds[FCOE_PARAMS_NUM_TASKS];
405 spinlock_t lock;
406 atomic_t free_list_cnt;
407};
408
409/* Stolen from qed_cxt_api.h and adapted for qed_fcoe_info
410 * Usage:
411 *
412 * void *ptr;
413 * ptr = qedf_get_task_mem(&qedf->tasks, 128);
414 */
415static inline void *qedf_get_task_mem(struct qed_fcoe_tid *info, u32 tid)
416{
417 return (void *)(info->blocks[tid / info->num_tids_per_block] +
418 (tid % info->num_tids_per_block) * info->size);
419}
420
421static inline void qedf_stop_all_io(struct qedf_ctx *qedf)
422{
423 set_bit(QEDF_DBG_STOP_IO, &qedf->flags);
424}
425
426/*
427 * Externs
428 */
429#define QEDF_DEFAULT_LOG_MASK 0x3CFB6
430extern const struct qed_fcoe_ops *qed_ops;
431extern uint qedf_dump_frames;
432extern uint qedf_io_tracing;
433extern uint qedf_stop_io_on_error;
434extern uint qedf_link_down_tmo;
435#define QEDF_RETRY_DELAY_MAX 20 /* 2 seconds */
436extern bool qedf_retry_delay;
437extern uint qedf_debug;
438
439extern struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf);
440extern void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr);
441extern int qedf_queuecommand(struct Scsi_Host *host,
442 struct scsi_cmnd *sc_cmd);
443extern void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb);
444extern void qedf_update_src_mac(struct fc_lport *lport, u8 *addr);
445extern u8 *qedf_get_src_mac(struct fc_lport *lport);
446extern void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb);
447extern void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf);
448extern void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
449 struct qedf_ioreq *io_req);
450extern void qedf_process_warning_compl(struct qedf_ctx *qedf,
451 struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
452extern void qedf_process_error_detect(struct qedf_ctx *qedf,
453 struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
454extern void qedf_flush_active_ios(struct qedf_rport *fcport, int lun);
455extern void qedf_release_cmd(struct kref *ref);
456extern int qedf_initiate_abts(struct qedf_ioreq *io_req,
457 bool return_scsi_cmd_on_abts);
458extern void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
459 struct qedf_ioreq *io_req);
460extern struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport,
461 u8 cmd_type);
462
463extern struct device_attribute *qedf_host_attrs[];
464extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
465 unsigned int timer_msec);
466extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
467extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
468 struct fcoe_task_context *task_ctx);
469extern void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid,
470 u32 ptu_invalidate, enum fcoe_task_type req_type, u32 offset);
471extern void qedf_ring_doorbell(struct qedf_rport *fcport);
472extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
473 struct qedf_ioreq *els_req);
474extern int qedf_send_rrq(struct qedf_ioreq *aborted_io_req);
475extern int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp);
476extern int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
477 bool return_scsi_cmd_on_abts);
478extern void qedf_process_cleanup_compl(struct qedf_ctx *qedf,
479 struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
480extern int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags);
481extern void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
482 struct qedf_ioreq *io_req);
483extern void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe);
484extern void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
485 int result);
486extern void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id);
487extern void qedf_create_sysfs_ctx_attr(struct qedf_ctx *qedf);
488extern void qedf_remove_sysfs_ctx_attr(struct qedf_ctx *qedf);
489extern void qedf_capture_grc_dump(struct qedf_ctx *qedf);
490extern void qedf_wait_for_upload(struct qedf_ctx *qedf);
491extern void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
492 struct fcoe_cqe *cqe);
493extern void qedf_restart_rport(struct qedf_rport *fcport);
494extern int qedf_send_rec(struct qedf_ioreq *orig_io_req);
495extern int qedf_post_io_req(struct qedf_rport *fcport,
496 struct qedf_ioreq *io_req);
497extern void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
498 struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
499extern int qedf_send_flogi(struct qedf_ctx *qedf);
500extern void qedf_fp_io_handler(struct work_struct *work);
501
502#define FCOE_WORD_TO_BYTE 4
503#define QEDF_MAX_TASK_NUM 0xFFFF
504
505struct fip_vlan {
506 struct ethhdr eth;
507 struct fip_header fip;
508 struct {
509 struct fip_mac_desc mac;
510 struct fip_wwn_desc wwnn;
511 } desc;
512};
513
514/* SQ/CQ Sizes */
515#define GBL_RSVD_TASKS 16
516#define NUM_TASKS_PER_CONNECTION 1024
517#define NUM_RW_TASKS_PER_CONNECTION 512
518#define FCOE_PARAMS_CQ_NUM_ENTRIES FCOE_PARAMS_NUM_TASKS
519
520#define FCOE_PARAMS_CMDQ_NUM_ENTRIES FCOE_PARAMS_NUM_TASKS
521#define SQ_NUM_ENTRIES NUM_TASKS_PER_CONNECTION
522
523#define QEDF_FCOE_PARAMS_GL_RQ_PI 0
524#define QEDF_FCOE_PARAMS_GL_CMD_PI 1
525
526#define QEDF_READ (1 << 1)
527#define QEDF_WRITE (1 << 0)
528#define MAX_FIBRE_LUNS 0xffffffff
529
530#define QEDF_MAX_NUM_CQS 8
531
532/*
533 * PCI function probe defines
534 */
535/* Probe/remove called during normal PCI probe */
536#define QEDF_MODE_NORMAL 0
537/* Probe/remove called from qed error recovery */
538#define QEDF_MODE_RECOVERY 1
539
540#define SUPPORTED_25000baseKR_Full (1<<27)
541#define SUPPORTED_50000baseKR2_Full (1<<28)
542#define SUPPORTED_100000baseKR4_Full (1<<29)
543#define SUPPORTED_100000baseCR4_Full (1<<30)
544
545#endif
diff --git a/drivers/scsi/qedf/qedf_attr.c b/drivers/scsi/qedf/qedf_attr.c
new file mode 100644
index 000000000000..47720611ad2c
--- /dev/null
+++ b/drivers/scsi/qedf/qedf_attr.c
@@ -0,0 +1,165 @@
1/*
2 * QLogic FCoE Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9#include "qedf.h"
10
11static ssize_t
12qedf_fcoe_mac_show(struct device *dev,
13 struct device_attribute *attr, char *buf)
14{
15 struct fc_lport *lport = shost_priv(class_to_shost(dev));
16 u32 port_id;
17 u8 lport_src_id[3];
18 u8 fcoe_mac[6];
19
20 port_id = fc_host_port_id(lport->host);
21 lport_src_id[2] = (port_id & 0x000000FF);
22 lport_src_id[1] = (port_id & 0x0000FF00) >> 8;
23 lport_src_id[0] = (port_id & 0x00FF0000) >> 16;
24 fc_fcoe_set_mac(fcoe_mac, lport_src_id);
25
26 return scnprintf(buf, PAGE_SIZE, "%pM\n", fcoe_mac);
27}
28
29static DEVICE_ATTR(fcoe_mac, S_IRUGO, qedf_fcoe_mac_show, NULL);
30
31struct device_attribute *qedf_host_attrs[] = {
32 &dev_attr_fcoe_mac,
33 NULL,
34};
35
36extern const struct qed_fcoe_ops *qed_ops;
37
38inline bool qedf_is_vport(struct qedf_ctx *qedf)
39{
40 return (!(qedf->lport->vport == NULL));
41}
42
43/* Get base qedf for physical port from vport */
44static struct qedf_ctx *qedf_get_base_qedf(struct qedf_ctx *qedf)
45{
46 struct fc_lport *lport;
47 struct fc_lport *base_lport;
48
49 if (!(qedf_is_vport(qedf)))
50 return NULL;
51
52 lport = qedf->lport;
53 base_lport = shost_priv(vport_to_shost(lport->vport));
54 return (struct qedf_ctx *)(lport_priv(base_lport));
55}
56
57void qedf_capture_grc_dump(struct qedf_ctx *qedf)
58{
59 struct qedf_ctx *base_qedf;
60
61 /* Make sure we use the base qedf to take the GRC dump */
62 if (qedf_is_vport(qedf))
63 base_qedf = qedf_get_base_qedf(qedf);
64 else
65 base_qedf = qedf;
66
67 if (test_bit(QEDF_GRCDUMP_CAPTURE, &base_qedf->flags)) {
68 QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_INFO,
69 "GRC Dump already captured.\n");
70 return;
71 }
72
73
74 qedf_get_grc_dump(base_qedf->cdev, qed_ops->common,
75 &base_qedf->grcdump, &base_qedf->grcdump_size);
76 QEDF_ERR(&(base_qedf->dbg_ctx), "GRC Dump captured.\n");
77 set_bit(QEDF_GRCDUMP_CAPTURE, &base_qedf->flags);
78 qedf_uevent_emit(base_qedf->lport->host, QEDF_UEVENT_CODE_GRCDUMP,
79 NULL);
80}
81
82static ssize_t
83qedf_sysfs_read_grcdump(struct file *filep, struct kobject *kobj,
84 struct bin_attribute *ba, char *buf, loff_t off,
85 size_t count)
86{
87 ssize_t ret = 0;
88 struct fc_lport *lport = shost_priv(dev_to_shost(container_of(kobj,
89 struct device, kobj)));
90 struct qedf_ctx *qedf = lport_priv(lport);
91
92 if (test_bit(QEDF_GRCDUMP_CAPTURE, &qedf->flags)) {
93 ret = memory_read_from_buffer(buf, count, &off,
94 qedf->grcdump, qedf->grcdump_size);
95 } else {
96 QEDF_ERR(&(qedf->dbg_ctx), "GRC Dump not captured!\n");
97 }
98
99 return ret;
100}
101
102static ssize_t
103qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj,
104 struct bin_attribute *ba, char *buf, loff_t off,
105 size_t count)
106{
107 struct fc_lport *lport = NULL;
108 struct qedf_ctx *qedf = NULL;
109 long reading;
110 int ret = 0;
111 char msg[40];
112
113 if (off != 0)
114 return ret;
115
116
117 lport = shost_priv(dev_to_shost(container_of(kobj,
118 struct device, kobj)));
119 qedf = lport_priv(lport);
120
121 buf[1] = 0;
122 ret = kstrtol(buf, 10, &reading);
123 if (ret) {
124 QEDF_ERR(&(qedf->dbg_ctx), "Invalid input, err(%d)\n", ret);
125 return ret;
126 }
127
128 memset(msg, 0, sizeof(msg));
129 switch (reading) {
130 case 0:
131 memset(qedf->grcdump, 0, qedf->grcdump_size);
132 clear_bit(QEDF_GRCDUMP_CAPTURE, &qedf->flags);
133 break;
134 case 1:
135 qedf_capture_grc_dump(qedf);
136 break;
137 }
138
139 return count;
140}
141
142static struct bin_attribute sysfs_grcdump_attr = {
143 .attr = {
144 .name = "grcdump",
145 .mode = S_IRUSR | S_IWUSR,
146 },
147 .size = 0,
148 .read = qedf_sysfs_read_grcdump,
149 .write = qedf_sysfs_write_grcdump,
150};
151
152static struct sysfs_bin_attrs bin_file_entries[] = {
153 {"grcdump", &sysfs_grcdump_attr},
154 {NULL},
155};
156
157void qedf_create_sysfs_ctx_attr(struct qedf_ctx *qedf)
158{
159 qedf_create_sysfs_attr(qedf->lport->host, bin_file_entries);
160}
161
162void qedf_remove_sysfs_ctx_attr(struct qedf_ctx *qedf)
163{
164 qedf_remove_sysfs_attr(qedf->lport->host, bin_file_entries);
165}
diff --git a/drivers/scsi/qedf/qedf_dbg.c b/drivers/scsi/qedf/qedf_dbg.c
new file mode 100644
index 000000000000..e023f5d0dc12
--- /dev/null
+++ b/drivers/scsi/qedf/qedf_dbg.c
@@ -0,0 +1,195 @@
1/*
2 * QLogic FCoE Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9#include "qedf_dbg.h"
10#include <linux/vmalloc.h>
11
12void
13qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
14 const char *fmt, ...)
15{
16 va_list va;
17 struct va_format vaf;
18 char nfunc[32];
19
20 memset(nfunc, 0, sizeof(nfunc));
21 memcpy(nfunc, func, sizeof(nfunc) - 1);
22
23 va_start(va, fmt);
24
25 vaf.fmt = fmt;
26 vaf.va = &va;
27
28 if (likely(qedf) && likely(qedf->pdev))
29 pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)),
30 nfunc, line, qedf->host_no, &vaf);
31 else
32 pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
33
34 va_end(va);
35}
36
37void
38qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
39 const char *fmt, ...)
40{
41 va_list va;
42 struct va_format vaf;
43 char nfunc[32];
44
45 memset(nfunc, 0, sizeof(nfunc));
46 memcpy(nfunc, func, sizeof(nfunc) - 1);
47
48 va_start(va, fmt);
49
50 vaf.fmt = fmt;
51 vaf.va = &va;
52
53 if (!(qedf_debug & QEDF_LOG_WARN))
54 goto ret;
55
56 if (likely(qedf) && likely(qedf->pdev))
57 pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)),
58 nfunc, line, qedf->host_no, &vaf);
59 else
60 pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
61
62ret:
63 va_end(va);
64}
65
66void
67qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
68 const char *fmt, ...)
69{
70 va_list va;
71 struct va_format vaf;
72 char nfunc[32];
73
74 memset(nfunc, 0, sizeof(nfunc));
75 memcpy(nfunc, func, sizeof(nfunc) - 1);
76
77 va_start(va, fmt);
78
79 vaf.fmt = fmt;
80 vaf.va = &va;
81
82 if (!(qedf_debug & QEDF_LOG_NOTICE))
83 goto ret;
84
85 if (likely(qedf) && likely(qedf->pdev))
86 pr_notice("[%s]:[%s:%d]:%d: %pV",
87 dev_name(&(qedf->pdev->dev)), nfunc, line,
88 qedf->host_no, &vaf);
89 else
90 pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
91
92ret:
93 va_end(va);
94}
95
96void
97qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
98 u32 level, const char *fmt, ...)
99{
100 va_list va;
101 struct va_format vaf;
102 char nfunc[32];
103
104 memset(nfunc, 0, sizeof(nfunc));
105 memcpy(nfunc, func, sizeof(nfunc) - 1);
106
107 va_start(va, fmt);
108
109 vaf.fmt = fmt;
110 vaf.va = &va;
111
112 if (!(qedf_debug & level))
113 goto ret;
114
115 if (likely(qedf) && likely(qedf->pdev))
116 pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)),
117 nfunc, line, qedf->host_no, &vaf);
118 else
119 pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
120
121ret:
122 va_end(va);
123}
124
125int
126qedf_alloc_grc_dump_buf(u8 **buf, uint32_t len)
127{
128 *buf = vmalloc(len);
129 if (!(*buf))
130 return -ENOMEM;
131
132 memset(*buf, 0, len);
133 return 0;
134}
135
136void
137qedf_free_grc_dump_buf(uint8_t **buf)
138{
139 vfree(*buf);
140 *buf = NULL;
141}
142
143int
144qedf_get_grc_dump(struct qed_dev *cdev, const struct qed_common_ops *common,
145 u8 **buf, uint32_t *grcsize)
146{
147 if (!*buf)
148 return -EINVAL;
149
150 return common->dbg_grc(cdev, *buf, grcsize);
151}
152
153void
154qedf_uevent_emit(struct Scsi_Host *shost, u32 code, char *msg)
155{
156 char event_string[40];
157 char *envp[] = {event_string, NULL};
158
159 memset(event_string, 0, sizeof(event_string));
160 switch (code) {
161 case QEDF_UEVENT_CODE_GRCDUMP:
162 if (msg)
163 strncpy(event_string, msg, strlen(msg));
164 else
165 sprintf(event_string, "GRCDUMP=%u", shost->host_no);
166 break;
167 default:
168 /* do nothing */
169 break;
170 }
171
172 kobject_uevent_env(&shost->shost_gendev.kobj, KOBJ_CHANGE, envp);
173}
174
175int
176qedf_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
177{
178 int ret = 0;
179
180 for (; iter->name; iter++) {
181 ret = sysfs_create_bin_file(&shost->shost_gendev.kobj,
182 iter->attr);
183 if (ret)
184 pr_err("Unable to create sysfs %s attr, err(%d).\n",
185 iter->name, ret);
186 }
187 return ret;
188}
189
190void
191qedf_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
192{
193 for (; iter->name; iter++)
194 sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr);
195}
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
new file mode 100644
index 000000000000..23bd70628a2f
--- /dev/null
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -0,0 +1,154 @@
1/*
2 * QLogic FCoE Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9#ifndef _QEDF_DBG_H_
10#define _QEDF_DBG_H_
11
12#include <linux/types.h>
13#include <linux/kernel.h>
14#include <linux/compiler.h>
15#include <linux/string.h>
16#include <linux/version.h>
17#include <linux/pci.h>
18#include <linux/delay.h>
19#include <scsi/scsi_transport.h>
20#include <linux/fs.h>
21
22#include <linux/qed/common_hsi.h>
23#include <linux/qed/qed_if.h>
24
25extern uint qedf_debug;
26
27/* Debug print level definitions */
28#define QEDF_LOG_DEFAULT 0x1 /* Set default logging mask */
29#define QEDF_LOG_INFO 0x2 /*
30 * Informational logs,
31 * MAC address, WWPN, WWNN
32 */
33#define QEDF_LOG_DISC 0x4 /* Init, discovery, rport */
34#define QEDF_LOG_LL2 0x8 /* LL2, VLAN logs */
35#define QEDF_LOG_CONN 0x10 /* Connection setup, cleanup */
36#define QEDF_LOG_EVT 0x20 /* Events, link, mtu */
37#define QEDF_LOG_TIMER 0x40 /* Timer events */
38#define QEDF_LOG_MP_REQ 0x80 /* Middle Path (MP) logs */
39#define QEDF_LOG_SCSI_TM 0x100 /* SCSI Aborts, Task Mgmt */
40#define QEDF_LOG_UNSOL 0x200 /* unsolicited event logs */
41#define QEDF_LOG_IO 0x400 /* scsi cmd, completion */
42#define QEDF_LOG_MQ 0x800 /* Multi Queue logs */
43#define QEDF_LOG_BSG 0x1000 /* BSG logs */
44#define QEDF_LOG_DEBUGFS 0x2000 /* debugFS logs */
45#define QEDF_LOG_LPORT 0x4000 /* lport logs */
46#define QEDF_LOG_ELS 0x8000 /* ELS logs */
47#define QEDF_LOG_NPIV 0x10000 /* NPIV logs */
 48#define QEDF_LOG_SESS 0x20000 /* Connection setup, cleanup */
49#define QEDF_LOG_TID 0x80000 /*
50 * FW TID context acquire
51 * free
52 */
53#define QEDF_TRACK_TID 0x100000 /*
54 * Track TID state. To be
55 * enabled only at module load
56 * and not run-time.
57 */
58#define QEDF_TRACK_CMD_LIST 0x300000 /*
59 * Track active cmd list nodes,
60 * done with reference to TID,
61 * hence TRACK_TID also enabled.
62 */
63#define QEDF_LOG_NOTICE 0x40000000 /* Notice logs */
64#define QEDF_LOG_WARN 0x80000000 /* Warning logs */
65
66/* Debug context structure */
67struct qedf_dbg_ctx {
68 unsigned int host_no;
69 struct pci_dev *pdev;
70#ifdef CONFIG_DEBUG_FS
71 struct dentry *bdf_dentry;
72#endif
73};
74
75#define QEDF_ERR(pdev, fmt, ...) \
76 qedf_dbg_err(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
77#define QEDF_WARN(pdev, fmt, ...) \
78 qedf_dbg_warn(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
79#define QEDF_NOTICE(pdev, fmt, ...) \
80 qedf_dbg_notice(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
81#define QEDF_INFO(pdev, level, fmt, ...) \
82 qedf_dbg_info(pdev, __func__, __LINE__, level, fmt, \
83 ## __VA_ARGS__)
84
85extern void qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
86 const char *fmt, ...);
87extern void qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
88 const char *, ...);
89extern void qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func,
90 u32 line, const char *, ...);
91extern void qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
92 u32 info, const char *fmt, ...);
93
94/* GRC Dump related defines */
95
96struct Scsi_Host;
97
98#define QEDF_UEVENT_CODE_GRCDUMP 0
99
100struct sysfs_bin_attrs {
101 char *name;
102 struct bin_attribute *attr;
103};
104
105extern int qedf_alloc_grc_dump_buf(uint8_t **buf, uint32_t len);
106extern void qedf_free_grc_dump_buf(uint8_t **buf);
107extern int qedf_get_grc_dump(struct qed_dev *cdev,
108 const struct qed_common_ops *common, uint8_t **buf,
109 uint32_t *grcsize);
110extern void qedf_uevent_emit(struct Scsi_Host *shost, u32 code, char *msg);
111extern int qedf_create_sysfs_attr(struct Scsi_Host *shost,
112 struct sysfs_bin_attrs *iter);
113extern void qedf_remove_sysfs_attr(struct Scsi_Host *shost,
114 struct sysfs_bin_attrs *iter);
115
116#ifdef CONFIG_DEBUG_FS
117/* DebugFS related code */
118struct qedf_list_of_funcs {
119 char *oper_str;
120 ssize_t (*oper_func)(struct qedf_dbg_ctx *qedf);
121};
122
123struct qedf_debugfs_ops {
124 char *name;
125 struct qedf_list_of_funcs *qedf_funcs;
126};
127
128#define qedf_dbg_fileops(drv, ops) \
129{ \
130 .owner = THIS_MODULE, \
131 .open = simple_open, \
132 .read = drv##_dbg_##ops##_cmd_read, \
133 .write = drv##_dbg_##ops##_cmd_write \
134}
135
136/* Used for debugfs sequential files */
137#define qedf_dbg_fileops_seq(drv, ops) \
138{ \
139 .owner = THIS_MODULE, \
140 .open = drv##_dbg_##ops##_open, \
141 .read = seq_read, \
142 .llseek = seq_lseek, \
143 .release = single_release, \
144}
145
146extern void qedf_dbg_host_init(struct qedf_dbg_ctx *qedf,
147 struct qedf_debugfs_ops *dops,
148 struct file_operations *fops);
149extern void qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf);
150extern void qedf_dbg_init(char *drv_name);
151extern void qedf_dbg_exit(void);
152#endif /* CONFIG_DEBUG_FS */
153
154#endif /* _QEDF_DBG_H_ */
diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
new file mode 100644
index 000000000000..cb08b625c594
--- /dev/null
+++ b/drivers/scsi/qedf/qedf_debugfs.c
@@ -0,0 +1,460 @@
1/*
2 * QLogic FCoE Offload Driver
3 * Copyright (c) 2016 QLogic Corporation
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9#ifdef CONFIG_DEBUG_FS
10
11#include <linux/uaccess.h>
12#include <linux/debugfs.h>
13#include <linux/module.h>
14
15#include "qedf.h"
16#include "qedf_dbg.h"
17
18static struct dentry *qedf_dbg_root;
19
20/**
21 * qedf_dbg_host_init - setup the debugfs file for the pf
 22 * @qedf: the qedf debug context that is starting up
23 **/
24void
25qedf_dbg_host_init(struct qedf_dbg_ctx *qedf,
26 struct qedf_debugfs_ops *dops,
27 struct file_operations *fops)
28{
29 char host_dirname[32];
30 struct dentry *file_dentry = NULL;
31
32 QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Creating debugfs host node\n");
33 /* create pf dir */
34 sprintf(host_dirname, "host%u", qedf->host_no);
35 qedf->bdf_dentry = debugfs_create_dir(host_dirname, qedf_dbg_root);
36 if (!qedf->bdf_dentry)
37 return;
38
39 /* create debugfs files */
40 while (dops) {
41 if (!(dops->name))
42 break;
43
44 file_dentry = debugfs_create_file(dops->name, 0600,
45 qedf->bdf_dentry, qedf,
46 fops);
47 if (!file_dentry) {
48 QEDF_INFO(qedf, QEDF_LOG_DEBUGFS,
49 "Debugfs entry %s creation failed\n",
50 dops->name);
51 debugfs_remove_recursive(qedf->bdf_dentry);
52 return;
53 }
54 dops++;
55 fops++;
56 }
57}
58
59/**
60 * qedf_dbg_host_exit - clear out the pf's debugfs entries
 61 * @qedf: the qedf debug context that is stopping
62 **/
63void
64qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf)
65{
66 QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Destroying debugfs host "
67 "entry\n");
68 /* remove debugfs entries of this PF */
69 debugfs_remove_recursive(qedf->bdf_dentry);
70 qedf->bdf_dentry = NULL;
71}
72
73/**
74 * qedf_dbg_init - start up debugfs for the driver
75 **/
76void
77qedf_dbg_init(char *drv_name)
78{
79 QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Creating debugfs root node\n");
80
81 /* create qed dir in root of debugfs. NULL means debugfs root */
82 qedf_dbg_root = debugfs_create_dir(drv_name, NULL);
83 if (!qedf_dbg_root)
84 QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Init of debugfs "
85 "failed\n");
86}
87
88/**
89 * qedf_dbg_exit - clean out the driver's debugfs entries
90 **/
91void
92qedf_dbg_exit(void)
93{
94 QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Destroying debugfs root "
95 "entry\n");
96
97 /* remove qed dir in root of debugfs */
98 debugfs_remove_recursive(qedf_dbg_root);
99 qedf_dbg_root = NULL;
100}
101
102struct qedf_debugfs_ops qedf_debugfs_ops[] = {
103 { "fp_int", NULL },
104 { "io_trace", NULL },
105 { "debug", NULL },
106 { "stop_io_on_error", NULL},
107 { "driver_stats", NULL},
108 { "clear_stats", NULL},
109 { "offload_stats", NULL},
110 /* This must be last */
111 { NULL, NULL }
112};
113
114DECLARE_PER_CPU(struct qedf_percpu_iothread_s, qedf_percpu_iothreads);
115
116static ssize_t
117qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count,
118 loff_t *ppos)
119{
120 size_t cnt = 0;
121 int id;
122 struct qedf_fastpath *fp = NULL;
123 struct qedf_dbg_ctx *qedf_dbg =
124 (struct qedf_dbg_ctx *)filp->private_data;
125 struct qedf_ctx *qedf = container_of(qedf_dbg,
126 struct qedf_ctx, dbg_ctx);
127
128 QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
129
130 cnt = sprintf(buffer, "\nFastpath I/O completions\n\n");
131
132 for (id = 0; id < qedf->num_queues; id++) {
133 fp = &(qedf->fp_array[id]);
134 if (fp->sb_id == QEDF_SB_ID_NULL)
135 continue;
136 cnt += sprintf((buffer + cnt), "#%d: %lu\n", id,
137 fp->completions);
138 }
139
140 cnt = min_t(int, count, cnt - *ppos);
141 *ppos += cnt;
142 return cnt;
143}
144
145static ssize_t
146qedf_dbg_fp_int_cmd_write(struct file *filp, const char __user *buffer,
147 size_t count, loff_t *ppos)
148{
149 if (!count || *ppos)
150 return 0;
151
152 return count;
153}
154
155static ssize_t
156qedf_dbg_debug_cmd_read(struct file *filp, char __user *buffer, size_t count,
157 loff_t *ppos)
158{
159 int cnt;
160 struct qedf_dbg_ctx *qedf =
161 (struct qedf_dbg_ctx *)filp->private_data;
162
163 QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "entered\n");
164 cnt = sprintf(buffer, "debug mask = 0x%x\n", qedf_debug);
165
166 cnt = min_t(int, count, cnt - *ppos);
167 *ppos += cnt;
168 return cnt;
169}
170
171static ssize_t
172qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer,
173 size_t count, loff_t *ppos)
174{
175 uint32_t val;
176 void *kern_buf;
177 int rval;
178 struct qedf_dbg_ctx *qedf =
179 (struct qedf_dbg_ctx *)filp->private_data;
180
181 if (!count || *ppos)
182 return 0;
183
184 kern_buf = memdup_user(buffer, count);
185 if (IS_ERR(kern_buf))
186 return PTR_ERR(kern_buf);
187
188 rval = kstrtouint(kern_buf, 10, &val);
189 kfree(kern_buf);
190 if (rval)
191 return rval;
192
193 if (val == 1)
194 qedf_debug = QEDF_DEFAULT_LOG_MASK;
195 else
196 qedf_debug = val;
197
198 QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Setting debug=0x%x.\n", val);
199 return count;
200}
201
202static ssize_t
203qedf_dbg_stop_io_on_error_cmd_read(struct file *filp, char __user *buffer,
204 size_t count, loff_t *ppos)
205{
206 int cnt;
207 struct qedf_dbg_ctx *qedf_dbg =
208 (struct qedf_dbg_ctx *)filp->private_data;
209 struct qedf_ctx *qedf = container_of(qedf_dbg,
210 struct qedf_ctx, dbg_ctx);
211
212 QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
213 cnt = sprintf(buffer, "%s\n",
214 qedf->stop_io_on_error ? "true" : "false");
215
216 cnt = min_t(int, count, cnt - *ppos);
217 *ppos += cnt;
218 return cnt;
219}
220
221static ssize_t
222qedf_dbg_stop_io_on_error_cmd_write(struct file *filp,
223 const char __user *buffer, size_t count,
224 loff_t *ppos)
225{
226 void *kern_buf;
227 struct qedf_dbg_ctx *qedf_dbg =
228 (struct qedf_dbg_ctx *)filp->private_data;
229 struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx,
230 dbg_ctx);
231
232 QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
233
234 if (!count || *ppos)
235 return 0;
236
237 kern_buf = memdup_user(buffer, 6);
238 if (IS_ERR(kern_buf))
239 return PTR_ERR(kern_buf);
240
241 if (strncmp(kern_buf, "false", 5) == 0)
242 qedf->stop_io_on_error = false;
243 else if (strncmp(kern_buf, "true", 4) == 0)
244 qedf->stop_io_on_error = true;
245 else if (strncmp(kern_buf, "now", 3) == 0)
246 /* Trigger from user to stop all I/O on this host */
247 set_bit(QEDF_DBG_STOP_IO, &qedf->flags);
248
249 kfree(kern_buf);
250 return count;
251}
252
253static int
254qedf_io_trace_show(struct seq_file *s, void *unused)
255{
256 int i, idx = 0;
257 struct qedf_ctx *qedf = s->private;
258 struct qedf_dbg_ctx *qedf_dbg = &qedf->dbg_ctx;
259 struct qedf_io_log *io_log;
260 unsigned long flags;
261
262 if (!qedf_io_tracing) {
263 seq_puts(s, "I/O tracing not enabled.\n");
264 goto out;
265 }
266
267 QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
268
269 spin_lock_irqsave(&qedf->io_trace_lock, flags);
270 idx = qedf->io_trace_idx;
271 for (i = 0; i < QEDF_IO_TRACE_SIZE; i++) {
272 io_log = &qedf->io_trace_buf[idx];
273 seq_printf(s, "%d:", io_log->direction);
274 seq_printf(s, "0x%x:", io_log->task_id);
275 seq_printf(s, "0x%06x:", io_log->port_id);
276 seq_printf(s, "%d:", io_log->lun);
277 seq_printf(s, "0x%02x:", io_log->op);
278 seq_printf(s, "0x%02x%02x%02x%02x:", io_log->lba[0],
279 io_log->lba[1], io_log->lba[2], io_log->lba[3]);
280 seq_printf(s, "%d:", io_log->bufflen);
281 seq_printf(s, "%d:", io_log->sg_count);
282 seq_printf(s, "0x%08x:", io_log->result);
283 seq_printf(s, "%lu:", io_log->jiffies);
284 seq_printf(s, "%d:", io_log->refcount);
285 seq_printf(s, "%d:", io_log->req_cpu);
286 seq_printf(s, "%d:", io_log->int_cpu);
287 seq_printf(s, "%d:", io_log->rsp_cpu);
288 seq_printf(s, "%d\n", io_log->sge_type);
289
290 idx++;
291 if (idx == QEDF_IO_TRACE_SIZE)
292 idx = 0;
293 }
294 spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
295
296out:
297 return 0;
298}
299
300static int
301qedf_dbg_io_trace_open(struct inode *inode, struct file *file)
302{
303 struct qedf_dbg_ctx *qedf_dbg = inode->i_private;
304 struct qedf_ctx *qedf = container_of(qedf_dbg,
305 struct qedf_ctx, dbg_ctx);
306
307 return single_open(file, qedf_io_trace_show, qedf);
308}
309
310static int
311qedf_driver_stats_show(struct seq_file *s, void *unused)
312{
313 struct qedf_ctx *qedf = s->private;
314 struct qedf_rport *fcport;
315 struct fc_rport_priv *rdata;
316
 317 seq_printf(s, "cmd_mgr free io_reqs: %d\n",
318 atomic_read(&qedf->cmd_mgr->free_list_cnt));
319 seq_printf(s, "slow SGEs: %d\n", qedf->slow_sge_ios);
320 seq_printf(s, "single SGEs: %d\n", qedf->single_sge_ios);
321 seq_printf(s, "fast SGEs: %d\n\n", qedf->fast_sge_ios);
322
323 seq_puts(s, "Offloaded ports:\n\n");
324
325 rcu_read_lock();
326 list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
327 rdata = fcport->rdata;
328 if (rdata == NULL)
329 continue;
330 seq_printf(s, "%06x: free_sqes: %d, num_active_ios: %d\n",
331 rdata->ids.port_id, atomic_read(&fcport->free_sqes),
332 atomic_read(&fcport->num_active_ios));
333 }
334 rcu_read_unlock();
335
336 return 0;
337}
338
339static int
340qedf_dbg_driver_stats_open(struct inode *inode, struct file *file)
341{
342 struct qedf_dbg_ctx *qedf_dbg = inode->i_private;
343 struct qedf_ctx *qedf = container_of(qedf_dbg,
344 struct qedf_ctx, dbg_ctx);
345
346 return single_open(file, qedf_driver_stats_show, qedf);
347}
348
349static ssize_t
350qedf_dbg_clear_stats_cmd_read(struct file *filp, char __user *buffer,
351 size_t count, loff_t *ppos)
352{
353 int cnt = 0;
354
355 /* Essentially a read stub */
356 cnt = min_t(int, count, cnt - *ppos);
357 *ppos += cnt;
358 return cnt;
359}
360
361static ssize_t
362qedf_dbg_clear_stats_cmd_write(struct file *filp,
363 const char __user *buffer, size_t count,
364 loff_t *ppos)
365{
366 struct qedf_dbg_ctx *qedf_dbg =
367 (struct qedf_dbg_ctx *)filp->private_data;
368 struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx,
369 dbg_ctx);
370
371 QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "Clearing stat counters.\n");
372
373 if (!count || *ppos)
374 return 0;
375
376 /* Clear stat counters exposed by 'stats' node */
377 qedf->slow_sge_ios = 0;
378 qedf->single_sge_ios = 0;
379 qedf->fast_sge_ios = 0;
380
381 return count;
382}
383
384static int
385qedf_offload_stats_show(struct seq_file *s, void *unused)
386{
387 struct qedf_ctx *qedf = s->private;
388 struct qed_fcoe_stats *fw_fcoe_stats;
389
390 fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL);
391 if (!fw_fcoe_stats) {
392 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for "
393 "fw_fcoe_stats.\n");
394 goto out;
395 }
396
397 /* Query firmware for offload stats */
398 qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);
399
400 seq_printf(s, "fcoe_rx_byte_cnt=%llu\n"
401 "fcoe_rx_data_pkt_cnt=%llu\n"
402 "fcoe_rx_xfer_pkt_cnt=%llu\n"
403 "fcoe_rx_other_pkt_cnt=%llu\n"
404 "fcoe_silent_drop_pkt_cmdq_full_cnt=%u\n"
405 "fcoe_silent_drop_pkt_crc_error_cnt=%u\n"
406 "fcoe_silent_drop_pkt_task_invalid_cnt=%u\n"
407 "fcoe_silent_drop_total_pkt_cnt=%u\n"
408 "fcoe_silent_drop_pkt_rq_full_cnt=%u\n"
409 "fcoe_tx_byte_cnt=%llu\n"
410 "fcoe_tx_data_pkt_cnt=%llu\n"
411 "fcoe_tx_xfer_pkt_cnt=%llu\n"
412 "fcoe_tx_other_pkt_cnt=%llu\n",
413 fw_fcoe_stats->fcoe_rx_byte_cnt,
414 fw_fcoe_stats->fcoe_rx_data_pkt_cnt,
415 fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt,
416 fw_fcoe_stats->fcoe_rx_other_pkt_cnt,
417 fw_fcoe_stats->fcoe_silent_drop_pkt_cmdq_full_cnt,
418 fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt,
419 fw_fcoe_stats->fcoe_silent_drop_pkt_task_invalid_cnt,
420 fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt,
421 fw_fcoe_stats->fcoe_silent_drop_pkt_rq_full_cnt,
422 fw_fcoe_stats->fcoe_tx_byte_cnt,
423 fw_fcoe_stats->fcoe_tx_data_pkt_cnt,
424 fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt,
425 fw_fcoe_stats->fcoe_tx_other_pkt_cnt);
426
427 kfree(fw_fcoe_stats);
428out:
429 return 0;
430}
431
432static int
433qedf_dbg_offload_stats_open(struct inode *inode, struct file *file)
434{
435 struct qedf_dbg_ctx *qedf_dbg = inode->i_private;
436 struct qedf_ctx *qedf = container_of(qedf_dbg,
437 struct qedf_ctx, dbg_ctx);
438
439 return single_open(file, qedf_offload_stats_show, qedf);
440}
441
442
443const struct file_operations qedf_dbg_fops[] = {
444 qedf_dbg_fileops(qedf, fp_int),
445 qedf_dbg_fileops_seq(qedf, io_trace),
446 qedf_dbg_fileops(qedf, debug),
447 qedf_dbg_fileops(qedf, stop_io_on_error),
448 qedf_dbg_fileops_seq(qedf, driver_stats),
449 qedf_dbg_fileops(qedf, clear_stats),
450 qedf_dbg_fileops_seq(qedf, offload_stats),
451 /* This must be last */
452 { NULL, NULL },
453};
454
455#else /* CONFIG_DEBUG_FS */
456void qedf_dbg_host_init(struct qedf_dbg_ctx *);
457void qedf_dbg_host_exit(struct qedf_dbg_ctx *);
458void qedf_dbg_init(char *);
459void qedf_dbg_exit(void);
460#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
new file mode 100644
index 000000000000..78f1c252b649
--- /dev/null
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -0,0 +1,949 @@
1/*
2 * QLogic FCoE Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9#include "qedf.h"
10
11/* It's assumed that the lock is held when calling this function. */
12static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
13 void *data, uint32_t data_len,
14 void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
15 struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
16{
17 struct qedf_ctx *qedf = fcport->qedf;
18 struct fc_lport *lport = qedf->lport;
19 struct qedf_ioreq *els_req;
20 struct qedf_mp_req *mp_req;
21 struct fc_frame_header *fc_hdr;
22 struct fcoe_task_context *task;
23 int rc = 0;
24 uint32_t did, sid;
25 uint16_t xid;
26 uint32_t start_time = jiffies / HZ;
27 uint32_t current_time;
28
29 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");
30
31 rc = fc_remote_port_chkready(fcport->rport);
32 if (rc) {
33 QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
34 rc = -EAGAIN;
35 goto els_err;
36 }
37 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
38 QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
39 op);
40 rc = -EAGAIN;
41 goto els_err;
42 }
43
44 if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
45 QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
46 rc = -EINVAL;
47 goto els_err;
48 }
49
50retry_els:
51 els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
52 if (!els_req) {
53 current_time = jiffies / HZ;
54 if ((current_time - start_time) > 10) {
55 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
56 "els: Failed els 0x%x\n", op);
57 rc = -ENOMEM;
58 goto els_err;
59 }
 60 mdelay(20);
61 goto retry_els;
62 }
63
64 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
65 "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg,
66 els_req->xid);
67 els_req->sc_cmd = NULL;
68 els_req->cmd_type = QEDF_ELS;
69 els_req->fcport = fcport;
70 els_req->cb_func = cb_func;
71 cb_arg->io_req = els_req;
72 cb_arg->op = op;
73 els_req->cb_arg = cb_arg;
74 els_req->data_xfer_len = data_len;
75
76 /* Record which cpu this request is associated with */
77 els_req->cpu = smp_processor_id();
78
79 mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
80 rc = qedf_init_mp_req(els_req);
81 if (rc) {
82 QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
83 kref_put(&els_req->refcount, qedf_release_cmd);
84 goto els_err;
85 } else {
86 rc = 0;
87 }
88
89 /* Fill ELS Payload */
90 if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
91 memcpy(mp_req->req_buf, data, data_len);
92 } else {
93 QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
94 els_req->cb_func = NULL;
95 els_req->cb_arg = NULL;
96 kref_put(&els_req->refcount, qedf_release_cmd);
97 rc = -EINVAL;
98 }
99
100 if (rc)
101 goto els_err;
102
103 /* Fill FC header */
104 fc_hdr = &(mp_req->req_fc_hdr);
105
106 did = fcport->rdata->ids.port_id;
107 sid = fcport->sid;
108
109 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, sid, did,
110 FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
111 FC_FC_SEQ_INIT, 0);
112
113 /* Obtain exchange id */
114 xid = els_req->xid;
115
116 /* Initialize task context for this IO request */
117 task = qedf_get_task_mem(&qedf->tasks, xid);
118 qedf_init_mp_task(els_req, task);
119
120 /* Put timer on original I/O request */
121 if (timer_msec)
122 qedf_cmd_timer_set(qedf, els_req, timer_msec);
123
124 qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);
125
126 /* Ring doorbell */
127 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
128 "req\n");
129 qedf_ring_doorbell(fcport);
130els_err:
131 return rc;
132}
133
134void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
135 struct qedf_ioreq *els_req)
136{
137 struct fcoe_task_context *task_ctx;
138 struct scsi_cmnd *sc_cmd;
139 uint16_t xid;
140 struct fcoe_cqe_midpath_info *mp_info;
141
142 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
143 " cmd_type = %d.\n", els_req->xid, els_req->cmd_type);
144
145 /* Kill the ELS timer */
146 cancel_delayed_work(&els_req->timeout_work);
147
148 xid = els_req->xid;
149 task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
150 sc_cmd = els_req->sc_cmd;
151
152 /* Get ELS response length from CQE */
153 mp_info = &cqe->cqe_info.midpath_info;
154 els_req->mp_req.resp_len = mp_info->data_placement_size;
155
156 /* Parse ELS response */
157 if ((els_req->cb_func) && (els_req->cb_arg)) {
158 els_req->cb_func(els_req->cb_arg);
159 els_req->cb_arg = NULL;
160 }
161
162 kref_put(&els_req->refcount, qedf_release_cmd);
163}
164
165static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
166{
167 struct qedf_ioreq *orig_io_req;
168 struct qedf_ioreq *rrq_req;
169 struct qedf_ctx *qedf;
170 int refcount;
171
172 rrq_req = cb_arg->io_req;
173 qedf = rrq_req->fcport->qedf;
174
175 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");
176
177 orig_io_req = cb_arg->aborted_io_req;
178
179 if (!orig_io_req)
180 goto out_free;
181
182 if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
183 rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
184 cancel_delayed_work_sync(&orig_io_req->timeout_work);
185
186 refcount = atomic_read(&orig_io_req->refcount.refcount);
187 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
188 " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
189 orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);
190
191 /* This should return the aborted io_req to the command pool */
192 if (orig_io_req)
193 kref_put(&orig_io_req->refcount, qedf_release_cmd);
194
195out_free:
196 kfree(cb_arg);
197}
198
199/* Assumes kref is already held by caller */
200int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
201{
202
203 struct fc_els_rrq rrq;
204 struct qedf_rport *fcport;
205 struct fc_lport *lport;
206 struct qedf_els_cb_arg *cb_arg = NULL;
207 struct qedf_ctx *qedf;
208 uint32_t sid;
209 uint32_t r_a_tov;
210 int rc;
211
212 if (!aborted_io_req) {
213 QEDF_ERR(NULL, "abort_io_req is NULL.\n");
214 return -EINVAL;
215 }
216
217 fcport = aborted_io_req->fcport;
218
219 /* Check that fcport is still offloaded */
220 if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
221 QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
222 return -EINVAL;
223 }
224
225 if (!fcport->qedf) {
226 QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
227 return -EINVAL;
228 }
229
230 qedf = fcport->qedf;
231 lport = qedf->lport;
232 sid = fcport->sid;
233 r_a_tov = lport->r_a_tov;
234
235 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig "
236 "io = %p, orig_xid = 0x%x\n", aborted_io_req,
237 aborted_io_req->xid);
238 memset(&rrq, 0, sizeof(rrq));
239
240 cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
241 if (!cb_arg) {
242 QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
243 "RRQ\n");
244 rc = -ENOMEM;
245 goto rrq_err;
246 }
247
248 cb_arg->aborted_io_req = aborted_io_req;
249
250 rrq.rrq_cmd = ELS_RRQ;
251 hton24(rrq.rrq_s_id, sid);
252 rrq.rrq_ox_id = htons(aborted_io_req->xid);
253 rrq.rrq_rx_id =
254 htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);
255
256 rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
257 qedf_rrq_compl, cb_arg, r_a_tov);
258
259rrq_err:
260 if (rc) {
261 QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io "
262 "req 0x%x\n", aborted_io_req->xid);
263 kfree(cb_arg);
264 kref_put(&aborted_io_req->refcount, qedf_release_cmd);
265 }
266 return rc;
267}
268
269static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
270 struct fc_frame *fp,
271 u16 l2_oxid)
272{
273 struct fc_lport *lport = fcport->qedf->lport;
274 struct fc_frame_header *fh;
275 u32 crc;
276
277 fh = (struct fc_frame_header *)fc_frame_header_get(fp);
278
279 /* Set the OXID we return to what libfc used */
280 if (l2_oxid != FC_XID_UNKNOWN)
281 fh->fh_ox_id = htons(l2_oxid);
282
283 /* Setup header fields */
284 fh->fh_r_ctl = FC_RCTL_ELS_REP;
285 fh->fh_type = FC_TYPE_ELS;
286 /* Last sequence, end sequence */
287 fh->fh_f_ctl[0] = 0x98;
288 hton24(fh->fh_d_id, lport->port_id);
289 hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
290 fh->fh_rx_id = 0xffff;
291
292 /* Set frame attributes */
293 crc = fcoe_fc_crc(fp);
294 fc_frame_init(fp);
295 fr_dev(fp) = lport;
296 fr_sof(fp) = FC_SOF_I3;
297 fr_eof(fp) = FC_EOF_T;
298 fr_crc(fp) = cpu_to_le32(~crc);
299
300 /* Send completed request to libfc */
301 fc_exch_recv(lport, fp);
302}
303
304/*
305 * In instances where an ELS command times out we may need to restart the
306 * rport by logging out and then logging back in.
307 */
308void qedf_restart_rport(struct qedf_rport *fcport)
309{
310 struct fc_lport *lport;
311 struct fc_rport_priv *rdata;
312 u32 port_id;
313
314 if (!fcport)
315 return;
316
317 rdata = fcport->rdata;
318 if (rdata) {
319 lport = fcport->qedf->lport;
320 port_id = rdata->ids.port_id;
321 QEDF_ERR(&(fcport->qedf->dbg_ctx),
322 "LOGO port_id=%x.\n", port_id);
323 fc_rport_logoff(rdata);
324 /* Recreate the rport and log back in */
325 rdata = fc_rport_create(lport, port_id);
326 if (rdata)
327 fc_rport_login(rdata);
328 }
329}
330
331static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
332{
333 struct qedf_ioreq *els_req;
334 struct qedf_rport *fcport;
335 struct qedf_mp_req *mp_req;
336 struct fc_frame *fp;
337 struct fc_frame_header *fh, *mp_fc_hdr;
338 void *resp_buf, *fc_payload;
339 u32 resp_len;
340 u16 l2_oxid;
341
342 l2_oxid = cb_arg->l2_oxid;
343 els_req = cb_arg->io_req;
344
345 if (!els_req) {
346 QEDF_ERR(NULL, "els_req is NULL.\n");
347 goto free_arg;
348 }
349
350 /*
351 * If we are flushing the command just free the cb_arg as none of the
352 * response data will be valid.
353 */
354 if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
355 goto free_arg;
356
357 fcport = els_req->fcport;
358 mp_req = &(els_req->mp_req);
359 mp_fc_hdr = &(mp_req->resp_fc_hdr);
360 resp_len = mp_req->resp_len;
361 resp_buf = mp_req->resp_buf;
362
363 /*
364 * If a middle path ELS command times out, don't try to return
365	 * the command but rather do any internal cleanup and then let
366	 * libfc time out the command and clean up its internal resources.
367 */
368 if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
369 /*
370 * If ADISC times out, libfc will timeout the exchange and then
371 * try to send a PLOGI which will timeout since the session is
372 * still offloaded. Force libfc to logout the session which
373		 * will upload the connection and allow the PLOGI response to
374 * flow over the LL2 path.
375 */
376 if (cb_arg->op == ELS_ADISC)
377 qedf_restart_rport(fcport);
378 return;
379 }
380
381 if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
382 QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is "
383 "beyond page size.\n");
384 goto free_arg;
385 }
386
387 fp = fc_frame_alloc(fcport->qedf->lport, resp_len);
388 if (!fp) {
389 QEDF_ERR(&(fcport->qedf->dbg_ctx),
390 "fc_frame_alloc failure.\n");
391 return;
392 }
393
394 /* Copy frame header from firmware into fp */
395 fh = (struct fc_frame_header *)fc_frame_header_get(fp);
396 memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));
397
398 /* Copy payload from firmware into fp */
399 fc_payload = fc_frame_payload_get(fp, resp_len);
400 memcpy(fc_payload, resp_buf, resp_len);
401
402 QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
403 "Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
404 qedf_process_l2_frame_compl(fcport, fp, l2_oxid);
405
406free_arg:
407 kfree(cb_arg);
408}
409
410int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
411{
412 struct fc_els_adisc *adisc;
413 struct fc_frame_header *fh;
414 struct fc_lport *lport = fcport->qedf->lport;
415 struct qedf_els_cb_arg *cb_arg = NULL;
416 struct qedf_ctx *qedf;
417 uint32_t r_a_tov = lport->r_a_tov;
418 int rc;
419
420 qedf = fcport->qedf;
421 fh = fc_frame_header_get(fp);
422
423 cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
424 if (!cb_arg) {
425 QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
426 "ADISC\n");
427 rc = -ENOMEM;
428 goto adisc_err;
429 }
430 cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
431
432 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
433 "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);
434
435 adisc = fc_frame_payload_get(fp, sizeof(*adisc));
436
437 rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
438 qedf_l2_els_compl, cb_arg, r_a_tov);
439
440adisc_err:
441 if (rc) {
442 QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
443 kfree(cb_arg);
444 }
445 return rc;
446}
447
448static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
449{
450 struct qedf_ioreq *orig_io_req;
451 struct qedf_ioreq *srr_req;
452 struct qedf_mp_req *mp_req;
453 struct fc_frame_header *mp_fc_hdr, *fh;
454 struct fc_frame *fp;
455 void *resp_buf, *fc_payload;
456 u32 resp_len;
457 struct fc_lport *lport;
458 struct qedf_ctx *qedf;
459 int refcount;
460 u8 opcode;
461
462 srr_req = cb_arg->io_req;
463 qedf = srr_req->fcport->qedf;
464 lport = qedf->lport;
465
466 orig_io_req = cb_arg->aborted_io_req;
467
468 if (!orig_io_req)
469 goto out_free;
470
471 clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);
472
473 if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
474 srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
475 cancel_delayed_work_sync(&orig_io_req->timeout_work);
476
477 refcount = atomic_read(&orig_io_req->refcount.refcount);
478 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
479		" orig_io_xid=0x%x, srr_xid=0x%x, refcount=%d\n",
480 orig_io_req, orig_io_req->xid, srr_req->xid, refcount);
481
482 /* If a SRR times out, simply free resources */
483 if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
484 goto out_free;
485
486 /* Normalize response data into struct fc_frame */
487 mp_req = &(srr_req->mp_req);
488 mp_fc_hdr = &(mp_req->resp_fc_hdr);
489 resp_len = mp_req->resp_len;
490 resp_buf = mp_req->resp_buf;
491
492 fp = fc_frame_alloc(lport, resp_len);
493 if (!fp) {
494 QEDF_ERR(&(qedf->dbg_ctx),
495 "fc_frame_alloc failure.\n");
496 goto out_free;
497 }
498
499 /* Copy frame header from firmware into fp */
500 fh = (struct fc_frame_header *)fc_frame_header_get(fp);
501 memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));
502
503 /* Copy payload from firmware into fp */
504 fc_payload = fc_frame_payload_get(fp, resp_len);
505 memcpy(fc_payload, resp_buf, resp_len);
506
507 opcode = fc_frame_payload_op(fp);
508 switch (opcode) {
509 case ELS_LS_ACC:
510 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
511 "SRR success.\n");
512 break;
513 case ELS_LS_RJT:
514 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
515 "SRR rejected.\n");
516 qedf_initiate_abts(orig_io_req, true);
517 break;
518 }
519
520 fc_frame_free(fp);
521out_free:
522 /* Put reference for original command since SRR completed */
523 kref_put(&orig_io_req->refcount, qedf_release_cmd);
524 kfree(cb_arg);
525}
526
527static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
528{
529 struct fcp_srr srr;
530 struct qedf_ctx *qedf;
531 struct qedf_rport *fcport;
532 struct fc_lport *lport;
533 struct qedf_els_cb_arg *cb_arg = NULL;
534 u32 sid, r_a_tov;
535 int rc;
536
537 if (!orig_io_req) {
538 QEDF_ERR(NULL, "orig_io_req is NULL.\n");
539 return -EINVAL;
540 }
541
542 fcport = orig_io_req->fcport;
543
544 /* Check that fcport is still offloaded */
545 if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
546 QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
547 return -EINVAL;
548 }
549
550 if (!fcport->qedf) {
551 QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
552 return -EINVAL;
553 }
554
555 /* Take reference until SRR command completion */
556 kref_get(&orig_io_req->refcount);
557
558 qedf = fcport->qedf;
559 lport = qedf->lport;
560 sid = fcport->sid;
561 r_a_tov = lport->r_a_tov;
562
563 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
564 "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid);
565 memset(&srr, 0, sizeof(srr));
566
567 cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
568 if (!cb_arg) {
569 QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
570 "SRR\n");
571 rc = -ENOMEM;
572 goto srr_err;
573 }
574
575 cb_arg->aborted_io_req = orig_io_req;
576
577 srr.srr_op = ELS_SRR;
578 srr.srr_ox_id = htons(orig_io_req->xid);
579 srr.srr_rx_id = htons(orig_io_req->rx_id);
580 srr.srr_rel_off = htonl(offset);
581 srr.srr_r_ctl = r_ctl;
582
583 rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
584 qedf_srr_compl, cb_arg, r_a_tov);
585
586srr_err:
587 if (rc) {
588 QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req"
589 "=0x%x\n", orig_io_req->xid);
590 kfree(cb_arg);
591 /* If we fail to queue SRR, send ABTS to orig_io */
592 qedf_initiate_abts(orig_io_req, true);
593 kref_put(&orig_io_req->refcount, qedf_release_cmd);
594 } else
595 /* Tell other threads that SRR is in progress */
596 set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);
597
598 return rc;
599}
600
601static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
602 u32 offset, u8 r_ctl)
603{
604 struct qedf_rport *fcport;
605 unsigned long flags;
606 struct qedf_els_cb_arg *cb_arg;
607
608 fcport = orig_io_req->fcport;
609
610 QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
611 "Doing sequence cleanup for xid=0x%x offset=%u.\n",
612 orig_io_req->xid, offset);
613
614 cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
615 if (!cb_arg) {
616 QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg "
617 "for sequence cleanup\n");
618 return;
619 }
620
621 /* Get reference for cleanup request */
622 kref_get(&orig_io_req->refcount);
623
624 orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
625 cb_arg->offset = offset;
626 cb_arg->r_ctl = r_ctl;
627 orig_io_req->cb_arg = cb_arg;
628
629 qedf_cmd_timer_set(fcport->qedf, orig_io_req,
630 QEDF_CLEANUP_TIMEOUT * HZ);
631
632 spin_lock_irqsave(&fcport->rport_lock, flags);
633
634 qedf_add_to_sq(fcport, orig_io_req->xid, 0,
635 FCOE_TASK_TYPE_SEQUENCE_CLEANUP, offset);
636 qedf_ring_doorbell(fcport);
637
638 spin_unlock_irqrestore(&fcport->rport_lock, flags);
639}
640
641void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
642 struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
643{
644 int rc;
645 struct qedf_els_cb_arg *cb_arg;
646
647 cb_arg = io_req->cb_arg;
648
649 /* If we timed out just free resources */
650 if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe)
651 goto free;
652
653 /* Kill the timer we put on the request */
654 cancel_delayed_work_sync(&io_req->timeout_work);
655
656 rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);
657 if (rc)
658 QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will "
659 "abort, xid=0x%x.\n", io_req->xid);
660free:
661 kfree(cb_arg);
662 kref_put(&io_req->refcount, qedf_release_cmd);
663}
664
665static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
666{
667 struct qedf_rport *fcport;
668 struct qedf_ioreq *new_io_req;
669 unsigned long flags;
670 bool rc = false;
671
672 fcport = orig_io_req->fcport;
673 if (!fcport) {
674 QEDF_ERR(NULL, "fcport is NULL.\n");
675 goto out;
676 }
677
678 if (!orig_io_req->sc_cmd) {
679 QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for "
680 "xid=0x%x.\n", orig_io_req->xid);
681 goto out;
682 }
683
684 new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
685 if (!new_io_req) {
686 QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new "
687 "io_req.\n");
688 goto out;
689 }
690
691 new_io_req->sc_cmd = orig_io_req->sc_cmd;
692
693 /*
694 * This keeps the sc_cmd struct from being returned to the tape
695 * driver and being requeued twice. We do need to put a reference
696 * for the original I/O request since we will not do a SCSI completion
697 * for it.
698 */
699 orig_io_req->sc_cmd = NULL;
700 kref_put(&orig_io_req->refcount, qedf_release_cmd);
701
702 spin_lock_irqsave(&fcport->rport_lock, flags);
703
704 /* kref for new command released in qedf_post_io_req on error */
705 if (qedf_post_io_req(fcport, new_io_req)) {
706 QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
707 /* Return SQE to pool */
708 atomic_inc(&fcport->free_sqes);
709 } else {
710 QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
711 "Reissued SCSI command from orig_xid=0x%x on "
712 "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid);
713 /*
714 * Abort the original I/O but do not return SCSI command as
715 * it has been reissued on another OX_ID.
716 */
717 spin_unlock_irqrestore(&fcport->rport_lock, flags);
718 qedf_initiate_abts(orig_io_req, false);
719 goto out;
720 }
721
722 spin_unlock_irqrestore(&fcport->rport_lock, flags);
723out:
724 return rc;
725}
726
727
728static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
729{
730 struct qedf_ioreq *orig_io_req;
731 struct qedf_ioreq *rec_req;
732 struct qedf_mp_req *mp_req;
733 struct fc_frame_header *mp_fc_hdr, *fh;
734 struct fc_frame *fp;
735 void *resp_buf, *fc_payload;
736 u32 resp_len;
737 struct fc_lport *lport;
738 struct qedf_ctx *qedf;
739 int refcount;
740 enum fc_rctl r_ctl;
741 struct fc_els_ls_rjt *rjt;
742 struct fc_els_rec_acc *acc;
743 u8 opcode;
744 u32 offset, e_stat;
745 struct scsi_cmnd *sc_cmd;
746 bool srr_needed = false;
747
748 rec_req = cb_arg->io_req;
749 qedf = rec_req->fcport->qedf;
750 lport = qedf->lport;
751
752 orig_io_req = cb_arg->aborted_io_req;
753
754 if (!orig_io_req)
755 goto out_free;
756
757 if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
758 rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
759 cancel_delayed_work_sync(&orig_io_req->timeout_work);
760
761 refcount = atomic_read(&orig_io_req->refcount.refcount);
762 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
763 " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
764 orig_io_req, orig_io_req->xid, rec_req->xid, refcount);
765
766 /* If a REC times out, free resources */
767 if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
768 goto out_free;
769
770 /* Normalize response data into struct fc_frame */
771 mp_req = &(rec_req->mp_req);
772 mp_fc_hdr = &(mp_req->resp_fc_hdr);
773 resp_len = mp_req->resp_len;
774 acc = resp_buf = mp_req->resp_buf;
775
776 fp = fc_frame_alloc(lport, resp_len);
777 if (!fp) {
778 QEDF_ERR(&(qedf->dbg_ctx),
779 "fc_frame_alloc failure.\n");
780 goto out_free;
781 }
782
783 /* Copy frame header from firmware into fp */
784 fh = (struct fc_frame_header *)fc_frame_header_get(fp);
785 memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));
786
787 /* Copy payload from firmware into fp */
788 fc_payload = fc_frame_payload_get(fp, resp_len);
789 memcpy(fc_payload, resp_buf, resp_len);
790
791 opcode = fc_frame_payload_op(fp);
792 if (opcode == ELS_LS_RJT) {
793 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
794 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
795 "Received LS_RJT for REC: er_reason=0x%x, "
796 "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
797 /*
798 * The following response(s) mean that we need to reissue the
799 * request on another exchange. We need to do this without
800 * informing the upper layers lest it cause an application
801 * error.
802 */
803 if ((rjt->er_reason == ELS_RJT_LOGIC ||
804 rjt->er_reason == ELS_RJT_UNAB) &&
805 rjt->er_explan == ELS_EXPL_OXID_RXID) {
806 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
807 "Handle CMD LOST case.\n");
808 qedf_requeue_io_req(orig_io_req);
809 }
810 } else if (opcode == ELS_LS_ACC) {
811 offset = ntohl(acc->reca_fc4value);
812 e_stat = ntohl(acc->reca_e_stat);
813 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
814 "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
815 offset, e_stat);
816 if (e_stat & ESB_ST_SEQ_INIT) {
817 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
818 "Target has the seq init\n");
819 goto out_free_frame;
820 }
821 sc_cmd = orig_io_req->sc_cmd;
822 if (!sc_cmd) {
823 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
824 "sc_cmd is NULL for xid=0x%x.\n",
825 orig_io_req->xid);
826 goto out_free_frame;
827 }
828 /* SCSI write case */
829 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
830 if (offset == orig_io_req->data_xfer_len) {
831 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
832 "WRITE - response lost.\n");
833 r_ctl = FC_RCTL_DD_CMD_STATUS;
834 srr_needed = true;
835 offset = 0;
836 } else {
837 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
838 "WRITE - XFER_RDY/DATA lost.\n");
839 r_ctl = FC_RCTL_DD_DATA_DESC;
840 /* Use data from warning CQE instead of REC */
841 offset = orig_io_req->tx_buf_off;
842 }
843 /* SCSI read case */
844 } else {
845 if (orig_io_req->rx_buf_off ==
846 orig_io_req->data_xfer_len) {
847 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
848 "READ - response lost.\n");
849 srr_needed = true;
850 r_ctl = FC_RCTL_DD_CMD_STATUS;
851 offset = 0;
852 } else {
853 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
854 "READ - DATA lost.\n");
855 /*
856 * For read case we always set the offset to 0
857 * for sequence recovery task.
858 */
859 offset = 0;
860 r_ctl = FC_RCTL_DD_SOL_DATA;
861 }
862 }
863
864 if (srr_needed)
865 qedf_send_srr(orig_io_req, offset, r_ctl);
866 else
867 qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
868 }
869
870out_free_frame:
871 fc_frame_free(fp);
872out_free:
873 /* Put reference for original command since REC completed */
874 kref_put(&orig_io_req->refcount, qedf_release_cmd);
875 kfree(cb_arg);
876}
877
878/* Assumes kref is already held by caller */
879int qedf_send_rec(struct qedf_ioreq *orig_io_req)
880{
881
882 struct fc_els_rec rec;
883 struct qedf_rport *fcport;
884 struct fc_lport *lport;
885 struct qedf_els_cb_arg *cb_arg = NULL;
886 struct qedf_ctx *qedf;
887 uint32_t sid;
888 uint32_t r_a_tov;
889 int rc;
890
891 if (!orig_io_req) {
892 QEDF_ERR(NULL, "orig_io_req is NULL.\n");
893 return -EINVAL;
894 }
895
896 fcport = orig_io_req->fcport;
897
898 /* Check that fcport is still offloaded */
899 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
900 QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
901 return -EINVAL;
902 }
903
904 if (!fcport->qedf) {
905 QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
906 return -EINVAL;
907 }
908
909 /* Take reference until REC command completion */
910 kref_get(&orig_io_req->refcount);
911
912 qedf = fcport->qedf;
913 lport = qedf->lport;
914 sid = fcport->sid;
915 r_a_tov = lport->r_a_tov;
916
917 memset(&rec, 0, sizeof(rec));
918
919 cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
920 if (!cb_arg) {
921 QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
922 "REC\n");
923 rc = -ENOMEM;
924 goto rec_err;
925 }
926
927 cb_arg->aborted_io_req = orig_io_req;
928
929 rec.rec_cmd = ELS_REC;
930 hton24(rec.rec_s_id, sid);
931 rec.rec_ox_id = htons(orig_io_req->xid);
932 rec.rec_rx_id =
933 htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);
934
935 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, "
936 "orig_xid=0x%x rx_id=0x%x\n", orig_io_req,
937 orig_io_req->xid, rec.rec_rx_id);
938 rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
939 qedf_rec_compl, cb_arg, r_a_tov);
940
941rec_err:
942 if (rc) {
943 QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req"
944 "=0x%x\n", orig_io_req->xid);
945 kfree(cb_arg);
946 kref_put(&orig_io_req->refcount, qedf_release_cmd);
947 }
948 return rc;
949}
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
new file mode 100644
index 000000000000..868d423380d1
--- /dev/null
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -0,0 +1,269 @@
1/*
2 * QLogic FCoE Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9#include <linux/if_ether.h>
10#include <linux/if_vlan.h>
11#include "qedf.h"
12
13extern const struct qed_fcoe_ops *qed_ops;
14/*
15 * FIP VLAN functions that will eventually move to libfcoe.
16 */
17
18void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf)
19{
20 struct sk_buff *skb;
21 char *eth_fr;
22 int fr_len;
23 struct fip_vlan *vlan;
24#define MY_FIP_ALL_FCF_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 })
25 static u8 my_fcoe_all_fcfs[ETH_ALEN] = MY_FIP_ALL_FCF_MACS;
26
27 skb = dev_alloc_skb(sizeof(struct fip_vlan));
28 if (!skb)
29 return;
30
31 fr_len = sizeof(*vlan);
32 eth_fr = (char *)skb->data;
33 vlan = (struct fip_vlan *)eth_fr;
34
35 memset(vlan, 0, sizeof(*vlan));
36 ether_addr_copy(vlan->eth.h_source, qedf->mac);
37 ether_addr_copy(vlan->eth.h_dest, my_fcoe_all_fcfs);
38 vlan->eth.h_proto = htons(ETH_P_FIP);
39
40 vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
41 vlan->fip.fip_op = htons(FIP_OP_VLAN);
42 vlan->fip.fip_subcode = FIP_SC_VL_REQ;
43 vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
44
45 vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
46 vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
47 ether_addr_copy(vlan->desc.mac.fd_mac, qedf->mac);
48
49 vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
50 vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
51 put_unaligned_be64(qedf->lport->wwnn, &vlan->desc.wwnn.fd_wwn);
52
53 skb_put(skb, sizeof(*vlan));
54 skb->protocol = htons(ETH_P_FIP);
55 skb_reset_mac_header(skb);
56 skb_reset_network_header(skb);
57
58 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Sending FIP VLAN "
59	    "request.\n");
60
61 if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
62 QEDF_WARN(&(qedf->dbg_ctx), "Cannot send vlan request "
63 "because link is not up.\n");
64
65 kfree_skb(skb);
66 return;
67 }
68 qed_ops->ll2->start_xmit(qedf->cdev, skb);
69}
70
71static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf,
72 struct sk_buff *skb)
73{
74 struct fip_header *fiph;
75 struct fip_desc *desc;
76 u16 vid = 0;
77 ssize_t rlen;
78 size_t dlen;
79
80 fiph = (struct fip_header *)(((void *)skb->data) + 2 * ETH_ALEN + 2);
81
82 rlen = ntohs(fiph->fip_dl_len) * 4;
83 desc = (struct fip_desc *)(fiph + 1);
84 while (rlen > 0) {
85 dlen = desc->fip_dlen * FIP_BPW;
86 switch (desc->fip_dtype) {
87 case FIP_DT_VLAN:
88 vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
89 break;
90 }
91 desc = (struct fip_desc *)((char *)desc + dlen);
92 rlen -= dlen;
93 }
94
95 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "VLAN response, "
96 "vid=0x%x.\n", vid);
97
98 if (vid > 0 && qedf->vlan_id != vid) {
99 qedf_set_vlan_id(qedf, vid);
100
101		/* Inform waiter that it's ok to call fcoe_ctlr_link_up() */
102 complete(&qedf->fipvlan_compl);
103 }
104}
105
106void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
107{
108 struct qedf_ctx *qedf = container_of(fip, struct qedf_ctx, ctlr);
109 struct ethhdr *eth_hdr;
110 struct vlan_ethhdr *vlan_hdr;
111 struct fip_header *fiph;
112 u16 op, vlan_tci = 0;
113 u8 sub;
114
115 if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) {
116 QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n");
117 kfree_skb(skb);
118 return;
119 }
120
121 fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2);
122 eth_hdr = (struct ethhdr *)skb_mac_header(skb);
123 op = ntohs(fiph->fip_op);
124 sub = fiph->fip_subcode;
125
126 if (!qedf->vlan_hw_insert) {
127 vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, sizeof(*vlan_hdr)
128 - sizeof(*eth_hdr));
129 memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
130 vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
131 vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
132 vlan_hdr->h_vlan_TCI = vlan_tci = htons(qedf->vlan_id);
133 }
134
135 /* Update eth_hdr since we added a VLAN tag */
136 eth_hdr = (struct ethhdr *)skb_mac_header(skb);
137
138 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FIP frame send: "
139	    "dest=%pM op=%x sub=%x vlan=%04x.\n", eth_hdr->h_dest, op, sub,
140 ntohs(vlan_tci));
141 if (qedf_dump_frames)
142 print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1,
143 skb->data, skb->len, false);
144
145 qed_ops->ll2->start_xmit(qedf->cdev, skb);
146}
147
148/* Process incoming FIP frames. */
149void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
150{
151 struct ethhdr *eth_hdr;
152 struct fip_header *fiph;
153 struct fip_desc *desc;
154 struct fip_mac_desc *mp;
155 struct fip_wwn_desc *wp;
156 struct fip_vn_desc *vp;
157 size_t rlen, dlen;
158 uint32_t cvl_port_id;
159 __u8 cvl_mac[ETH_ALEN];
160 u16 op;
161 u8 sub;
162
163 eth_hdr = (struct ethhdr *)skb_mac_header(skb);
164 fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2);
165 op = ntohs(fiph->fip_op);
166 sub = fiph->fip_subcode;
167
168 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FIP frame received: "
169	    "skb=%p fiph=%p source=%pM op=%x sub=%x\n", skb, fiph,
170 eth_hdr->h_source, op, sub);
171 if (qedf_dump_frames)
172 print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1,
173 skb->data, skb->len, false);
174
175 /* Handle FIP VLAN resp in the driver */
176 if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
177 qedf_fcoe_process_vlan_resp(qedf, skb);
178 qedf->vlan_hw_insert = 0;
179 kfree_skb(skb);
180 } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
181 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Clear virtual "
182 "link received.\n");
183
184 /* Check that an FCF has been selected by fcoe */
185 if (qedf->ctlr.sel_fcf == NULL) {
186 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
187 "Dropping CVL since FCF has not been selected "
188			    "yet.\n");
189 return;
190 }
191
192 cvl_port_id = 0;
193 memset(cvl_mac, 0, ETH_ALEN);
194 /*
195 * We need to loop through the CVL descriptors to determine
196 * if we want to reset the fcoe link
197 */
198 rlen = ntohs(fiph->fip_dl_len) * FIP_BPW;
199 desc = (struct fip_desc *)(fiph + 1);
200 while (rlen >= sizeof(*desc)) {
201 dlen = desc->fip_dlen * FIP_BPW;
202 switch (desc->fip_dtype) {
203 case FIP_DT_MAC:
204 mp = (struct fip_mac_desc *)desc;
205 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
206				    "fd_mac=%pM.\n", mp->fd_mac);
207 ether_addr_copy(cvl_mac, mp->fd_mac);
208 break;
209 case FIP_DT_NAME:
210 wp = (struct fip_wwn_desc *)desc;
211 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
212 "fc_wwpn=%016llx.\n",
213 get_unaligned_be64(&wp->fd_wwn));
214 break;
215 case FIP_DT_VN_ID:
216 vp = (struct fip_vn_desc *)desc;
217 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
218 "fd_fc_id=%x.\n", ntoh24(vp->fd_fc_id));
219 cvl_port_id = ntoh24(vp->fd_fc_id);
220 break;
221 default:
222 /* Ignore anything else */
223 break;
224 }
225 desc = (struct fip_desc *)((char *)desc + dlen);
226 rlen -= dlen;
227 }
228
229 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
230 "cvl_port_id=%06x cvl_mac=%pM.\n", cvl_port_id,
231 cvl_mac);
232 if (cvl_port_id == qedf->lport->port_id &&
233 ether_addr_equal(cvl_mac,
234 qedf->ctlr.sel_fcf->fcf_mac)) {
235 fcoe_ctlr_link_down(&qedf->ctlr);
236 qedf_wait_for_upload(qedf);
237 fcoe_ctlr_link_up(&qedf->ctlr);
238 }
239 kfree_skb(skb);
240 } else {
241 /* Everything else is handled by libfcoe */
242 __skb_pull(skb, ETH_HLEN);
243 fcoe_ctlr_recv(&qedf->ctlr, skb);
244 }
245}
246
247void qedf_update_src_mac(struct fc_lport *lport, u8 *addr)
248{
249 struct qedf_ctx *qedf = lport_priv(lport);
250
251 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
252 "Setting data_src_addr=%pM.\n", addr);
253 ether_addr_copy(qedf->data_src_addr, addr);
254}
255
256u8 *qedf_get_src_mac(struct fc_lport *lport)
257{
258 u8 mac[ETH_ALEN];
259 u8 port_id[3];
260 struct qedf_ctx *qedf = lport_priv(lport);
261
262 /* We need to use the lport port_id to create the data_src_addr */
263 if (is_zero_ether_addr(qedf->data_src_addr)) {
264 hton24(port_id, lport->port_id);
265 fc_fcoe_set_mac(mac, port_id);
266 qedf->ctlr.update_mac(lport, mac);
267 }
268 return qedf->data_src_addr;
269}
diff --git a/drivers/scsi/qedf/qedf_hsi.h b/drivers/scsi/qedf/qedf_hsi.h
new file mode 100644
index 000000000000..dfd65dec2874
--- /dev/null
+++ b/drivers/scsi/qedf/qedf_hsi.h
@@ -0,0 +1,422 @@
1/*
2 * QLogic FCoE Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9#ifndef __QEDF_HSI__
10#define __QEDF_HSI__
11/*
12 * Add include to common target
13 */
14#include <linux/qed/common_hsi.h>
15
16/*
17 * Add include to common storage target
18 */
19#include <linux/qed/storage_common.h>
20
21/*
22 * Add include to common fcoe target for both eCore and protocol driver
23 */
24#include <linux/qed/fcoe_common.h>
25
26
27/*
28 * FCoE CQ element ABTS information
29 */
30struct fcoe_abts_info {
31 u8 r_ctl /* R_CTL in the ABTS response frame */;
32 u8 reserved0;
33 __le16 rx_id;
34 __le32 reserved2[2];
35 __le32 fc_payload[3] /* ABTS FC payload response frame */;
36};
37
38
39/*
40 * FCoE class type
41 */
42enum fcoe_class_type {
43 FCOE_TASK_CLASS_TYPE_3,
44 FCOE_TASK_CLASS_TYPE_2,
45 MAX_FCOE_CLASS_TYPE
46};
47
48
49/*
50 * FCoE CMDQ element control information
51 */
52struct fcoe_cmdqe_control {
53 __le16 conn_id;
54 u8 num_additional_cmdqes;
55 u8 cmdType;
56 /* true for ABTS request cmdqe. used in Target mode */
57#define FCOE_CMDQE_CONTROL_ABTSREQCMD_MASK 0x1
58#define FCOE_CMDQE_CONTROL_ABTSREQCMD_SHIFT 0
59#define FCOE_CMDQE_CONTROL_RESERVED1_MASK 0x7F
60#define FCOE_CMDQE_CONTROL_RESERVED1_SHIFT 1
61 u8 reserved2[4];
62};
63
64/*
65 * FCoE control + payload CMDQ element
66 */
67struct fcoe_cmdqe {
68 struct fcoe_cmdqe_control hdr;
69 u8 fc_header[24];
70 __le32 fcp_cmd_payload[8];
71};
72
73
74
75/*
76 * FCP RSP flags
77 */
78struct fcoe_fcp_rsp_flags {
79 u8 flags;
80#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_MASK 0x1
81#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0
82#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_MASK 0x1
83#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1
84#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_MASK 0x1
85#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2
86#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_MASK 0x1
87#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3
88#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_MASK 0x1
89#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4
90#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_MASK 0x7
91#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5
92};
93
94/*
95 * FCoE CQ element response information
96 */
97struct fcoe_cqe_rsp_info {
98 struct fcoe_fcp_rsp_flags rsp_flags;
99 u8 scsi_status_code;
100 __le16 retry_delay_timer;
101 __le32 fcp_resid;
102 __le32 fcp_sns_len;
103 __le32 fcp_rsp_len;
104 __le16 rx_id;
105 u8 fw_error_flags;
106#define FCOE_CQE_RSP_INFO_FW_UNDERRUN_MASK 0x1 /* FW detected underrun */
107#define FCOE_CQE_RSP_INFO_FW_UNDERRUN_SHIFT 0
108#define FCOE_CQE_RSP_INFO_RESREVED_MASK 0x7F
109#define FCOE_CQE_RSP_INFO_RESREVED_SHIFT 1
110 u8 reserved;
111 __le32 fw_residual /* Residual bytes calculated by FW */;
112};
113
114/*
115 * FCoE CQ element Target completion information
116 */
117struct fcoe_cqe_target_info {
118 __le16 rx_id;
119 __le16 reserved0;
120 __le32 reserved1[5];
121};
122
123/*
124 * FCoE error/warning reporting entry
125 */
126struct fcoe_err_report_entry {
127 __le32 err_warn_bitmap_lo /* Error bitmap lower 32 bits */;
128 __le32 err_warn_bitmap_hi /* Error bitmap higher 32 bits */;
129	/* Buffer offset from the beginning of the Sequence last transmitted */
130 __le32 tx_buf_off;
131 /* Buffer offset from the beginning of the Sequence last received */
132 __le32 rx_buf_off;
133 __le16 rx_id /* RX_ID of the associated task */;
134 __le16 reserved1;
135 __le32 reserved2;
136};
137
138/*
139 * FCoE CQ element middle path information
140 */
141struct fcoe_cqe_midpath_info {
142 __le32 data_placement_size;
143 __le16 rx_id;
144 __le16 reserved0;
145 __le32 reserved1[4];
146};
147
148/*
149 * FCoE CQ element unsolicited information
150 */
151struct fcoe_unsolic_info {
152 /* BD information: Physical address and opaque data */
153 struct scsi_bd bd_info;
154 __le16 conn_id /* Connection ID the frame is associated to */;
155 __le16 pkt_len /* Packet length */;
156 u8 reserved1[4];
157};
158
159/*
160 * FCoE warning reporting entry
161 */
162struct fcoe_warning_report_entry {
163 /* BD information: Physical address and opaque data */
164 struct scsi_bd bd_info;
165	/* Buffer offset from the beginning of the Sequence last transmitted */
166 __le32 buf_off;
167 __le16 rx_id /* RX_ID of the associated task */;
168 __le16 reserved1;
169};
170
171/*
172 * FCoE CQ element information
173 */
174union fcoe_cqe_info {
175 struct fcoe_cqe_rsp_info rsp_info /* Response completion information */;
176 /* Target completion information */
177 struct fcoe_cqe_target_info target_info;
178 /* Error completion information */
179 struct fcoe_err_report_entry err_info;
180 struct fcoe_abts_info abts_info /* ABTS completion information */;
181 /* Middle path completion information */
182 struct fcoe_cqe_midpath_info midpath_info;
183 /* Unsolicited packet completion information */
184 struct fcoe_unsolic_info unsolic_info;
185 /* Warning completion information (Rec Tov expiration) */
186 struct fcoe_warning_report_entry warn_info;
187};
188
189/*
190 * FCoE CQ element
191 */
192struct fcoe_cqe {
193 __le32 cqe_data;
194 /* The task identifier (OX_ID) to be completed */
195#define FCOE_CQE_TASK_ID_MASK 0xFFFF
196#define FCOE_CQE_TASK_ID_SHIFT 0
197 /*
198 * The CQE type: 0x0 Indicating on a pending work request completion.
199 * 0x1 - Indicating on an unsolicited event notification. use enum
200 * fcoe_cqe_type (use enum fcoe_cqe_type)
201 */
202#define FCOE_CQE_CQE_TYPE_MASK 0xF
203#define FCOE_CQE_CQE_TYPE_SHIFT 16
204#define FCOE_CQE_RESERVED0_MASK 0xFFF
205#define FCOE_CQE_RESERVED0_SHIFT 20
206 __le16 reserved1;
207 __le16 fw_cq_prod;
208 union fcoe_cqe_info cqe_info;
209};
210
211/*
212 * FCoE CQE type
213 */
214enum fcoe_cqe_type {
215 /* solicited response on a R/W or middle-path SQE */
216 FCOE_GOOD_COMPLETION_CQE_TYPE,
217 FCOE_UNSOLIC_CQE_TYPE /* unsolicited packet, RQ consumed */,
218 FCOE_ERROR_DETECTION_CQE_TYPE /* timer expiration, validation error */,
219 FCOE_WARNING_CQE_TYPE /* rec_tov or rr_tov timer expiration */,
220 FCOE_EXCH_CLEANUP_CQE_TYPE /* task cleanup completed */,
221 FCOE_ABTS_CQE_TYPE /* ABTS received and task cleaned */,
222 FCOE_DUMMY_CQE_TYPE /* just increment SQ CONS */,
223	/* Task was completed right after sending a pkt to the target */
224 FCOE_LOCAL_COMP_CQE_TYPE,
225 MAX_FCOE_CQE_TYPE
226};
227
228
229/*
230 * FCoE device type
231 */
232enum fcoe_device_type {
233 FCOE_TASK_DEV_TYPE_DISK,
234 FCOE_TASK_DEV_TYPE_TAPE,
235 MAX_FCOE_DEVICE_TYPE
236};
237
238
239
240
241/*
242 * FCoE fast path error codes
243 */
244enum fcoe_fp_error_warning_code {
245 FCOE_ERROR_CODE_XFER_OOO_RO /* XFER error codes */,
246 FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED,
247 FCOE_ERROR_CODE_XFER_NULL_BURST_LEN,
248 FCOE_ERROR_CODE_XFER_RO_GREATER_THAN_DATA2TRNS,
249 FCOE_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE,
250 FCOE_ERROR_CODE_XFER_TASK_TYPE_NOT_WRITE,
251 FCOE_ERROR_CODE_XFER_PEND_XFER_SET,
252 FCOE_ERROR_CODE_XFER_OPENED_SEQ,
253 FCOE_ERROR_CODE_XFER_FCTL,
254 FCOE_ERROR_CODE_FCP_RSP_BIDI_FLAGS_SET /* FCP RSP error codes */,
255 FCOE_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD,
256 FCOE_ERROR_CODE_FCP_RSP_INVALID_SNS_FIELD,
257 FCOE_ERROR_CODE_FCP_RSP_INVALID_PAYLOAD_SIZE,
258 FCOE_ERROR_CODE_FCP_RSP_PEND_XFER_SET,
259 FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ,
260 FCOE_ERROR_CODE_FCP_RSP_FCTL,
261 FCOE_ERROR_CODE_FCP_RSP_LAST_SEQ_RESET,
262 FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET,
263 FCOE_ERROR_CODE_DATA_OOO_RO /* FCP DATA error codes */,
264 FCOE_ERROR_CODE_DATA_EXCEEDS_DEFINED_MAX_FRAME_SIZE,
265 FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS,
266 FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET,
267 FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET,
268 FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET,
269 FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET,
270 FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ,
271 FCOE_ERROR_CODE_DATA_FCTL_INITIATIR,
272 FCOE_ERROR_CODE_MIDPATH_INVALID_TYPE /* Middle path error codes */,
273 FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET,
274 FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET,
275 FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET,
276 FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET,
277 FCOE_ERROR_CODE_MIDPATH_REPLY_FCTL,
278 FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY,
279 FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL,
280 FCOE_ERROR_CODE_COMMON_MIDDLE_FRAME_WITH_PAD /* Common error codes */,
281 FCOE_ERROR_CODE_COMMON_SEQ_INIT_IN_TCE,
282 FCOE_ERROR_CODE_COMMON_FC_HDR_RX_ID_MISMATCH,
283 FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT,
284 FCOE_ERROR_CODE_COMMON_DATA_FC_HDR_FCP_TYPE_MISMATCH,
285 FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES,
286 FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR,
287 FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG,
288 FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED,
289 FCOE_ERROR_CODE_COMMON_TASK_DDF_RCTL_INFO_FIELD,
290 FCOE_ERROR_CODE_COMMON_TASK_INVALID_RCTL,
291 FCOE_ERROR_CODE_COMMON_TASK_RCTL_GENERAL_MISMATCH,
292 FCOE_ERROR_CODE_E_D_TOV_TIMER_EXPIRATION /* Timer error codes */,
293 FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION /* Timer error codes */,
294 FCOE_ERROR_CODE_RR_TOV_TIMER_EXPIRATION /* Timer error codes */,
295	/* ABTS rsp packet arrived unexpectedly */
296 FCOE_ERROR_CODE_ABTS_REPLY_UNEXPECTED,
297 FCOE_ERROR_CODE_TARGET_MODE_FCP_RSP,
298 FCOE_ERROR_CODE_TARGET_MODE_FCP_XFER,
299 FCOE_ERROR_CODE_TARGET_MODE_DATA_TASK_TYPE_NOT_WRITE,
300 FCOE_ERROR_CODE_DATA_FCTL_TARGET,
301 FCOE_ERROR_CODE_TARGET_DATA_SIZE_NO_MATCH_XFER,
302 FCOE_ERROR_CODE_TARGET_DIF_CRC_CHECKSUM_ERROR,
303 FCOE_ERROR_CODE_TARGET_DIF_REF_TAG_ERROR,
304 FCOE_ERROR_CODE_TARGET_DIF_APP_TAG_ERROR,
305 MAX_FCOE_FP_ERROR_WARNING_CODE
306};
307
308
309/*
310 * FCoE RESPQ element
311 */
312struct fcoe_respqe {
313 __le16 ox_id /* OX_ID that is located in the FCP_RSP FC header */;
314 __le16 rx_id /* RX_ID that is located in the FCP_RSP FC header */;
315 __le32 additional_info;
316/* PARAM that is located in the FCP_RSP FC header */
317#define FCOE_RESPQE_PARAM_MASK 0xFFFFFF
318#define FCOE_RESPQE_PARAM_SHIFT 0
319/* Indication whether it's Target-auto-rsp mode or not */
320#define FCOE_RESPQE_TARGET_AUTO_RSP_MASK 0xFF
321#define FCOE_RESPQE_TARGET_AUTO_RSP_SHIFT 24
322};
323
324
325/*
326 * FCoE slow path error codes
327 */
328enum fcoe_sp_error_code {
329 /* Error codes for Error Reporting in slow path flows */
330 FCOE_ERROR_CODE_SLOW_PATH_TOO_MANY_FUNCS,
331 FCOE_ERROR_SLOW_PATH_CODE_NO_LICENSE,
332 MAX_FCOE_SP_ERROR_CODE
333};
334
335
336/*
337 * FCoE SQE request type
338 */
339enum fcoe_sqe_request_type {
340 SEND_FCOE_CMD,
341 SEND_FCOE_MIDPATH,
342 SEND_FCOE_ABTS_REQUEST,
343 FCOE_EXCHANGE_CLEANUP,
344 FCOE_SEQUENCE_RECOVERY,
345 SEND_FCOE_XFER_RDY,
346 SEND_FCOE_RSP,
347 SEND_FCOE_RSP_WITH_SENSE_DATA,
348 SEND_FCOE_TARGET_DATA,
349 SEND_FCOE_INITIATOR_DATA,
350 /*
351 * Xfer Continuation (==1) ready to be sent. Previous XFERs data
352 * received successfully.
353 */
354 SEND_FCOE_XFER_CONTINUATION_RDY,
355 SEND_FCOE_TARGET_ABTS_RSP,
356 MAX_FCOE_SQE_REQUEST_TYPE
357};
358
359
360/*
361 * FCoE task TX state
362 */
363enum fcoe_task_tx_state {
364	/* Initial state after driver has initialized the task */
365 FCOE_TASK_TX_STATE_NORMAL,
366 /* Updated by TX path after complete transmitting unsolicited packet */
367 FCOE_TASK_TX_STATE_UNSOLICITED_COMPLETED,
368 /*
369 * Updated by TX path after start processing the task requesting the
370 * cleanup/abort operation
371 */
372 FCOE_TASK_TX_STATE_CLEAN_REQ,
373 FCOE_TASK_TX_STATE_ABTS /* Updated by TX path during abort procedure */,
374 /* Updated by TX path during exchange cleanup procedure */
375 FCOE_TASK_TX_STATE_EXCLEANUP,
376 /*
377 * Updated by TX path during exchange cleanup continuation task
378 * procedure
379 */
380 FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_WRITE_CONT,
381 /* Updated by TX path during exchange cleanup first xfer procedure */
382 FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_WRITE,
383 /* Updated by TX path during exchange cleanup read task in Target */
384 FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_READ_OR_RSP,
385 /* Updated by TX path during target exchange cleanup procedure */
386 FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_WRITE_LAST_CYCLE,
387 /* Updated by TX path during sequence recovery procedure */
388 FCOE_TASK_TX_STATE_SEQRECOVERY,
389 MAX_FCOE_TASK_TX_STATE
390};
391
392
393/*
394 * FCoE task type
395 */
396enum fcoe_task_type {
397 FCOE_TASK_TYPE_WRITE_INITIATOR,
398 FCOE_TASK_TYPE_READ_INITIATOR,
399 FCOE_TASK_TYPE_MIDPATH,
400 FCOE_TASK_TYPE_UNSOLICITED,
401 FCOE_TASK_TYPE_ABTS,
402 FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
403 FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
404 FCOE_TASK_TYPE_WRITE_TARGET,
405 FCOE_TASK_TYPE_READ_TARGET,
406 FCOE_TASK_TYPE_RSP,
407 FCOE_TASK_TYPE_RSP_SENSE_DATA,
408 FCOE_TASK_TYPE_ABTS_TARGET,
409 FCOE_TASK_TYPE_ENUM_SIZE,
410 MAX_FCOE_TASK_TYPE
411};
412
413struct scsi_glbl_queue_entry {
414 /* Start physical address for the RQ (receive queue) PBL. */
415 struct regpair rq_pbl_addr;
416 /* Start physical address for the CQ (completion queue) PBL. */
417 struct regpair cq_pbl_addr;
418 /* Start physical address for the CMDQ (command queue) PBL. */
419 struct regpair cmdq_pbl_addr;
420};
421
422#endif /* __QEDF_HSI__ */
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
new file mode 100644
index 000000000000..486c045ac8bb
--- /dev/null
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -0,0 +1,2282 @@
1/*
2 * QLogic FCoE Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9#include <linux/spinlock.h>
10#include <linux/vmalloc.h>
11#include "qedf.h"
12#include <scsi/scsi_tcq.h>
13
14void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
15 unsigned int timer_msec)
16{
17 queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
18 msecs_to_jiffies(timer_msec));
19}
20
21static void qedf_cmd_timeout(struct work_struct *work)
22{
23
24 struct qedf_ioreq *io_req =
25 container_of(work, struct qedf_ioreq, timeout_work.work);
26 struct qedf_ctx *qedf = io_req->fcport->qedf;
27 struct qedf_rport *fcport = io_req->fcport;
28 u8 op = 0;
29
30 switch (io_req->cmd_type) {
31 case QEDF_ABTS:
32 QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
33 io_req->xid);
34 /* Cleanup timed out ABTS */
35 qedf_initiate_cleanup(io_req, true);
36 complete(&io_req->abts_done);
37
38 /*
39 * Need to call kref_put for reference taken when initiate_abts
40 * was called since abts_compl won't be called now that we've
41 * cleaned up the task.
42 */
43 kref_put(&io_req->refcount, qedf_release_cmd);
44
45 /*
46 * Now that the original I/O and the ABTS are complete see
47 * if we need to reconnect to the target.
48 */
49 qedf_restart_rport(fcport);
50 break;
51 case QEDF_ELS:
52 kref_get(&io_req->refcount);
53 /*
54	 * Don't attempt to clean an ELS timeout as any subsequent
55 * ABTS or cleanup requests just hang. For now just free
56 * the resources of the original I/O and the RRQ
57 */
58 QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
59 io_req->xid);
60 io_req->event = QEDF_IOREQ_EV_ELS_TMO;
61 /* Call callback function to complete command */
62 if (io_req->cb_func && io_req->cb_arg) {
63 op = io_req->cb_arg->op;
64 io_req->cb_func(io_req->cb_arg);
65 io_req->cb_arg = NULL;
66 }
67 qedf_initiate_cleanup(io_req, true);
68 kref_put(&io_req->refcount, qedf_release_cmd);
69 break;
70 case QEDF_SEQ_CLEANUP:
71 QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
72 "xid=0x%x.\n", io_req->xid);
73 qedf_initiate_cleanup(io_req, true);
74 io_req->event = QEDF_IOREQ_EV_ELS_TMO;
75 qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
76 break;
77 default:
78 break;
79 }
80}
81
82void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
83{
84 struct io_bdt *bdt_info;
85 struct qedf_ctx *qedf = cmgr->qedf;
86 size_t bd_tbl_sz;
87 u16 min_xid = QEDF_MIN_XID;
88 u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
89 int num_ios;
90 int i;
91 struct qedf_ioreq *io_req;
92
93 num_ios = max_xid - min_xid + 1;
94
95 /* Free fcoe_bdt_ctx structures */
96 if (!cmgr->io_bdt_pool)
97 goto free_cmd_pool;
98
99 bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge);
100 for (i = 0; i < num_ios; i++) {
101 bdt_info = cmgr->io_bdt_pool[i];
102 if (bdt_info->bd_tbl) {
103 dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
104 bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
105 bdt_info->bd_tbl = NULL;
106 }
107 }
108
109 /* Destroy io_bdt pool */
110 for (i = 0; i < num_ios; i++) {
111 kfree(cmgr->io_bdt_pool[i]);
112 cmgr->io_bdt_pool[i] = NULL;
113 }
114
115 kfree(cmgr->io_bdt_pool);
116 cmgr->io_bdt_pool = NULL;
117
118free_cmd_pool:
119
120 for (i = 0; i < num_ios; i++) {
121 io_req = &cmgr->cmds[i];
122 /* Make sure we free per command sense buffer */
123 if (io_req->sense_buffer)
124 dma_free_coherent(&qedf->pdev->dev,
125 QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
126 io_req->sense_buffer_dma);
127 cancel_delayed_work_sync(&io_req->rrq_work);
128 }
129
130 /* Free command manager itself */
131 vfree(cmgr);
132}
133
134static void qedf_handle_rrq(struct work_struct *work)
135{
136 struct qedf_ioreq *io_req =
137 container_of(work, struct qedf_ioreq, rrq_work.work);
138
139 qedf_send_rrq(io_req);
140
141}
142
143struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
144{
145 struct qedf_cmd_mgr *cmgr;
146 struct io_bdt *bdt_info;
147 struct qedf_ioreq *io_req;
148 u16 xid;
149 int i;
150 int num_ios;
151 u16 min_xid = QEDF_MIN_XID;
152 u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
153
154 /* Make sure num_queues is already set before calling this function */
155 if (!qedf->num_queues) {
156 QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
157 return NULL;
158 }
159
160 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
161 QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
162 "max_xid 0x%x.\n", min_xid, max_xid);
163 return NULL;
164 }
165
166 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
167 "0x%x.\n", min_xid, max_xid);
168
169 num_ios = max_xid - min_xid + 1;
170
171 cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
172 if (!cmgr) {
173 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
174 return NULL;
175 }
176
177 cmgr->qedf = qedf;
178 spin_lock_init(&cmgr->lock);
179
180 /*
181 * Initialize list of qedf_ioreq.
182 */
183 xid = QEDF_MIN_XID;
184
185 for (i = 0; i < num_ios; i++) {
186 io_req = &cmgr->cmds[i];
187 INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);
188
189 io_req->xid = xid++;
190
191 INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);
192
193 /* Allocate DMA memory to hold sense buffer */
194 io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
195 QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
196 GFP_KERNEL);
197 if (!io_req->sense_buffer)
198 goto mem_err;
199 }
200
201 /* Allocate pool of io_bdts - one for each qedf_ioreq */
202 cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
203 GFP_KERNEL);
204
205 if (!cmgr->io_bdt_pool) {
206 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
207 goto mem_err;
208 }
209
210 for (i = 0; i < num_ios; i++) {
211 cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
212 GFP_KERNEL);
213 if (!cmgr->io_bdt_pool[i]) {
214 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc "
215 "io_bdt_pool[%d].\n", i);
216 goto mem_err;
217 }
218 }
219
220 for (i = 0; i < num_ios; i++) {
221 bdt_info = cmgr->io_bdt_pool[i];
222 bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
223 QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge),
224 &bdt_info->bd_tbl_dma, GFP_KERNEL);
225 if (!bdt_info->bd_tbl) {
226 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc "
227 "bdt_tbl[%d].\n", i);
228 goto mem_err;
229 }
230 }
231 atomic_set(&cmgr->free_list_cnt, num_ios);
232 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
233 "cmgr->free_list_cnt=%d.\n",
234 atomic_read(&cmgr->free_list_cnt));
235
236 return cmgr;
237
238mem_err:
239 qedf_cmd_mgr_free(cmgr);
240 return NULL;
241}
242
243struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
244{
245 struct qedf_ctx *qedf = fcport->qedf;
246 struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
247 struct qedf_ioreq *io_req = NULL;
248 struct io_bdt *bd_tbl;
249 u16 xid;
250 uint32_t free_sqes;
251 int i;
252 unsigned long flags;
253
254 free_sqes = atomic_read(&fcport->free_sqes);
255
256 if (!free_sqes) {
257 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
258			  "Returning NULL, free_sqes=%d.\n",
259 free_sqes);
260 goto out_failed;
261 }
262
263 /* Limit the number of outstanding R/W tasks */
264 if ((atomic_read(&fcport->num_active_ios) >=
265 NUM_RW_TASKS_PER_CONNECTION)) {
266 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
267 "Returning NULL, num_active_ios=%d.\n",
268 atomic_read(&fcport->num_active_ios));
269 goto out_failed;
270 }
271
272	/* Reserve some global TIDs for certain task types */
273 if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
274 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
275 "Returning NULL, free_list_cnt=%d.\n",
276 atomic_read(&cmd_mgr->free_list_cnt));
277 goto out_failed;
278 }
279
280 spin_lock_irqsave(&cmd_mgr->lock, flags);
281 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
282 io_req = &cmd_mgr->cmds[cmd_mgr->idx];
283 cmd_mgr->idx++;
284 if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
285 cmd_mgr->idx = 0;
286
287 /* Check to make sure command was previously freed */
288 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags))
289 break;
290 }
291
292 if (i == FCOE_PARAMS_NUM_TASKS) {
293 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
294 goto out_failed;
295 }
296
297 set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
298 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
299
300 atomic_inc(&fcport->num_active_ios);
301 atomic_dec(&fcport->free_sqes);
302 xid = io_req->xid;
303 atomic_dec(&cmd_mgr->free_list_cnt);
304
305 io_req->cmd_mgr = cmd_mgr;
306 io_req->fcport = fcport;
307
308 /* Hold the io_req against deletion */
309 kref_init(&io_req->refcount);
310
311 /* Bind io_bdt for this io_req */
312 /* Have a static link between io_req and io_bdt_pool */
313 bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
314 if (bd_tbl == NULL) {
315 QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
316 kref_put(&io_req->refcount, qedf_release_cmd);
317 goto out_failed;
318 }
319 bd_tbl->io_req = io_req;
320 io_req->cmd_type = cmd_type;
321
322 /* Reset sequence offset data */
323 io_req->rx_buf_off = 0;
324 io_req->tx_buf_off = 0;
325	io_req->rx_id = 0xffff; /* No RX_ID */
326
327 return io_req;
328
329out_failed:
330 /* Record failure for stats and return NULL to caller */
331 qedf->alloc_failures++;
332 return NULL;
333}
334
335static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
336{
337 struct qedf_mp_req *mp_req = &(io_req->mp_req);
338 struct qedf_ctx *qedf = io_req->fcport->qedf;
339 uint64_t sz = sizeof(struct fcoe_sge);
340
341 /* clear tm flags */
342 mp_req->tm_flags = 0;
343 if (mp_req->mp_req_bd) {
344 dma_free_coherent(&qedf->pdev->dev, sz,
345 mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
346 mp_req->mp_req_bd = NULL;
347 }
348 if (mp_req->mp_resp_bd) {
349 dma_free_coherent(&qedf->pdev->dev, sz,
350 mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
351 mp_req->mp_resp_bd = NULL;
352 }
353 if (mp_req->req_buf) {
354 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
355 mp_req->req_buf, mp_req->req_buf_dma);
356 mp_req->req_buf = NULL;
357 }
358 if (mp_req->resp_buf) {
359 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
360 mp_req->resp_buf, mp_req->resp_buf_dma);
361 mp_req->resp_buf = NULL;
362 }
363}
364
365void qedf_release_cmd(struct kref *ref)
366{
367 struct qedf_ioreq *io_req =
368 container_of(ref, struct qedf_ioreq, refcount);
369 struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
370 struct qedf_rport *fcport = io_req->fcport;
371
372 if (io_req->cmd_type == QEDF_ELS ||
373 io_req->cmd_type == QEDF_TASK_MGMT_CMD)
374 qedf_free_mp_resc(io_req);
375
376 atomic_inc(&cmd_mgr->free_list_cnt);
377 atomic_dec(&fcport->num_active_ios);
378 if (atomic_read(&fcport->num_active_ios) < 0)
379 QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
380
381 /* Increment task retry identifier now that the request is released */
382 io_req->task_retry_identifier++;
383
384 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
385}
386
387static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
388 int bd_index)
389{
390 struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
391 int frag_size, sg_frags;
392
393 sg_frags = 0;
394 while (sg_len) {
395 if (sg_len > QEDF_BD_SPLIT_SZ)
396 frag_size = QEDF_BD_SPLIT_SZ;
397 else
398 frag_size = sg_len;
399 bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
400 bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
401 bd[bd_index + sg_frags].size = (uint16_t)frag_size;
402
403 addr += (u64)frag_size;
404 sg_frags++;
405 sg_len -= frag_size;
406 }
407 return sg_frags;
408}
409
410static int qedf_map_sg(struct qedf_ioreq *io_req)
411{
412 struct scsi_cmnd *sc = io_req->sc_cmd;
413 struct Scsi_Host *host = sc->device->host;
414 struct fc_lport *lport = shost_priv(host);
415 struct qedf_ctx *qedf = lport_priv(lport);
416 struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
417 struct scatterlist *sg;
418 int byte_count = 0;
419 int sg_count = 0;
420 int bd_count = 0;
421 int sg_frags;
422 unsigned int sg_len;
423 u64 addr, end_addr;
424 int i;
425
426 sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
427 scsi_sg_count(sc), sc->sc_data_direction);
428
429 sg = scsi_sglist(sc);
430
431 /*
432 * New condition to send single SGE as cached-SGL with length less
433 * than 64k.
434 */
435 if ((sg_count == 1) && (sg_dma_len(sg) <=
436 QEDF_MAX_SGLEN_FOR_CACHESGL)) {
437 sg_len = sg_dma_len(sg);
438 addr = (u64)sg_dma_address(sg);
439
440 bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
441 bd[bd_count].sge_addr.hi = (addr >> 32);
442 bd[bd_count].size = (u16)sg_len;
443
444 return ++bd_count;
445 }
446
447 scsi_for_each_sg(sc, sg, sg_count, i) {
448 sg_len = sg_dma_len(sg);
449 addr = (u64)sg_dma_address(sg);
450 end_addr = (u64)(addr + sg_len);
451
452 /*
453 * First s/g element in the list so check if the end_addr
454		 * is page aligned. Also check to make sure the length is
455 * at least page size.
456 */
457 if ((i == 0) && (sg_count > 1) &&
458 ((end_addr % QEDF_PAGE_SIZE) ||
459 sg_len < QEDF_PAGE_SIZE))
460 io_req->use_slowpath = true;
461 /*
462		 * Last s/g element so check if the start address is page
463 * aligned.
464 */
465 else if ((i == (sg_count - 1)) && (sg_count > 1) &&
466 (addr % QEDF_PAGE_SIZE))
467 io_req->use_slowpath = true;
468 /*
469		 * Intermediate s/g element so check if the start and end
470		 * addresses are page aligned.
471 */
472 else if ((i != 0) && (i != (sg_count - 1)) &&
473 ((addr % QEDF_PAGE_SIZE) || (end_addr % QEDF_PAGE_SIZE)))
474 io_req->use_slowpath = true;
475
476 if (sg_len > QEDF_MAX_BD_LEN) {
477 sg_frags = qedf_split_bd(io_req, addr, sg_len,
478 bd_count);
479 } else {
480 sg_frags = 1;
481 bd[bd_count].sge_addr.lo = U64_LO(addr);
482 bd[bd_count].sge_addr.hi = U64_HI(addr);
483 bd[bd_count].size = (uint16_t)sg_len;
484 }
485
486 bd_count += sg_frags;
487 byte_count += sg_len;
488 }
489
490 if (byte_count != scsi_bufflen(sc))
491 QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
492 "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
493 scsi_bufflen(sc), io_req->xid);
494
495 return bd_count;
496}
497
498static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
499{
500 struct scsi_cmnd *sc = io_req->sc_cmd;
501 struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
502 int bd_count;
503
504 if (scsi_sg_count(sc)) {
505 bd_count = qedf_map_sg(io_req);
506 if (bd_count == 0)
507 return -ENOMEM;
508 } else {
509 bd_count = 0;
510 bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
511 bd[0].size = 0;
512 }
513 io_req->bd_tbl->bd_valid = bd_count;
514
515 return 0;
516}
517
518static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
519 struct fcp_cmnd *fcp_cmnd)
520{
521 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
522
523 /* fcp_cmnd is 32 bytes */
524 memset(fcp_cmnd, 0, FCP_CMND_LEN);
525
526 /* 8 bytes: SCSI LUN info */
527 int_to_scsilun(sc_cmd->device->lun,
528 (struct scsi_lun *)&fcp_cmnd->fc_lun);
529
530 /* 4 bytes: flag info */
531 fcp_cmnd->fc_pri_ta = 0;
532 fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
533 fcp_cmnd->fc_flags = io_req->io_req_flags;
534 fcp_cmnd->fc_cmdref = 0;
535
536 /* Populate data direction */
537 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
538 fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
539 else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
540 fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
541
542 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
543
544 /* 16 bytes: CDB information */
545 memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
546
547 /* 4 bytes: FCP data length */
548 fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
549
550}
551
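/*
 * Initialize the firmware task context for a regular SCSI read or write.
 * Fills the Y/T/M/U-storm sections, selects single, fast or slow SGL mode
 * based on the BD count and the use_slowpath flag, and copies the
 * byte-swapped FCP_CMND IU and the sense buffer address into the context.
 */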
552static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
553 struct qedf_ioreq *io_req, u32 *ptu_invalidate,
554 struct fcoe_task_context *task_ctx)
555{
556 enum fcoe_task_type task_type;
557 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
558 struct io_bdt *bd_tbl = io_req->bd_tbl;
559 union fcoe_data_desc_ctx *data_desc;
560 u32 *fcp_cmnd;
561 u32 tmp_fcp_cmnd[8];
562 int cnt, i;
563 int bd_count;
564 struct qedf_ctx *qedf = fcport->qedf;
565 uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
566 u8 tmp_sgl_mode = 0;
567 u8 mst_sgl_mode = 0;
568
569 memset(task_ctx, 0, sizeof(struct fcoe_task_context));
570 io_req->task = task_ctx;
571
572 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
573 task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
574 else
575 task_type = FCOE_TASK_TYPE_READ_INITIATOR;
576
577 /* Y Storm context */
578 task_ctx->ystorm_st_context.expect_first_xfer = 1;
579 task_ctx->ystorm_st_context.data_2_trns_rem = io_req->data_xfer_len;
580 /* Check if this is required */
581 task_ctx->ystorm_st_context.ox_id = io_req->xid;
582 task_ctx->ystorm_st_context.task_rety_identifier =
583 io_req->task_retry_identifier;
584
585 /* T Storm ag context */
586 SET_FIELD(task_ctx->tstorm_ag_context.flags0,
587 TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, PROTOCOLID_FCOE);
588 task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
589
590 /* T Storm st context */
591 SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
592 FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
593 1);
594 task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
595
596 task_ctx->tstorm_st_context.read_only.dev_type =
597 FCOE_TASK_DEV_TYPE_DISK;
598 task_ctx->tstorm_st_context.read_only.conf_supported = 0;
599 task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
600
601 /* Completion queue for response. */
602 task_ctx->tstorm_st_context.read_only.glbl_q_num = cq_idx;
603 task_ctx->tstorm_st_context.read_only.fcp_cmd_trns_size =
604 io_req->data_xfer_len;
605 task_ctx->tstorm_st_context.read_write.e_d_tov_exp_timeout_val =
606 lport->e_d_tov;
607
608 task_ctx->ustorm_ag_context.global_cq_num = cq_idx;
609 io_req->fp_idx = cq_idx;
610
611 bd_count = bd_tbl->bd_valid;
612 if (task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) {
613 /* Setup WRITE task */
614 struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;
615
616 task_ctx->ystorm_st_context.task_type =
617 FCOE_TASK_TYPE_WRITE_INITIATOR;
618 data_desc = &task_ctx->ystorm_st_context.data_desc;
619
620 if (io_req->use_slowpath) {
621 SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
622 YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
623 FCOE_SLOW_SGL);
624 data_desc->slow.base_sgl_addr.lo =
625 U64_LO(bd_tbl->bd_tbl_dma);
626 data_desc->slow.base_sgl_addr.hi =
627 U64_HI(bd_tbl->bd_tbl_dma);
628 data_desc->slow.remainder_num_sges = bd_count;
629 data_desc->slow.curr_sge_off = 0;
630 data_desc->slow.curr_sgl_index = 0;
631 qedf->slow_sge_ios++;
632 io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
633 } else {
634 SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
635 YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
636 (bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
637 FCOE_MUL_FAST_SGES);
638
639 if (bd_count == 1) {
640 data_desc->single_sge.sge_addr.lo =
641 fcoe_bd_tbl->sge_addr.lo;
642 data_desc->single_sge.sge_addr.hi =
643 fcoe_bd_tbl->sge_addr.hi;
644 data_desc->single_sge.size =
645 fcoe_bd_tbl->size;
646 data_desc->single_sge.is_valid_sge = 0;
647 qedf->single_sge_ios++;
648 io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
649 } else {
650 data_desc->fast.sgl_start_addr.lo =
651 U64_LO(bd_tbl->bd_tbl_dma);
652 data_desc->fast.sgl_start_addr.hi =
653 U64_HI(bd_tbl->bd_tbl_dma);
654 data_desc->fast.sgl_byte_offset =
655 data_desc->fast.sgl_start_addr.lo &
656 (QEDF_PAGE_SIZE - 1);
657 if (data_desc->fast.sgl_byte_offset > 0)
658 QEDF_ERR(&(qedf->dbg_ctx),
659 "byte_offset=%u for xid=0x%x.\n",
660					    data_desc->fast.sgl_byte_offset,
661					    io_req->xid);
662 data_desc->fast.task_reuse_cnt =
663 io_req->reuse_count;
664 io_req->reuse_count++;
665 if (io_req->reuse_count == QEDF_MAX_REUSE) {
666 *ptu_invalidate = 1;
667 io_req->reuse_count = 0;
668 }
669 qedf->fast_sge_ios++;
670 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
671 }
672 }
673
674 /* T Storm context */
675 task_ctx->tstorm_st_context.read_only.task_type =
676 FCOE_TASK_TYPE_WRITE_INITIATOR;
677
678 /* M Storm context */
679 tmp_sgl_mode = GET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
680 YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE);
681 SET_FIELD(task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
682 FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE,
683 tmp_sgl_mode);
684
685 } else {
686 /* Setup READ task */
687
688 /* M Storm context */
689 struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;
690
691 data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
692 task_ctx->mstorm_st_context.fp.data_2_trns_rem =
693 io_req->data_xfer_len;
694
695 if (io_req->use_slowpath) {
696 SET_FIELD(
697 task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
698 FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
699 FCOE_SLOW_SGL);
700 data_desc->slow.base_sgl_addr.lo =
701 U64_LO(bd_tbl->bd_tbl_dma);
702 data_desc->slow.base_sgl_addr.hi =
703 U64_HI(bd_tbl->bd_tbl_dma);
704 data_desc->slow.remainder_num_sges =
705 bd_count;
706 data_desc->slow.curr_sge_off = 0;
707 data_desc->slow.curr_sgl_index = 0;
708 qedf->slow_sge_ios++;
709 io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
710 } else {
711 SET_FIELD(
712 task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
713 FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
714 (bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
715 FCOE_MUL_FAST_SGES);
716
717 if (bd_count == 1) {
718 data_desc->single_sge.sge_addr.lo =
719 fcoe_bd_tbl->sge_addr.lo;
720 data_desc->single_sge.sge_addr.hi =
721 fcoe_bd_tbl->sge_addr.hi;
722 data_desc->single_sge.size =
723 fcoe_bd_tbl->size;
724 data_desc->single_sge.is_valid_sge = 0;
725 qedf->single_sge_ios++;
726 io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
727 } else {
728 data_desc->fast.sgl_start_addr.lo =
729 U64_LO(bd_tbl->bd_tbl_dma);
730 data_desc->fast.sgl_start_addr.hi =
731 U64_HI(bd_tbl->bd_tbl_dma);
732 data_desc->fast.sgl_byte_offset = 0;
733 data_desc->fast.task_reuse_cnt =
734 io_req->reuse_count;
735 io_req->reuse_count++;
736 if (io_req->reuse_count == QEDF_MAX_REUSE) {
737 *ptu_invalidate = 1;
738 io_req->reuse_count = 0;
739 }
740 qedf->fast_sge_ios++;
741 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
742 }
743 }
744
745 /* Y Storm context */
746 task_ctx->ystorm_st_context.expect_first_xfer = 0;
747 task_ctx->ystorm_st_context.task_type =
748 FCOE_TASK_TYPE_READ_INITIATOR;
749
750 /* T Storm context */
751 task_ctx->tstorm_st_context.read_only.task_type =
752 FCOE_TASK_TYPE_READ_INITIATOR;
753 mst_sgl_mode = GET_FIELD(
754 task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
755 FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE);
756 SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
757 FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE,
758 mst_sgl_mode);
759 }
760
761 /* fill FCP_CMND IU */
762 fcp_cmnd = (u32 *)task_ctx->ystorm_st_context.tx_info_union.fcp_cmd_payload.opaque;
763 qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
764
765 /* Swap fcp_cmnd since FC is big endian */
766 cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
767
768 for (i = 0; i < cnt; i++) {
769 *fcp_cmnd = cpu_to_be32(tmp_fcp_cmnd[i]);
770 fcp_cmnd++;
771 }
772
773 /* M Storm context - Sense buffer */
774 task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
775 U64_LO(io_req->sense_buffer_dma);
776 task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
777 U64_HI(io_req->sense_buffer_dma);
778}
779
780void qedf_init_mp_task(struct qedf_ioreq *io_req,
781 struct fcoe_task_context *task_ctx)
782{
783 struct qedf_mp_req *mp_req = &(io_req->mp_req);
784 struct qedf_rport *fcport = io_req->fcport;
785 struct qedf_ctx *qedf = io_req->fcport->qedf;
786 struct fc_frame_header *fc_hdr;
787 enum fcoe_task_type task_type = 0;
788 union fcoe_data_desc_ctx *data_desc;
789
790 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Initializing MP task "
791 "for cmd_type = %d\n", io_req->cmd_type);
792
793 qedf->control_requests++;
794
795 /* Obtain task_type */
796 if ((io_req->cmd_type == QEDF_TASK_MGMT_CMD) ||
797 (io_req->cmd_type == QEDF_ELS)) {
798 task_type = FCOE_TASK_TYPE_MIDPATH;
799 } else if (io_req->cmd_type == QEDF_ABTS) {
800 task_type = FCOE_TASK_TYPE_ABTS;
801 }
802
803 memset(task_ctx, 0, sizeof(struct fcoe_task_context));
804
805 /* Setup the task from io_req for easy reference */
806 io_req->task = task_ctx;
807
808 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "task type = %d\n",
809 task_type);
810
811 /* YSTORM only */
812 {
813 /* Initialize YSTORM task context */
814 struct fcoe_tx_mid_path_params *task_fc_hdr =
815 &task_ctx->ystorm_st_context.tx_info_union.tx_params.mid_path;
816 memset(task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
817 task_ctx->ystorm_st_context.task_rety_identifier =
818 io_req->task_retry_identifier;
819
820 /* Init SGL parameters */
821 if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
822 (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
823 data_desc = &task_ctx->ystorm_st_context.data_desc;
824 data_desc->slow.base_sgl_addr.lo =
825 U64_LO(mp_req->mp_req_bd_dma);
826 data_desc->slow.base_sgl_addr.hi =
827 U64_HI(mp_req->mp_req_bd_dma);
828 data_desc->slow.remainder_num_sges = 1;
829 data_desc->slow.curr_sge_off = 0;
830 data_desc->slow.curr_sgl_index = 0;
831 }
832
833 fc_hdr = &(mp_req->req_fc_hdr);
834 if (task_type == FCOE_TASK_TYPE_MIDPATH) {
835 fc_hdr->fh_ox_id = io_req->xid;
836 fc_hdr->fh_rx_id = htons(0xffff);
837 } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
838 fc_hdr->fh_rx_id = io_req->xid;
839 }
840
841 /* Fill FC Header into middle path buffer */
842 task_fc_hdr->parameter = fc_hdr->fh_parm_offset;
843 task_fc_hdr->r_ctl = fc_hdr->fh_r_ctl;
844 task_fc_hdr->type = fc_hdr->fh_type;
845 task_fc_hdr->cs_ctl = fc_hdr->fh_cs_ctl;
846 task_fc_hdr->df_ctl = fc_hdr->fh_df_ctl;
847 task_fc_hdr->rx_id = fc_hdr->fh_rx_id;
848 task_fc_hdr->ox_id = fc_hdr->fh_ox_id;
849
850 task_ctx->ystorm_st_context.data_2_trns_rem =
851 io_req->data_xfer_len;
852 task_ctx->ystorm_st_context.task_type = task_type;
853 }
854
855 /* TSTORM ONLY */
856 {
857 task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
858 task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
859		/* Always send middle-path responses on CQ #0 */
860 task_ctx->tstorm_st_context.read_only.glbl_q_num = 0;
861 io_req->fp_idx = 0;
862 SET_FIELD(task_ctx->tstorm_ag_context.flags0,
863 TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE,
864 PROTOCOLID_FCOE);
865 task_ctx->tstorm_st_context.read_only.task_type = task_type;
866 SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
867 FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
868 1);
869 task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
870 }
871
872 /* MSTORM only */
873 {
874 if (task_type == FCOE_TASK_TYPE_MIDPATH) {
875 /* Initialize task context */
876 data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
877
878 /* Set cache sges address and length */
879 data_desc->slow.base_sgl_addr.lo =
880 U64_LO(mp_req->mp_resp_bd_dma);
881 data_desc->slow.base_sgl_addr.hi =
882 U64_HI(mp_req->mp_resp_bd_dma);
883 data_desc->slow.remainder_num_sges = 1;
884 data_desc->slow.curr_sge_off = 0;
885 data_desc->slow.curr_sgl_index = 0;
886
887 /*
888			 * Also need to fill in the non-fastpath response address
889 * for middle path commands.
890 */
891 task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
892 U64_LO(mp_req->mp_resp_bd_dma);
893 task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
894 U64_HI(mp_req->mp_resp_bd_dma);
895 }
896 }
897
898 /* USTORM ONLY */
899 {
900 task_ctx->ustorm_ag_context.global_cq_num = 0;
901 }
902
903 /* I/O stats. Middle path commands always use slow SGEs */
904 qedf->slow_sge_ios++;
905 io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
906}
907
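/*
 * Claim the next entry on the rport's send queue ring and encode the
 * request type for the firmware.  The caller holds fcport->rport_lock
 * and rings the doorbell afterwards.
 */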
908void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid, u32 ptu_invalidate,
909 enum fcoe_task_type req_type, u32 offset)
910{
911 struct fcoe_wqe *sqe;
912 uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
913
914 sqe = &fcport->sq[fcport->sq_prod_idx];
915
916 fcport->sq_prod_idx++;
917 fcport->fw_sq_prod_idx++;
918 if (fcport->sq_prod_idx == total_sqe)
919 fcport->sq_prod_idx = 0;
920
921 switch (req_type) {
922 case FCOE_TASK_TYPE_WRITE_INITIATOR:
923 case FCOE_TASK_TYPE_READ_INITIATOR:
924 SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_CMD);
925 if (ptu_invalidate)
926 SET_FIELD(sqe->flags, FCOE_WQE_INVALIDATE_PTU, 1);
927 break;
928 case FCOE_TASK_TYPE_MIDPATH:
929 SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_MIDPATH);
930 break;
931 case FCOE_TASK_TYPE_ABTS:
932 SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
933 SEND_FCOE_ABTS_REQUEST);
934 break;
935 case FCOE_TASK_TYPE_EXCHANGE_CLEANUP:
936 SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
937 FCOE_EXCHANGE_CLEANUP);
938 break;
939 case FCOE_TASK_TYPE_SEQUENCE_CLEANUP:
940 SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
941 FCOE_SEQUENCE_RECOVERY);
942 /* NOTE: offset param only used for sequence recovery */
943 sqe->additional_info_union.seq_rec_updated_offset = offset;
944 break;
945 case FCOE_TASK_TYPE_UNSOLICITED:
946 break;
947 default:
948 break;
949 }
950
951 sqe->task_id = xid;
952
953 /* Make sure SQ data is coherent */
954 wmb();
955
956}
957
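/*
 * Notify the firmware of new send queue entries by writing the current
 * SQ producer index to the rport's XCM doorbell register.
 */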
958void qedf_ring_doorbell(struct qedf_rport *fcport)
959{
960 struct fcoe_db_data dbell = { 0 };
961
962 dbell.agg_flags = 0;
963
964 dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
965 dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
966 dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
967 FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;
968
969 dbell.sq_prod = fcport->fw_sq_prod_idx;
970 writel(*(u32 *)&dbell, fcport->p_doorbell);
971	/* Make sure SQ index is updated so f/w processes requests in order */
972 wmb();
973 mmiowb();
974}
975
976static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
977 int8_t direction)
978{
979 struct qedf_ctx *qedf = fcport->qedf;
980 struct qedf_io_log *io_log;
981 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
982 unsigned long flags;
983 uint8_t op;
984
985 spin_lock_irqsave(&qedf->io_trace_lock, flags);
986
987 io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
988 io_log->direction = direction;
989 io_log->task_id = io_req->xid;
990 io_log->port_id = fcport->rdata->ids.port_id;
991 io_log->lun = sc_cmd->device->lun;
992 io_log->op = op = sc_cmd->cmnd[0];
993 io_log->lba[0] = sc_cmd->cmnd[2];
994 io_log->lba[1] = sc_cmd->cmnd[3];
995 io_log->lba[2] = sc_cmd->cmnd[4];
996 io_log->lba[3] = sc_cmd->cmnd[5];
997 io_log->bufflen = scsi_bufflen(sc_cmd);
998 io_log->sg_count = scsi_sg_count(sc_cmd);
999 io_log->result = sc_cmd->result;
1000 io_log->jiffies = jiffies;
1001 io_log->refcount = atomic_read(&io_req->refcount.refcount);
1002
1003 if (direction == QEDF_IO_TRACE_REQ) {
1004		/* For requests we only care about the submission CPU */
1005 io_log->req_cpu = io_req->cpu;
1006 io_log->int_cpu = 0;
1007 io_log->rsp_cpu = 0;
1008 } else if (direction == QEDF_IO_TRACE_RSP) {
1009 io_log->req_cpu = io_req->cpu;
1010 io_log->int_cpu = io_req->int_cpu;
1011 io_log->rsp_cpu = smp_processor_id();
1012 }
1013
1014 io_log->sge_type = io_req->sge_type;
1015
1016 qedf->io_trace_idx++;
1017 if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
1018 qedf->io_trace_idx = 0;
1019
1020 spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
1021}
1022
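/*
 * Post a SCSI command to the firmware: build the BD list from the SG
 * list, initialize the task context, place an SQE on the send queue and
 * ring the doorbell.  Called with fcport->rport_lock held.
 */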
1023int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
1024{
1025 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1026 struct Scsi_Host *host = sc_cmd->device->host;
1027 struct fc_lport *lport = shost_priv(host);
1028 struct qedf_ctx *qedf = lport_priv(lport);
1029 struct fcoe_task_context *task_ctx;
1030 u16 xid;
1031 enum fcoe_task_type req_type = 0;
1032 u32 ptu_invalidate = 0;
1033
1034	/* Initialize rest of io_req fields */
1035 io_req->data_xfer_len = scsi_bufflen(sc_cmd);
1036 sc_cmd->SCp.ptr = (char *)io_req;
1037 io_req->use_slowpath = false; /* Assume fast SGL by default */
1038
1039 /* Record which cpu this request is associated with */
1040 io_req->cpu = smp_processor_id();
1041
1042 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
1043 req_type = FCOE_TASK_TYPE_READ_INITIATOR;
1044 io_req->io_req_flags = QEDF_READ;
1045 qedf->input_requests++;
1046 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
1047 req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
1048 io_req->io_req_flags = QEDF_WRITE;
1049 qedf->output_requests++;
1050 } else {
1051 io_req->io_req_flags = 0;
1052 qedf->control_requests++;
1053 }
1054
1055 xid = io_req->xid;
1056
1057 /* Build buffer descriptor list for firmware from sg list */
1058 if (qedf_build_bd_list_from_sg(io_req)) {
1059 QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
1060 kref_put(&io_req->refcount, qedf_release_cmd);
1061 return -EAGAIN;
1062 }
1063
1064 /* Get the task context */
1065 task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
1066 if (!task_ctx) {
1067 QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
1068 xid);
1069 kref_put(&io_req->refcount, qedf_release_cmd);
1070 return -EINVAL;
1071 }
1072
1073 qedf_init_task(fcport, lport, io_req, &ptu_invalidate, task_ctx);
1074
1075 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1076 QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
1077		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EINVAL;
1078	}
1079
1080 /* Obtain free SQ entry */
1081 qedf_add_to_sq(fcport, xid, ptu_invalidate, req_type, 0);
1082
1083 /* Ring doorbell */
1084 qedf_ring_doorbell(fcport);
1085
1086 if (qedf_io_tracing && io_req->sc_cmd)
1087 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
1088
1089	return 0;
1090}
1091
1092int
1093qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
1094{
1095 struct fc_lport *lport = shost_priv(host);
1096 struct qedf_ctx *qedf = lport_priv(lport);
1097 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1098 struct fc_rport_libfc_priv *rp = rport->dd_data;
1099 struct qedf_rport *fcport = rport->dd_data;
1100 struct qedf_ioreq *io_req;
1101 int rc = 0;
1102 int rval;
1103 unsigned long flags = 0;
1104
1105
1106 if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
1107 test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
1108 sc_cmd->result = DID_NO_CONNECT << 16;
1109 sc_cmd->scsi_done(sc_cmd);
1110 return 0;
1111 }
1112
1113 rval = fc_remote_port_chkready(rport);
1114 if (rval) {
1115 sc_cmd->result = rval;
1116 sc_cmd->scsi_done(sc_cmd);
1117 return 0;
1118 }
1119
1120 /* Retry command if we are doing a qed drain operation */
1121 if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
1122 rc = SCSI_MLQUEUE_HOST_BUSY;
1123 goto exit_qcmd;
1124 }
1125
1126 if (lport->state != LPORT_ST_READY ||
1127 atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
1128 rc = SCSI_MLQUEUE_HOST_BUSY;
1129 goto exit_qcmd;
1130 }
1131
1132 /* rport and tgt are allocated together, so tgt should be non-NULL */
1133 fcport = (struct qedf_rport *)&rp[1];
1134
1135 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1136 /*
1137 * Session is not offloaded yet. Let SCSI-ml retry
1138 * the command.
1139 */
1140 rc = SCSI_MLQUEUE_TARGET_BUSY;
1141 goto exit_qcmd;
1142 }
1143 if (fcport->retry_delay_timestamp) {
1144 if (time_after(jiffies, fcport->retry_delay_timestamp)) {
1145 fcport->retry_delay_timestamp = 0;
1146 } else {
1147 /* If retry_delay timer is active, flow off the ML */
1148 rc = SCSI_MLQUEUE_TARGET_BUSY;
1149 goto exit_qcmd;
1150 }
1151 }
1152
1153 io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
1154 if (!io_req) {
1155 rc = SCSI_MLQUEUE_HOST_BUSY;
1156 goto exit_qcmd;
1157 }
1158
1159 io_req->sc_cmd = sc_cmd;
1160
1161 /* Take fcport->rport_lock for posting to fcport send queue */
1162 spin_lock_irqsave(&fcport->rport_lock, flags);
1163 if (qedf_post_io_req(fcport, io_req)) {
1164 QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
1165 /* Return SQE to pool */
1166 atomic_inc(&fcport->free_sqes);
1167 rc = SCSI_MLQUEUE_HOST_BUSY;
1168 }
1169 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1170
1171exit_qcmd:
1172 return rc;
1173}
1174
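/*
 * Copy the status, residual count, response code and sense data from the
 * FCP_RSP information in the CQE into the io_req and the SCSI command's
 * sense buffer.
 */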
1175static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
1176 struct fcoe_cqe_rsp_info *fcp_rsp)
1177{
1178 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1179 struct qedf_ctx *qedf = io_req->fcport->qedf;
1180 u8 rsp_flags = fcp_rsp->rsp_flags.flags;
1181 int fcp_sns_len = 0;
1182 int fcp_rsp_len = 0;
1183 uint8_t *rsp_info, *sense_data;
1184
1185 io_req->fcp_status = FC_GOOD;
1186 io_req->fcp_resid = 0;
1187 if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
1188 FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
1189 io_req->fcp_resid = fcp_rsp->fcp_resid;
1190
1191 io_req->scsi_comp_flags = rsp_flags;
1192 CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
1193 fcp_rsp->scsi_status_code;
1194
1195 if (rsp_flags &
1196 FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
1197 fcp_rsp_len = fcp_rsp->fcp_rsp_len;
1198
1199 if (rsp_flags &
1200 FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
1201 fcp_sns_len = fcp_rsp->fcp_sns_len;
1202
1203 io_req->fcp_rsp_len = fcp_rsp_len;
1204 io_req->fcp_sns_len = fcp_sns_len;
1205 rsp_info = sense_data = io_req->sense_buffer;
1206
1207 /* fetch fcp_rsp_code */
1208 if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
1209 /* Only for task management function */
1210 io_req->fcp_rsp_code = rsp_info[3];
1211 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1212 "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
1213 /* Adjust sense-data location. */
1214 sense_data += fcp_rsp_len;
1215 }
1216
1217 if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
1218 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1219 "Truncating sense buffer\n");
1220 fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
1221 }
1222
1223 memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1224 if (fcp_sns_len)
1225 memcpy(sc_cmd->sense_buffer, sense_data,
1226 fcp_sns_len);
1227}
1228
1229static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
1230{
1231 struct scsi_cmnd *sc = io_req->sc_cmd;
1232
1233 if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
1234 dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
1235 scsi_sg_count(sc), sc->sc_data_direction);
1236 io_req->bd_tbl->bd_valid = 0;
1237 }
1238}
1239
1240void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1241 struct qedf_ioreq *io_req)
1242{
1243 u16 xid, rval;
1244 struct fcoe_task_context *task_ctx;
1245 struct scsi_cmnd *sc_cmd;
1246 struct fcoe_cqe_rsp_info *fcp_rsp;
1247 struct qedf_rport *fcport;
1248 int refcount;
1249 u16 scope, qualifier = 0;
1250 u8 fw_residual_flag = 0;
1251
1252 if (!io_req)
1253 return;
1254 if (!cqe)
1255 return;
1256
1257 xid = io_req->xid;
1258 task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
1259 sc_cmd = io_req->sc_cmd;
1260 fcp_rsp = &cqe->cqe_info.rsp_info;
1261
1262 if (!sc_cmd) {
1263 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1264 return;
1265 }
1266
1267 if (!sc_cmd->SCp.ptr) {
1268 QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1269 "another context.\n");
1270 return;
1271 }
1272
1273 if (!sc_cmd->request) {
1274 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
1275 "sc_cmd=%p.\n", sc_cmd);
1276 return;
1277 }
1278
1279 if (!sc_cmd->request->special) {
1280 QEDF_WARN(&(qedf->dbg_ctx), "request->special is NULL so "
1281 "request not valid, sc_cmd=%p.\n", sc_cmd);
1282 return;
1283 }
1284
1285 if (!sc_cmd->request->q) {
1286 QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
1287 "is not valid, sc_cmd=%p.\n", sc_cmd);
1288 return;
1289 }
1290
1291 fcport = io_req->fcport;
1292
1293 qedf_parse_fcp_rsp(io_req, fcp_rsp);
1294
1295 qedf_unmap_sg_list(qedf, io_req);
1296
1297 /* Check for FCP transport error */
1298 if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
1299 QEDF_ERR(&(qedf->dbg_ctx),
1300 "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
1301 "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
1302 io_req->fcp_rsp_code);
1303 sc_cmd->result = DID_BUS_BUSY << 16;
1304 goto out;
1305 }
1306
1307 fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
1308 FCOE_CQE_RSP_INFO_FW_UNDERRUN);
1309 if (fw_residual_flag) {
1310 QEDF_ERR(&(qedf->dbg_ctx),
1311 "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x "
1312 "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid,
1313 fcp_rsp->rsp_flags.flags, io_req->fcp_resid,
1314 cqe->cqe_info.rsp_info.fw_residual);
1315
1316 if (io_req->cdb_status == 0)
1317 sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
1318 else
1319 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1320
1321 /* Abort the command since we did not get all the data */
1322 init_completion(&io_req->abts_done);
1323 rval = qedf_initiate_abts(io_req, true);
1324 if (rval) {
1325 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1326 sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
1327 }
1328
1329 /*
1330		 * Set resid to the whole buffer length so we won't try to reuse
1331		 * any previously read data.
1332 */
1333 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1334 goto out;
1335 }
1336
1337 switch (io_req->fcp_status) {
1338 case FC_GOOD:
1339 if (io_req->cdb_status == 0) {
1340 /* Good I/O completion */
1341 sc_cmd->result = DID_OK << 16;
1342 } else {
1343 refcount = atomic_read(&io_req->refcount.refcount);
1344 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1345 "%d:0:%d:%d xid=0x%0x op=0x%02x "
1346 "lba=%02x%02x%02x%02x cdb_status=%d "
1347 "fcp_resid=0x%x refcount=%d.\n",
1348 qedf->lport->host->host_no, sc_cmd->device->id,
1349 sc_cmd->device->lun, io_req->xid,
1350 sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
1351 sc_cmd->cmnd[4], sc_cmd->cmnd[5],
1352 io_req->cdb_status, io_req->fcp_resid,
1353 refcount);
1354 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1355
1356 if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
1357 io_req->cdb_status == SAM_STAT_BUSY) {
1358 /*
1359 * Check whether we need to set retry_delay at
1360 * all based on retry_delay module parameter
1361 * and the status qualifier.
1362 */
1363
1364 /* Upper 2 bits */
1365 scope = fcp_rsp->retry_delay_timer & 0xC000;
1366 /* Lower 14 bits */
1367 qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
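				/*
				 * Example: retry_delay_timer = 0x8005 gives
				 * scope = 0x8000 and qualifier = 5, i.e. hold
				 * off new commands to this rport for
				 * 5 * 100ms = 500ms.
				 */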
1368
1369 if (qedf_retry_delay &&
1370 scope > 0 && qualifier > 0 &&
1371 qualifier <= 0x3FEF) {
1372 /* Check we don't go over the max */
1373 if (qualifier > QEDF_RETRY_DELAY_MAX)
1374 qualifier =
1375 QEDF_RETRY_DELAY_MAX;
1376 fcport->retry_delay_timestamp =
1377 jiffies + (qualifier * HZ / 10);
1378 }
1379 }
1380 }
1381 if (io_req->fcp_resid)
1382 scsi_set_resid(sc_cmd, io_req->fcp_resid);
1383 break;
1384 default:
1385 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
1386 io_req->fcp_status);
1387 break;
1388 }
1389
1390out:
1391 if (qedf_io_tracing)
1392 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
1393
1394 io_req->sc_cmd = NULL;
1395 sc_cmd->SCp.ptr = NULL;
1396 sc_cmd->scsi_done(sc_cmd);
1397 kref_put(&io_req->refcount, qedf_release_cmd);
1398}
1399
1400/* Return a SCSI command in some other context besides a normal completion */
1401void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
1402 int result)
1403{
1404 u16 xid;
1405 struct scsi_cmnd *sc_cmd;
1406 int refcount;
1407
1408 if (!io_req)
1409 return;
1410
1411 xid = io_req->xid;
1412 sc_cmd = io_req->sc_cmd;
1413
1414 if (!sc_cmd) {
1415 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1416 return;
1417 }
1418
1419 if (!sc_cmd->SCp.ptr) {
1420 QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1421 "another context.\n");
1422 return;
1423 }
1424
1425 qedf_unmap_sg_list(qedf, io_req);
1426
1427 sc_cmd->result = result << 16;
1428 refcount = atomic_read(&io_req->refcount.refcount);
1429 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%d: Completing "
1430 "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
1431 "allowed=%d retries=%d refcount=%d.\n",
1432 qedf->lport->host->host_no, sc_cmd->device->id,
1433 sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
1434 sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
1435 sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
1436 refcount);
1437
1438 /*
1439	 * Set resid to the whole buffer length so we won't try to reuse any
1440 * previously read data
1441 */
1442 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1443
1444 if (qedf_io_tracing)
1445 qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
1446
1447 io_req->sc_cmd = NULL;
1448 sc_cmd->SCp.ptr = NULL;
1449 sc_cmd->scsi_done(sc_cmd);
1450 kref_put(&io_req->refcount, qedf_release_cmd);
1451}
1452
1453/*
1454 * Handle warning type CQE completions. This is mainly used for REC timer
1455 * popping.
1456 */
1457void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1458 struct qedf_ioreq *io_req)
1459{
1460 int rval, i;
1461 struct qedf_rport *fcport = io_req->fcport;
1462 u64 err_warn_bit_map;
1463 u8 err_warn = 0xff;
1464
1465 if (!cqe)
1466 return;
1467
1468 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
1469 "xid=0x%x\n", io_req->xid);
1470 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1471 "err_warn_bitmap=%08x:%08x\n",
1472 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1473 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1474 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1475 "rx_buff_off=%08x, rx_id=%04x\n",
1476 le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1477 le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1478 le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1479
1480	/* Combine the error/warning bitmap halves into a single 64-bit value */
1481 err_warn_bit_map = (u64)
1482 ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
1483 (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
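	/* The index of the lowest set bit gives the warning code to act on */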
1484 for (i = 0; i < 64; i++) {
1485 if (err_warn_bit_map & (u64)((u64)1 << i)) {
1486 err_warn = i;
1487 break;
1488 }
1489 }
1490
1491 /* Check if REC TOV expired if this is a tape device */
1492 if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
1493 if (err_warn ==
1494 FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
1495 QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
1496 if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
1497 io_req->rx_buf_off =
1498 cqe->cqe_info.err_info.rx_buf_off;
1499 io_req->tx_buf_off =
1500 cqe->cqe_info.err_info.tx_buf_off;
1501 io_req->rx_id = cqe->cqe_info.err_info.rx_id;
1502 rval = qedf_send_rec(io_req);
1503 /*
1504 * We only want to abort the io_req if we
1505 * can't queue the REC command as we want to
1506 * keep the exchange open for recovery.
1507 */
1508 if (rval)
1509 goto send_abort;
1510 }
1511 return;
1512 }
1513 }
1514
1515send_abort:
1516 init_completion(&io_req->abts_done);
1517 rval = qedf_initiate_abts(io_req, true);
1518 if (rval)
1519 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1520}
1521
1522/* Cleanup a command when we receive an error detection completion */
1523void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1524 struct qedf_ioreq *io_req)
1525{
1526 int rval;
1527
1528 if (!cqe)
1529 return;
1530
1531 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
1532 "xid=0x%x\n", io_req->xid);
1533 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1534 "err_warn_bitmap=%08x:%08x\n",
1535 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1536 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1537 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1538 "rx_buff_off=%08x, rx_id=%04x\n",
1539 le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1540 le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1541 le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1542
1543 if (qedf->stop_io_on_error) {
1544 qedf_stop_all_io(qedf);
1545 return;
1546 }
1547
1548 init_completion(&io_req->abts_done);
1549 rval = qedf_initiate_abts(io_req, true);
1550 if (rval)
1551 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1552}
1553
1554static void qedf_flush_els_req(struct qedf_ctx *qedf,
1555 struct qedf_ioreq *els_req)
1556{
1557 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1558 "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
1559 atomic_read(&els_req->refcount.refcount));
1560
1561 /*
1562 * Need to distinguish this from a timeout when calling the
1563 * els_req->cb_func.
1564 */
1565 els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
1566
1567 /* Cancel the timer */
1568 cancel_delayed_work_sync(&els_req->timeout_work);
1569
1570 /* Call callback function to complete command */
1571 if (els_req->cb_func && els_req->cb_arg) {
1572 els_req->cb_func(els_req->cb_arg);
1573 els_req->cb_arg = NULL;
1574 }
1575
1576 /* Release kref for original initiate_els */
1577 kref_put(&els_req->refcount, qedf_release_cmd);
1578}
1579
1580/* A value of -1 for lun is a wild card that means flush all
1581 * active SCSI I/Os for the target.
1582 */
1583void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
1584{
1585 struct qedf_ioreq *io_req;
1586 struct qedf_ctx *qedf;
1587 struct qedf_cmd_mgr *cmd_mgr;
1588 int i, rc;
1589
1590 if (!fcport)
1591 return;
1592
1593 qedf = fcport->qedf;
1594 cmd_mgr = qedf->cmd_mgr;
1595
1596 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Flush active i/o's.\n");
1597
1598 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1599 io_req = &cmd_mgr->cmds[i];
1600
1601 if (!io_req)
1602 continue;
1603 if (io_req->fcport != fcport)
1604 continue;
1605 if (io_req->cmd_type == QEDF_ELS) {
1606 rc = kref_get_unless_zero(&io_req->refcount);
1607 if (!rc) {
1608 QEDF_ERR(&(qedf->dbg_ctx),
1609 "Could not get kref for io_req=0x%p.\n",
1610 io_req);
1611 continue;
1612 }
1613 qedf_flush_els_req(qedf, io_req);
1614 /*
1615 * Release the kref and go back to the top of the
1616 * loop.
1617 */
1618 goto free_cmd;
1619 }
1620
1621 if (!io_req->sc_cmd)
1622 continue;
1623		if (lun > -1) {
1624 if (io_req->sc_cmd->device->lun !=
1625 (u64)lun)
1626 continue;
1627 }
1628
1629 /*
1630 * Use kref_get_unless_zero in the unlikely case the command
1631 * we're about to flush was completed in the normal SCSI path
1632 */
1633 rc = kref_get_unless_zero(&io_req->refcount);
1634 if (!rc) {
1635 QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
1636 "io_req=0x%p\n", io_req);
1637 continue;
1638 }
1639 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1640 "Cleanup xid=0x%x.\n", io_req->xid);
1641
1642 /* Cleanup task and return I/O mid-layer */
1643 qedf_initiate_cleanup(io_req, true);
1644
1645free_cmd:
1646 kref_put(&io_req->refcount, qedf_release_cmd);
1647 }
1648}
1649
1650/*
1651 * Initiate an ABTS middle path command. Note that we don't have to initialize
1652 * the task context for an ABTS task.
1653 */
1654int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
1655{
1656 struct fc_lport *lport;
1657 struct qedf_rport *fcport = io_req->fcport;
1658 struct fc_rport_priv *rdata = fcport->rdata;
1659 struct qedf_ctx *qedf = fcport->qedf;
1660 u16 xid;
1661 u32 r_a_tov = 0;
1662 int rc = 0;
1663 unsigned long flags;
1664
1665 r_a_tov = rdata->r_a_tov;
1666 lport = qedf->lport;
1667
1668 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1669 QEDF_ERR(&(qedf->dbg_ctx), "tgt not offloaded\n");
1670 rc = 1;
1671 goto abts_err;
1672 }
1673
1674 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
1675 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
1676 rc = 1;
1677 goto abts_err;
1678 }
1679
1680 if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
1681 QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
1682 rc = 1;
1683 goto abts_err;
1684 }
1685
1686 /* Ensure room on SQ */
1687 if (!atomic_read(&fcport->free_sqes)) {
1688 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1689 rc = 1;
1690 goto abts_err;
1691 }
1692
1693
1694 kref_get(&io_req->refcount);
1695
1696 xid = io_req->xid;
1697 qedf->control_requests++;
1698 qedf->packet_aborts++;
1699
1700 /* Set the return CPU to be the same as the request one */
1701 io_req->cpu = smp_processor_id();
1702
1703 /* Set the command type to abort */
1704 io_req->cmd_type = QEDF_ABTS;
1705 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1706
1707 set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1708 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "ABTS io_req xid = "
1709 "0x%x\n", xid);
1710
1711 qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT * HZ);
1712
1713 spin_lock_irqsave(&fcport->rport_lock, flags);
1714
1715 /* Add ABTS to send queue */
1716 qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_ABTS, 0);
1717
1718 /* Ring doorbell */
1719 qedf_ring_doorbell(fcport);
1720
1721 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1722
1723 return rc;
1724abts_err:
1725 /*
1726 * If the ABTS task fails to queue then we need to cleanup the
1727 * task at the firmware.
1728 */
1729 qedf_initiate_cleanup(io_req, return_scsi_cmd_on_abts);
1730 return rc;
1731}
1732
1733void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1734 struct qedf_ioreq *io_req)
1735{
1736 uint32_t r_ctl;
1737 uint16_t xid;
1738
1739 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
1740 "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
1741
1742 cancel_delayed_work(&io_req->timeout_work);
1743
1744 xid = io_req->xid;
1745 r_ctl = cqe->cqe_info.abts_info.r_ctl;
1746
1747 switch (r_ctl) {
1748 case FC_RCTL_BA_ACC:
1749 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
1750 "ABTS response - ACC Send RRQ after R_A_TOV\n");
1751 io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
1752 /*
1753		 * Don't release this cmd yet. It will be released
1754		 * after we get the RRQ response
1755 */
1756 kref_get(&io_req->refcount);
1757 queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
1758 msecs_to_jiffies(qedf->lport->r_a_tov));
1759 break;
1760 /* For error cases let the cleanup return the command */
1761 case FC_RCTL_BA_RJT:
1762 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
1763 "ABTS response - RJT\n");
1764 io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
1765 break;
1766 default:
1767 QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
1768 break;
1769 }
1770
1771 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1772
1773 if (io_req->sc_cmd) {
1774 if (io_req->return_scsi_cmd_on_abts)
1775 qedf_scsi_done(qedf, io_req, DID_ERROR);
1776 }
1777
1778 /* Notify eh_abort handler that ABTS is complete */
1779 complete(&io_req->abts_done);
1780
1781 kref_put(&io_req->refcount, qedf_release_cmd);
1782}
1783
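/*
 * Allocate the DMA-coherent request/response buffers and the single-entry
 * BD tables used by middle path (ELS and task management) commands.
 */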
1784int qedf_init_mp_req(struct qedf_ioreq *io_req)
1785{
1786 struct qedf_mp_req *mp_req;
1787 struct fcoe_sge *mp_req_bd;
1788 struct fcoe_sge *mp_resp_bd;
1789 struct qedf_ctx *qedf = io_req->fcport->qedf;
1790 dma_addr_t addr;
1791 uint64_t sz;
1792
1793 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");
1794
1795 mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
1796 memset(mp_req, 0, sizeof(struct qedf_mp_req));
1797
1798 if (io_req->cmd_type != QEDF_ELS) {
1799 mp_req->req_len = sizeof(struct fcp_cmnd);
1800 io_req->data_xfer_len = mp_req->req_len;
1801 } else
1802 mp_req->req_len = io_req->data_xfer_len;
1803
1804 mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
1805 &mp_req->req_buf_dma, GFP_KERNEL);
1806 if (!mp_req->req_buf) {
1807 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
1808 qedf_free_mp_resc(io_req);
1809 return -ENOMEM;
1810 }
1811
1812 mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
1813 QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
1814 if (!mp_req->resp_buf) {
1815 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
1816 "buffer\n");
1817 qedf_free_mp_resc(io_req);
1818 return -ENOMEM;
1819 }
1820
1821 /* Allocate and map mp_req_bd and mp_resp_bd */
1822 sz = sizeof(struct fcoe_sge);
1823 mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
1824 &mp_req->mp_req_bd_dma, GFP_KERNEL);
1825 if (!mp_req->mp_req_bd) {
1826 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
1827 qedf_free_mp_resc(io_req);
1828 return -ENOMEM;
1829 }
1830
1831 mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
1832 &mp_req->mp_resp_bd_dma, GFP_KERNEL);
1833 if (!mp_req->mp_resp_bd) {
1834 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
1835 qedf_free_mp_resc(io_req);
1836 return -ENOMEM;
1837 }
1838
1839 /* Fill bd table */
1840 addr = mp_req->req_buf_dma;
1841 mp_req_bd = mp_req->mp_req_bd;
1842 mp_req_bd->sge_addr.lo = U64_LO(addr);
1843 mp_req_bd->sge_addr.hi = U64_HI(addr);
1844 mp_req_bd->size = QEDF_PAGE_SIZE;
1845
1846 /*
1847 * MP buffer is either a task mgmt command or an ELS.
1848 * So the assumption is that it consumes a single bd
1849 * entry in the bd table
1850 */
1851 mp_resp_bd = mp_req->mp_resp_bd;
1852 addr = mp_req->resp_buf_dma;
1853 mp_resp_bd->sge_addr.lo = U64_LO(addr);
1854 mp_resp_bd->sge_addr.hi = U64_HI(addr);
1855 mp_resp_bd->size = QEDF_PAGE_SIZE;
1856
1857 return 0;
1858}
1859
1860/*
1861 * Last ditch effort to clear the port if it's stuck. Used only after a
1862 * cleanup task times out.
1863 */
1864static void qedf_drain_request(struct qedf_ctx *qedf)
1865{
1866 if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
1867 QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
1868 return;
1869 }
1870
1871 /* Set bit to return all queuecommand requests as busy */
1872 set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
1873
1874 /* Call qed drain request for function. Should be synchronous */
1875 qed_ops->common->drain(qedf->cdev);
1876
1877 /* Settle time for CQEs to be returned */
1878 msleep(100);
1879
1880 /* Unplug and continue */
1881 clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
1882}
1883
1884/*
1885 * Returns SUCCESS if the cleanup task does not timeout, otherwise return
1886 * FAILURE.
1887 */
1888int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
1889 bool return_scsi_cmd_on_abts)
1890{
1891 struct qedf_rport *fcport;
1892 struct qedf_ctx *qedf;
1893 uint16_t xid;
1894 struct fcoe_task_context *task;
1895 int tmo = 0;
1896 int rc = SUCCESS;
1897 unsigned long flags;
1898
1899 fcport = io_req->fcport;
1900 if (!fcport) {
1901 QEDF_ERR(NULL, "fcport is NULL.\n");
1902 return SUCCESS;
1903 }
1904
1905 qedf = fcport->qedf;
1906 if (!qedf) {
1907 QEDF_ERR(NULL, "qedf is NULL.\n");
1908 return SUCCESS;
1909 }
1910
1911 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1912 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
1913 QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
1914 "cleanup processing or already completed.\n",
1915 io_req->xid);
1916 return SUCCESS;
1917 }
1918
1919 /* Ensure room on SQ */
1920 if (!atomic_read(&fcport->free_sqes)) {
1921 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1922 return FAILED;
1923 }
1924
1925
1926 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid=0x%x\n",
1927 io_req->xid);
1928
1929 /* Cleanup cmds re-use the same TID as the original I/O */
1930 xid = io_req->xid;
1931 io_req->cmd_type = QEDF_CLEANUP;
1932 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1933
1934 /* Set the return CPU to be the same as the request one */
1935 io_req->cpu = smp_processor_id();
1936
1937 set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
1938
1939 task = qedf_get_task_mem(&qedf->tasks, xid);
1940
1941 init_completion(&io_req->tm_done);
1942
1943 /* Obtain free SQ entry */
1944 spin_lock_irqsave(&fcport->rport_lock, flags);
1945 qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_EXCHANGE_CLEANUP, 0);
1946
1947 /* Ring doorbell */
1948 qedf_ring_doorbell(fcport);
1949 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1950
1951 tmo = wait_for_completion_timeout(&io_req->tm_done,
1952 QEDF_CLEANUP_TIMEOUT * HZ);
1953
1954 if (!tmo) {
1955 rc = FAILED;
1956 /* Timeout case */
1957 QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
1958 "xid=%x.\n", io_req->xid);
1959 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
1960 /* Issue a drain request if cleanup task times out */
1961 QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
1962 qedf_drain_request(qedf);
1963 }
1964
1965 if (io_req->sc_cmd) {
1966 if (io_req->return_scsi_cmd_on_abts)
1967 qedf_scsi_done(qedf, io_req, DID_ERROR);
1968 }
1969
1970 if (rc == SUCCESS)
1971 io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
1972 else
1973 io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;
1974
1975 return rc;
1976}
1977
1978void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1979 struct qedf_ioreq *io_req)
1980{
1981 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
1982 io_req->xid);
1983
1984 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
1985
1986 /* Complete so we can finish cleaning up the I/O */
1987 complete(&io_req->tm_done);
1988}
1989
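/*
 * Send a task management request (e.g. LUN or target reset) as a middle
 * path command, wait for the firmware completion and then flush the
 * affected outstanding I/Os.
 */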
1990static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
1991 uint8_t tm_flags)
1992{
1993 struct qedf_ioreq *io_req;
1994 struct qedf_mp_req *tm_req;
1995 struct fcoe_task_context *task;
1996 struct fc_frame_header *fc_hdr;
1997 struct fcp_cmnd *fcp_cmnd;
1998 struct qedf_ctx *qedf = fcport->qedf;
1999 int rc = 0;
2000 uint16_t xid;
2001 uint32_t sid, did;
2002 int tmo = 0;
2003 unsigned long flags;
2004
2005 if (!sc_cmd) {
2006 QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
2007 return FAILED;
2008 }
2009
2010 if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
2011 QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
2012 rc = FAILED;
2013 return FAILED;
2014 }
2015
2016 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "portid = 0x%x "
2017 "tm_flags = %d\n", fcport->rdata->ids.port_id, tm_flags);
2018
2019 io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
2020 if (!io_req) {
2021		QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate TMF io_req.\n");
2022 rc = -EAGAIN;
2023 goto reset_tmf_err;
2024 }
2025
2026 /* Initialize rest of io_req fields */
2027 io_req->sc_cmd = sc_cmd;
2028 io_req->fcport = fcport;
2029 io_req->cmd_type = QEDF_TASK_MGMT_CMD;
2030
2031 /* Set the return CPU to be the same as the request one */
2032 io_req->cpu = smp_processor_id();
2033
2034 tm_req = (struct qedf_mp_req *)&(io_req->mp_req);
2035
2036 rc = qedf_init_mp_req(io_req);
2037 if (rc == FAILED) {
2038 QEDF_ERR(&(qedf->dbg_ctx), "Task mgmt MP request init "
2039 "failed\n");
2040 kref_put(&io_req->refcount, qedf_release_cmd);
2041 goto reset_tmf_err;
2042 }
2043
2044 /* Set TM flags */
2045 io_req->io_req_flags = 0;
2046 tm_req->tm_flags = tm_flags;
2047
2048 /* Default is to return a SCSI command when an error occurs */
2049 io_req->return_scsi_cmd_on_abts = true;
2050
2051 /* Fill FCP_CMND */
2052 qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
2053 fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
2054 memset(fcp_cmnd->fc_cdb, 0, FCP_CMND_LEN);
2055 fcp_cmnd->fc_dl = 0;
2056
2057 /* Fill FC header */
2058 fc_hdr = &(tm_req->req_fc_hdr);
2059 sid = fcport->sid;
2060 did = fcport->rdata->ids.port_id;
2061 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, sid, did,
2062 FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
2063 FC_FC_SEQ_INIT, 0);
2064 /* Obtain exchange id */
2065 xid = io_req->xid;
2066
2067 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
2068 "0x%x\n", xid);
2069
2070 /* Initialize task context for this IO request */
2071 task = qedf_get_task_mem(&qedf->tasks, xid);
2072 qedf_init_mp_task(io_req, task);
2073
2074 init_completion(&io_req->tm_done);
2075
2076 /* Obtain free SQ entry */
2077 spin_lock_irqsave(&fcport->rport_lock, flags);
2078 qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);
2079
2080 /* Ring doorbell */
2081 qedf_ring_doorbell(fcport);
2082 spin_unlock_irqrestore(&fcport->rport_lock, flags);
2083
2084 tmo = wait_for_completion_timeout(&io_req->tm_done,
2085 QEDF_TM_TIMEOUT * HZ);
2086
2087 if (!tmo) {
2088 rc = FAILED;
2089 QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
2090 } else {
2091 /* Check TMF response code */
2092 if (io_req->fcp_rsp_code == 0)
2093 rc = SUCCESS;
2094 else
2095 rc = FAILED;
2096 }
2097
2098 if (tm_flags == FCP_TMF_LUN_RESET)
2099 qedf_flush_active_ios(fcport, (int)sc_cmd->device->lun);
2100 else
2101 qedf_flush_active_ios(fcport, -1);
2102
2103 kref_put(&io_req->refcount, qedf_release_cmd);
2104
2105 if (rc != SUCCESS) {
2106 QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
2107 rc = FAILED;
2108 } else {
2109 QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
2110 rc = SUCCESS;
2111 }
2112reset_tmf_err:
2113 return rc;
2114}
2115
2116int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
2117{
2118 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
2119 struct fc_rport_libfc_priv *rp = rport->dd_data;
2120 struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
2121 struct qedf_ctx *qedf;
2122 struct fc_lport *lport;
2123 int rc = SUCCESS;
2124 int rval;
2125
2126 rval = fc_remote_port_chkready(rport);
2127
2128 if (rval) {
2129 QEDF_ERR(NULL, "device_reset rport not ready\n");
2130 rc = FAILED;
2131 goto tmf_err;
2132 }
2133
2134 if (fcport == NULL) {
2135 QEDF_ERR(NULL, "device_reset: rport is NULL\n");
2136 rc = FAILED;
2137 goto tmf_err;
2138 }
2139
2140 qedf = fcport->qedf;
2141 lport = qedf->lport;
2142
2143 if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
2144 test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
2145 rc = SUCCESS;
2146 goto tmf_err;
2147 }
2148
2149 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
2150 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
2151 rc = FAILED;
2152 goto tmf_err;
2153 }
2154
2155 rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
2156
2157tmf_err:
2158 return rc;
2159}
2160
2161void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
2162 struct qedf_ioreq *io_req)
2163{
2164 struct fcoe_cqe_rsp_info *fcp_rsp;
2165 struct fcoe_cqe_midpath_info *mp_info;
2166
2167
2168 /* Get TMF response length from CQE */
2169 mp_info = &cqe->cqe_info.midpath_info;
2170 io_req->mp_req.resp_len = mp_info->data_placement_size;
2171 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
2172 "Response len is %d.\n", io_req->mp_req.resp_len);
2173
2174 fcp_rsp = &cqe->cqe_info.rsp_info;
2175 qedf_parse_fcp_rsp(io_req, fcp_rsp);
2176
2177 io_req->sc_cmd = NULL;
2178 complete(&io_req->tm_done);
2179}
2180
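/*
 * Handle an unsolicited frame completion: copy the frame out of the BDQ
 * buffer into an fc_frame, queue it to a workqueue so libfc can process
 * it outside atomic context, then advance the BDQ producer index.
 */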
2181void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
2182 struct fcoe_cqe *cqe)
2183{
2184 unsigned long flags;
2185 uint16_t tmp;
2186 uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
2187 u32 payload_len, crc;
2188 struct fc_frame_header *fh;
2189 struct fc_frame *fp;
2190 struct qedf_io_work *io_work;
2191 u32 bdq_idx;
2192 void *bdq_addr;
2193
2194 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2195 "address.hi=%x address.lo=%x opaque_data.hi=%x "
2196 "opaque_data.lo=%x bdq_prod_idx=%u len=%u.\n",
2197 le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.hi),
2198 le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.lo),
2199 le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.hi),
2200 le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo),
2201 qedf->bdq_prod_idx, pktlen);
2202
2203 bdq_idx = le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo);
2204 if (bdq_idx >= QEDF_BDQ_SIZE) {
2205 QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
2206 bdq_idx);
2207 goto increment_prod;
2208 }
2209
2210 bdq_addr = qedf->bdq[bdq_idx].buf_addr;
2211 if (!bdq_addr) {
2212 QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
2213 "unsolicited packet.\n");
2214 goto increment_prod;
2215 }
2216
2217 if (qedf_dump_frames) {
2218 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2219 "BDQ frame is at addr=%p.\n", bdq_addr);
2220 print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
2221 (void *)bdq_addr, pktlen, false);
2222 }
2223
2224 /* Allocate frame */
2225 payload_len = pktlen - sizeof(struct fc_frame_header);
2226 fp = fc_frame_alloc(qedf->lport, payload_len);
2227 if (!fp) {
2228 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
2229 goto increment_prod;
2230 }
2231
2232 /* Copy data from BDQ buffer into fc_frame struct */
2233 fh = (struct fc_frame_header *)fc_frame_header_get(fp);
2234 memcpy(fh, (void *)bdq_addr, pktlen);
2235
2236 /* Initialize the frame so libfc sees it as a valid frame */
2237 crc = fcoe_fc_crc(fp);
2238 fc_frame_init(fp);
2239 fr_dev(fp) = qedf->lport;
2240 fr_sof(fp) = FC_SOF_I3;
2241 fr_eof(fp) = FC_EOF_T;
2242 fr_crc(fp) = cpu_to_le32(~crc);
2243
2244 /*
2245 * We need to return the frame back up to libfc in a non-atomic
2246 * context
2247 */
2248 io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
2249 if (!io_work) {
2250 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
2251 "work for I/O completion.\n");
2252 fc_frame_free(fp);
2253 goto increment_prod;
2254 }
2255 memset(io_work, 0, sizeof(struct qedf_io_work));
2256
2257 INIT_WORK(&io_work->work, qedf_fp_io_handler);
2258
2259 /* Copy contents of CQE for deferred processing */
2260 memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
2261
2262 io_work->qedf = qedf;
2263 io_work->fp = fp;
2264
2265 queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
2266increment_prod:
2267 spin_lock_irqsave(&qedf->hba_lock, flags);
2268
2269 /* Increment producer to let f/w know we've handled the frame */
2270 qedf->bdq_prod_idx++;
2271
2272 /* Producer index wraps at uint16_t boundary */
2273 if (qedf->bdq_prod_idx == 0xffff)
2274 qedf->bdq_prod_idx = 0;
2275
2276 writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
2277 tmp = readw(qedf->bdq_primary_prod);
2278 writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
2279 tmp = readw(qedf->bdq_secondary_prod);
2280
2281 spin_unlock_irqrestore(&qedf->hba_lock, flags);
2282}
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
new file mode 100644
index 000000000000..d9d7a86b5f8b
--- /dev/null
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -0,0 +1,3336 @@
1/*
2 * QLogic FCoE Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/pci.h>
13#include <linux/device.h>
14#include <linux/highmem.h>
15#include <linux/crc32.h>
16#include <linux/interrupt.h>
17#include <linux/list.h>
18#include <linux/kthread.h>
19#include <scsi/libfc.h>
20#include <scsi/scsi_host.h>
21#include <linux/if_ether.h>
22#include <linux/if_vlan.h>
23#include <linux/cpu.h>
24#include "qedf.h"
25
26const struct qed_fcoe_ops *qed_ops;
27
28static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
29static void qedf_remove(struct pci_dev *pdev);
30
31extern struct qedf_debugfs_ops qedf_debugfs_ops;
32extern struct file_operations qedf_dbg_fops;
33
34/*
35 * Driver module parameters.
36 */
37static unsigned int qedf_dev_loss_tmo = 60;
38module_param_named(dev_loss_tmo, qedf_dev_loss_tmo, int, S_IRUGO);
39MODULE_PARM_DESC(dev_loss_tmo, " dev_loss_tmo setting for attached "
40 "remote ports (default 60)");
41
42uint qedf_debug = QEDF_LOG_INFO;
43module_param_named(debug, qedf_debug, uint, S_IRUGO);
44MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging"
45 " mask");
46
47static uint qedf_fipvlan_retries = 30;
48module_param_named(fipvlan_retries, qedf_fipvlan_retries, int, S_IRUGO);
49MODULE_PARM_DESC(fipvlan_retries, " Number of FIP VLAN requests to attempt "
50 "before giving up (default 30)");
51
52static uint qedf_fallback_vlan = QEDF_FALLBACK_VLAN;
53module_param_named(fallback_vlan, qedf_fallback_vlan, int, S_IRUGO);
54MODULE_PARM_DESC(fallback_vlan, " VLAN ID to try if fip vlan request fails "
55 "(default 1002).");
56
57static uint qedf_default_prio = QEDF_DEFAULT_PRIO;
58module_param_named(default_prio, qedf_default_prio, int, S_IRUGO);
59MODULE_PARM_DESC(default_prio, " Default 802.1q priority for FIP and FCoE"
60 " traffic (default 3).");
61
62uint qedf_dump_frames;
63module_param_named(dump_frames, qedf_dump_frames, int, S_IRUGO | S_IWUSR);
64MODULE_PARM_DESC(dump_frames, " Print the skb data of FIP and FCoE frames "
65 "(default off)");
66
67static uint qedf_queue_depth;
68module_param_named(queue_depth, qedf_queue_depth, int, S_IRUGO);
69MODULE_PARM_DESC(queue_depth, " Sets the queue depth for all LUNs discovered "
70 "by the qedf driver. Default is 0 (use OS default).");
71
72uint qedf_io_tracing;
73module_param_named(io_tracing, qedf_io_tracing, int, S_IRUGO | S_IWUSR);
74MODULE_PARM_DESC(io_tracing, " Enable logging of SCSI requests/completions "
75 "into trace buffer. (default off).");
76
77static uint qedf_max_lun = MAX_FIBRE_LUNS;
78module_param_named(max_lun, qedf_max_lun, int, S_IRUGO);
79MODULE_PARM_DESC(max_lun, " Sets the maximum luns per target that the driver "
80 "supports. (default 0xffffffff)");
81
82uint qedf_link_down_tmo;
83module_param_named(link_down_tmo, qedf_link_down_tmo, int, S_IRUGO);
84MODULE_PARM_DESC(link_down_tmo, " Delays informing the fcoe transport that the "
85 "link is down by N seconds.");
86
87bool qedf_retry_delay;
88module_param_named(retry_delay, qedf_retry_delay, bool, S_IRUGO | S_IWUSR);
89MODULE_PARM_DESC(retry_delay, " Enable/disable handling of the FCP_RSP IU retry "
90	"delay (default off).");
91
92static uint qedf_dp_module;
93module_param_named(dp_module, qedf_dp_module, uint, S_IRUGO);
94MODULE_PARM_DESC(dp_module, " Bit flags to control verbose printks passed to the "
95	"qed module during probe.");
96
97static uint qedf_dp_level;
98module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO);
99MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module "
100	"during probe (0-3, where 0 is the most verbose).");
101
102struct workqueue_struct *qedf_io_wq;
103
104static struct fcoe_percpu_s qedf_global;
105static DEFINE_SPINLOCK(qedf_global_lock);
106
107static struct kmem_cache *qedf_io_work_cache;
108
109void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id)
110{
111 qedf->vlan_id = vlan_id;
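	/* Fold the default 802.1p priority into the PCP bits of the stored tag */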
112 qedf->vlan_id |= qedf_default_prio << VLAN_PRIO_SHIFT;
113 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Setting vlan_id=%04x "
114 "prio=%d.\n", vlan_id, qedf_default_prio);
115}
116
117/* Returns true if we have a valid vlan, false otherwise */
118static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
119{
120 int rc;
121
122 if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
123 QEDF_ERR(&(qedf->dbg_ctx), "Link not up.\n");
124 return false;
125 }
126
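	/*
	 * Poll for a discovered VLAN, issuing a FIP VLAN request and waiting
	 * up to one second per attempt until the retry count is exhausted.
	 */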
127 while (qedf->fipvlan_retries--) {
128 if (qedf->vlan_id > 0)
129 return true;
130 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
131 "Retry %d.\n", qedf->fipvlan_retries);
132 init_completion(&qedf->fipvlan_compl);
133 qedf_fcoe_send_vlan_req(qedf);
134 rc = wait_for_completion_timeout(&qedf->fipvlan_compl,
135 1 * HZ);
136 if (rc > 0) {
137 fcoe_ctlr_link_up(&qedf->ctlr);
138 return true;
139 }
140 }
141
142 return false;
143}
144
145static void qedf_handle_link_update(struct work_struct *work)
146{
147 struct qedf_ctx *qedf =
148 container_of(work, struct qedf_ctx, link_update.work);
149 int rc;
150
151 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Entered.\n");
152
153 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
154 rc = qedf_initiate_fipvlan_req(qedf);
155 if (rc)
156 return;
157		 * If we get here then we never received a response to our
158 * If we get here then we never received a repsonse to our
159 * fip vlan request so set the vlan_id to the default and
160 * tell FCoE that the link is up
161 */
162 QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN "
163 "response, falling back to default VLAN %d.\n",
164 qedf_fallback_vlan);
165 qedf_set_vlan_id(qedf, QEDF_FALLBACK_VLAN);
166
167 /*
168 * Zero out data_src_addr so we'll update it with the new
169 * lport port_id
170 */
171 eth_zero_addr(qedf->data_src_addr);
172 fcoe_ctlr_link_up(&qedf->ctlr);
173 } else if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
174 /*
175 * If we hit here and link_down_tmo_valid is still 1 it means
176 * that link_down_tmo timed out so set it to 0 to make sure any
177 * other readers have accurate state.
178 */
179 atomic_set(&qedf->link_down_tmo_valid, 0);
180 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
181 "Calling fcoe_ctlr_link_down().\n");
182 fcoe_ctlr_link_down(&qedf->ctlr);
183 qedf_wait_for_upload(qedf);
184 /* Reset the number of FIP VLAN retries */
185 qedf->fipvlan_retries = qedf_fipvlan_retries;
186 }
187}
188
189static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
190 void *arg)
191{
192 struct fc_exch *exch = fc_seq_exch(seq);
193 struct fc_lport *lport = exch->lp;
194 struct qedf_ctx *qedf = lport_priv(lport);
195
196 if (!qedf) {
197 QEDF_ERR(NULL, "qedf is NULL.\n");
198 return;
199 }
200
201 /*
202 * If ERR_PTR is set then don't try to stat anything as it will cause
203 * a crash when we access fp.
204 */
205 if (IS_ERR(fp)) {
206 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
207 "fp has IS_ERR() set.\n");
208 goto skip_stat;
209 }
210
211 /* Log stats for FLOGI reject */
212 if (fc_frame_payload_op(fp) == ELS_LS_RJT)
213 qedf->flogi_failed++;
214
215 /* Complete flogi_compl so we can proceed to sending ADISCs */
216 complete(&qedf->flogi_compl);
217
218skip_stat:
219 /* Report response to libfc */
220 fc_lport_flogi_resp(seq, fp, lport);
221}
222
223static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
224 struct fc_frame *fp, unsigned int op,
225 void (*resp)(struct fc_seq *,
226 struct fc_frame *,
227 void *),
228 void *arg, u32 timeout)
229{
230 struct qedf_ctx *qedf = lport_priv(lport);
231
232 /*
233 * Intercept FLOGI for statistic purposes. Note we use the resp
234 * callback to tell if this is really a flogi.
235 */
236 if (resp == fc_lport_flogi_resp) {
237 qedf->flogi_cnt++;
238 return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
239 arg, timeout);
240 }
241
242 return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
243}
244
245int qedf_send_flogi(struct qedf_ctx *qedf)
246{
247 struct fc_lport *lport;
248 struct fc_frame *fp;
249
250 lport = qedf->lport;
251
252 if (!lport->tt.elsct_send)
253 return -EINVAL;
254
255 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
256 if (!fp) {
257 QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failed.\n");
258 return -ENOMEM;
259 }
260
261 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
262 "Sending FLOGI to reestablish session with switch.\n");
263 lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
264 ELS_FLOGI, qedf_flogi_resp, lport, lport->r_a_tov);
265
266 init_completion(&qedf->flogi_compl);
267
268 return 0;
269}
270
271struct qedf_tmp_rdata_item {
272 struct fc_rport_priv *rdata;
273 struct list_head list;
274};
275
276/*
277 * This function is called if link_down_tmo is in use. If we get a link up and
278 * link_down_tmo has not expired then use just FLOGI/ADISC to recover our
279 * sessions with targets. Otherwise, just call fcoe_ctlr_link_up().
280 */
281static void qedf_link_recovery(struct work_struct *work)
282{
283 struct qedf_ctx *qedf =
284 container_of(work, struct qedf_ctx, link_recovery.work);
285 struct qedf_rport *fcport;
286 struct fc_rport_priv *rdata;
287 struct qedf_tmp_rdata_item *rdata_item, *tmp_rdata_item;
288 bool rc;
289 int retries = 30;
290 int rval, i;
291 struct list_head rdata_login_list;
292
293 INIT_LIST_HEAD(&rdata_login_list);
294
295 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
296 "Link down tmo did not expire.\n");
297
298 /*
299 * Essentially reset the fcoe_ctlr here without affecting the state
300 * of the libfc structs.
301 */
302 qedf->ctlr.state = FIP_ST_LINK_WAIT;
303 fcoe_ctlr_link_down(&qedf->ctlr);
304
305 /*
306 * Bring the link up before we send the fipvlan request so libfcoe
307 * can select a new fcf in parallel
308 */
309 fcoe_ctlr_link_up(&qedf->ctlr);
310
311	/* Since the link went down and came back up, verify which VLAN we're on */
312 qedf->fipvlan_retries = qedf_fipvlan_retries;
313 rc = qedf_initiate_fipvlan_req(qedf);
314 if (!rc)
315 return;
316
317 /*
318	 * We need to wait for an FCF to be selected after the fcoe_ctlr_link_up
319	 * call, otherwise the FLOGI will be rejected.
320 */
321 while (retries > 0) {
322 if (qedf->ctlr.sel_fcf) {
323 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
324 "FCF reselected, proceeding with FLOGI.\n");
325 break;
326 }
327 msleep(500);
328 retries--;
329 }
330
331 if (retries < 1) {
332 QEDF_ERR(&(qedf->dbg_ctx), "Exhausted retries waiting for "
333 "FCF selection.\n");
334 return;
335 }
336
337 rval = qedf_send_flogi(qedf);
338 if (rval)
339 return;
340
341 /* Wait for FLOGI completion before proceeding with sending ADISCs */
342 i = wait_for_completion_timeout(&qedf->flogi_compl,
343 qedf->lport->r_a_tov);
344 if (i == 0) {
345 QEDF_ERR(&(qedf->dbg_ctx), "FLOGI timed out.\n");
346 return;
347 }
348
349 /*
350 * Call lport->tt.rport_login which will cause libfc to send an
351 * ADISC since the rport is in state ready.
352 */
353 rcu_read_lock();
354 list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
355 rdata = fcport->rdata;
356 if (rdata == NULL)
357 continue;
358 rdata_item = kzalloc(sizeof(struct qedf_tmp_rdata_item),
359 GFP_ATOMIC);
360 if (!rdata_item)
361 continue;
362 if (kref_get_unless_zero(&rdata->kref)) {
363 rdata_item->rdata = rdata;
364 list_add(&rdata_item->list, &rdata_login_list);
365 } else
366 kfree(rdata_item);
367 }
368 rcu_read_unlock();
369 /*
370 * Do the fc_rport_login outside of the rcu lock so we don't take a
371 * mutex in an atomic context.
372 */
373 list_for_each_entry_safe(rdata_item, tmp_rdata_item, &rdata_login_list,
374 list) {
375 list_del(&rdata_item->list);
376 fc_rport_login(rdata_item->rdata);
377 kref_put(&rdata_item->rdata->kref, fc_rport_destroy);
378 kfree(rdata_item);
379 }
380}
381
382static void qedf_update_link_speed(struct qedf_ctx *qedf,
383 struct qed_link_output *link)
384{
385 struct fc_lport *lport = qedf->lport;
386
387 lport->link_speed = FC_PORTSPEED_UNKNOWN;
388 lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
389
390 /* Set fc_host link speed */
391 switch (link->speed) {
392 case 10000:
393 lport->link_speed = FC_PORTSPEED_10GBIT;
394 break;
395 case 25000:
396 lport->link_speed = FC_PORTSPEED_25GBIT;
397 break;
398 case 40000:
399 lport->link_speed = FC_PORTSPEED_40GBIT;
400 break;
401 case 50000:
402 lport->link_speed = FC_PORTSPEED_50GBIT;
403 break;
404 case 100000:
405 lport->link_speed = FC_PORTSPEED_100GBIT;
406 break;
407 default:
408 lport->link_speed = FC_PORTSPEED_UNKNOWN;
409 break;
410 }
411
412 /*
413 * Set supported link speed by querying the supported
414 * capabilities of the link.
415 */
416 if (link->supported_caps & SUPPORTED_10000baseKR_Full)
417 lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
418 if (link->supported_caps & SUPPORTED_25000baseKR_Full)
419 lport->link_supported_speeds |= FC_PORTSPEED_25GBIT;
420 if (link->supported_caps & SUPPORTED_40000baseLR4_Full)
421 lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
422 if (link->supported_caps & SUPPORTED_50000baseKR2_Full)
423 lport->link_supported_speeds |= FC_PORTSPEED_50GBIT;
424 if (link->supported_caps & SUPPORTED_100000baseKR4_Full)
425 lport->link_supported_speeds |= FC_PORTSPEED_100GBIT;
426 fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
427}
428
429static void qedf_link_update(void *dev, struct qed_link_output *link)
430{
431 struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
432
433 if (link->link_up) {
434		QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d Gbps).\n",
435 link->speed / 1000);
436
437 /* Cancel any pending link down work */
438 cancel_delayed_work(&qedf->link_update);
439
440 atomic_set(&qedf->link_state, QEDF_LINK_UP);
441 qedf_update_link_speed(qedf, link);
442
443 if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) {
444 QEDF_ERR(&(qedf->dbg_ctx), "DCBx done.\n");
445 if (atomic_read(&qedf->link_down_tmo_valid) > 0)
446 queue_delayed_work(qedf->link_update_wq,
447 &qedf->link_recovery, 0);
448 else
449 queue_delayed_work(qedf->link_update_wq,
450 &qedf->link_update, 0);
451 atomic_set(&qedf->link_down_tmo_valid, 0);
452 }
453
454 } else {
455 QEDF_ERR(&(qedf->dbg_ctx), "LINK DOWN.\n");
456
457 atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
458 atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
459 /*
460 * Flag that we're waiting for the link to come back up before
461 * informing the fcoe layer of the event.
462 */
463 if (qedf_link_down_tmo > 0) {
464 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
465 "Starting link down tmo.\n");
466 atomic_set(&qedf->link_down_tmo_valid, 1);
467 }
468 qedf->vlan_id = 0;
469 qedf_update_link_speed(qedf, link);
470 queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
471 qedf_link_down_tmo * HZ);
472 }
473}
474
475
476static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type)
477{
478 struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
479
480 QEDF_ERR(&(qedf->dbg_ctx), "DCBx event valid=%d enabled=%d fcoe "
481 "prio=%d.\n", get->operational.valid, get->operational.enabled,
482 get->operational.app_prio.fcoe);
483
484 if (get->operational.enabled && get->operational.valid) {
485 /* If DCBX was already negotiated on link up then just exit */
486 if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) {
487 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
488 "DCBX already set on link up.\n");
489 return;
490 }
491
492 atomic_set(&qedf->dcbx, QEDF_DCBX_DONE);
493
494 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
495 if (atomic_read(&qedf->link_down_tmo_valid) > 0)
496 queue_delayed_work(qedf->link_update_wq,
497 &qedf->link_recovery, 0);
498 else
499 queue_delayed_work(qedf->link_update_wq,
500 &qedf->link_update, 0);
501 atomic_set(&qedf->link_down_tmo_valid, 0);
502 }
503 }
504
505}
506
507static u32 qedf_get_login_failures(void *cookie)
508{
509 struct qedf_ctx *qedf;
510
511 qedf = (struct qedf_ctx *)cookie;
512 return qedf->flogi_failed;
513}
514
515static struct qed_fcoe_cb_ops qedf_cb_ops = {
516 {
517 .link_update = qedf_link_update,
518 .dcbx_aen = qedf_dcbx_handler,
519 }
520};
521
522/*
523 * Various transport templates.
524 */
525
526static struct scsi_transport_template *qedf_fc_transport_template;
527static struct scsi_transport_template *qedf_fc_vport_transport_template;
528
529/*
530 * SCSI EH handlers
531 */
532static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
533{
534 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
535 struct fc_rport_libfc_priv *rp = rport->dd_data;
536 struct qedf_rport *fcport;
537 struct fc_lport *lport;
538 struct qedf_ctx *qedf;
539 struct qedf_ioreq *io_req;
540 int rc = FAILED;
541 int rval;
542
543 if (fc_remote_port_chkready(rport)) {
544 QEDF_ERR(NULL, "rport not ready\n");
545 goto out;
546 }
547
548 lport = shost_priv(sc_cmd->device->host);
549 qedf = (struct qedf_ctx *)lport_priv(lport);
550
551 if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
552 QEDF_ERR(&(qedf->dbg_ctx), "link not ready.\n");
553 goto out;
554 }
555
556 fcport = (struct qedf_rport *)&rp[1];
557
558 io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
559 if (!io_req) {
560 QEDF_ERR(&(qedf->dbg_ctx), "io_req is NULL.\n");
561 rc = SUCCESS;
562 goto out;
563 }
564
565 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
566 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
567 test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
568 QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
569 "cleanup or abort processing or already "
570 "completed.\n", io_req->xid);
571 rc = SUCCESS;
572 goto out;
573 }
574
575 QEDF_ERR(&(qedf->dbg_ctx), "Aborting io_req sc_cmd=%p xid=0x%x "
576 "fp_idx=%d.\n", sc_cmd, io_req->xid, io_req->fp_idx);
577
578 if (qedf->stop_io_on_error) {
579 qedf_stop_all_io(qedf);
580 rc = SUCCESS;
581 goto out;
582 }
583
584 init_completion(&io_req->abts_done);
585 rval = qedf_initiate_abts(io_req, true);
586 if (rval) {
587 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
588 goto out;
589 }
590
591 wait_for_completion(&io_req->abts_done);
592
593 if (io_req->event == QEDF_IOREQ_EV_ABORT_SUCCESS ||
594 io_req->event == QEDF_IOREQ_EV_ABORT_FAILED ||
595 io_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) {
596 /*
597		 * If we get a response to the abort then this is success from
598 * the perspective that all references to the command have
599 * been removed from the driver and firmware
600 */
601 rc = SUCCESS;
602 } else {
603 /* If the abort and cleanup failed then return a failure */
604 rc = FAILED;
605 }
606
607 if (rc == SUCCESS)
608 QEDF_ERR(&(qedf->dbg_ctx), "ABTS succeeded, xid=0x%x.\n",
609 io_req->xid);
610 else
611 QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n",
612 io_req->xid);
613
614out:
615 return rc;
616}
617
618static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd)
619{
620 QEDF_ERR(NULL, "TARGET RESET Issued...");
621 return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
622}
623
624static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
625{
626 QEDF_ERR(NULL, "LUN RESET Issued...\n");
627 return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
628}
629
630void qedf_wait_for_upload(struct qedf_ctx *qedf)
631{
632 while (1) {
633 if (atomic_read(&qedf->num_offloads))
634 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
635 "Waiting for all uploads to complete.\n");
636 else
637 break;
638 msleep(500);
639 }
640}
641
642/* Reset the host by gracefully logging out and then logging back in */
643static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
644{
645 struct fc_lport *lport;
646 struct qedf_ctx *qedf;
647
648 lport = shost_priv(sc_cmd->device->host);
649
650 if (lport->vport) {
651 QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n");
652 return SUCCESS;
653 }
654
655 qedf = (struct qedf_ctx *)lport_priv(lport);
656
657 if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN ||
658 test_bit(QEDF_UNLOADING, &qedf->flags) ||
659 test_bit(QEDF_DBG_STOP_IO, &qedf->flags))
660 return FAILED;
661
662 QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued...");
663
664 /* For host reset, essentially do a soft link up/down */
665 atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
666 atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
667 queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
668 0);
669 qedf_wait_for_upload(qedf);
670 atomic_set(&qedf->link_state, QEDF_LINK_UP);
671 qedf->vlan_id = 0;
672 queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
673 0);
674
675 return SUCCESS;
676}
677
678static int qedf_slave_configure(struct scsi_device *sdev)
679{
680 if (qedf_queue_depth) {
681 scsi_change_queue_depth(sdev, qedf_queue_depth);
682 }
683
684 return 0;
685}
686
687static struct scsi_host_template qedf_host_template = {
688 .module = THIS_MODULE,
689 .name = QEDF_MODULE_NAME,
690 .this_id = -1,
691 .cmd_per_lun = 3,
692 .use_clustering = ENABLE_CLUSTERING,
693 .max_sectors = 0xffff,
694 .queuecommand = qedf_queuecommand,
695 .shost_attrs = qedf_host_attrs,
696 .eh_abort_handler = qedf_eh_abort,
697 .eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */
698 .eh_target_reset_handler = qedf_eh_target_reset, /* target reset */
699 .eh_host_reset_handler = qedf_eh_host_reset,
700 .slave_configure = qedf_slave_configure,
701 .dma_boundary = QED_HW_DMA_BOUNDARY,
702 .sg_tablesize = QEDF_MAX_BDS_PER_CMD,
703 .can_queue = FCOE_PARAMS_NUM_TASKS,
704};
705
706static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen)
707{
708 int rc;
709
710 spin_lock(&qedf_global_lock);
711 rc = fcoe_get_paged_crc_eof(skb, tlen, &qedf_global);
712 spin_unlock(&qedf_global_lock);
713
714 return rc;
715}
716
717static struct qedf_rport *qedf_fcport_lookup(struct qedf_ctx *qedf, u32 port_id)
718{
719 struct qedf_rport *fcport;
720 struct fc_rport_priv *rdata;
721
722 rcu_read_lock();
723 list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
724 rdata = fcport->rdata;
725 if (rdata == NULL)
726 continue;
727 if (rdata->ids.port_id == port_id) {
728 rcu_read_unlock();
729 return fcport;
730 }
731 }
732 rcu_read_unlock();
733
734 /* Return NULL to caller to let them know fcport was not found */
735 return NULL;
736}
737
738/* Transmits an ELS frame over an offloaded session */
739static int qedf_xmit_l2_frame(struct qedf_rport *fcport, struct fc_frame *fp)
740{
741 struct fc_frame_header *fh;
742 int rc = 0;
743
744 fh = fc_frame_header_get(fp);
745 if ((fh->fh_type == FC_TYPE_ELS) &&
746 (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
747 switch (fc_frame_payload_op(fp)) {
748 case ELS_ADISC:
749 qedf_send_adisc(fcport, fp);
750 rc = 1;
751 break;
752 }
753 }
754
755 return rc;
756}
757
758/**
759 * qedf_xmit - qedf FCoE frame transmit function
760 *
761 */
762static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
763{
764 struct fc_lport *base_lport;
765 struct qedf_ctx *qedf;
766 struct ethhdr *eh;
767 struct fcoe_crc_eof *cp;
768 struct sk_buff *skb;
769 struct fc_frame_header *fh;
770 struct fcoe_hdr *hp;
771 u8 sof, eof;
772 u32 crc;
773 unsigned int hlen, tlen, elen;
774 int wlen;
775 struct fc_stats *stats;
776 struct fc_lport *tmp_lport;
777 struct fc_lport *vn_port = NULL;
778 struct qedf_rport *fcport;
779 int rc;
780 u16 vlan_tci = 0;
781
782 qedf = (struct qedf_ctx *)lport_priv(lport);
783
784 fh = fc_frame_header_get(fp);
785 skb = fp_skb(fp);
786
787 /* Filter out traffic to other NPIV ports on the same host */
788 if (lport->vport)
789 base_lport = shost_priv(vport_to_shost(lport->vport));
790 else
791 base_lport = lport;
792
793 /* Flag if the destination is the base port */
794 if (base_lport->port_id == ntoh24(fh->fh_d_id)) {
795 vn_port = base_lport;
796 } else {
797		/* Go through the list of vports attached to the base_lport
798 * and see if we have a match with the destination address.
799 */
800 list_for_each_entry(tmp_lport, &base_lport->vports, list) {
801 if (tmp_lport->port_id == ntoh24(fh->fh_d_id)) {
802 vn_port = tmp_lport;
803 break;
804 }
805 }
806 }
807 if (vn_port && ntoh24(fh->fh_d_id) != FC_FID_FLOGI) {
808 struct fc_rport_priv *rdata = NULL;
809
810 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
811 "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id));
812 kfree_skb(skb);
813 rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id));
814 if (rdata)
815 rdata->retries = lport->max_rport_retry_count;
816 return -EINVAL;
817 }
818 /* End NPIV filtering */
819
820 if (!qedf->ctlr.sel_fcf) {
821 kfree_skb(skb);
822 return 0;
823 }
824
825 if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) {
826 QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n");
827 kfree_skb(skb);
828 return 0;
829 }
830
831 if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
832 QEDF_WARN(&(qedf->dbg_ctx), "qedf link down\n");
833 kfree_skb(skb);
834 return 0;
835 }
836
837 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
838 if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb))
839 return 0;
840 }
841
842 /* Check to see if this needs to be sent on an offloaded session */
843 fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));
844
845 if (fcport && test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
846 rc = qedf_xmit_l2_frame(fcport, fp);
847 /*
848 * If the frame was successfully sent over the middle path
849 * then do not try to also send it over the LL2 path
850 */
851 if (rc)
852 return 0;
853 }
854
855 sof = fr_sof(fp);
856 eof = fr_eof(fp);
857
858 elen = sizeof(struct ethhdr);
859 hlen = sizeof(struct fcoe_hdr);
860 tlen = sizeof(struct fcoe_crc_eof);
861 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
862
863 skb->ip_summed = CHECKSUM_NONE;
864 crc = fcoe_fc_crc(fp);
865
866 /* copy port crc and eof to the skb buff */
867 if (skb_is_nonlinear(skb)) {
868 skb_frag_t *frag;
869
870 if (qedf_get_paged_crc_eof(skb, tlen)) {
871 kfree_skb(skb);
872 return -ENOMEM;
873 }
874 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
875 cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
876 } else {
877 cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
878 }
879
880 memset(cp, 0, sizeof(*cp));
881 cp->fcoe_eof = eof;
882 cp->fcoe_crc32 = cpu_to_le32(~crc);
883 if (skb_is_nonlinear(skb)) {
884 kunmap_atomic(cp);
885 cp = NULL;
886 }
887
888
889 /* adjust skb network/transport offsets to match mac/fcoe/port */
890 skb_push(skb, elen + hlen);
891 skb_reset_mac_header(skb);
892 skb_reset_network_header(skb);
893 skb->mac_len = elen;
894 skb->protocol = htons(ETH_P_FCOE);
895
896 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id);
897
898 /* fill up mac and fcoe headers */
899 eh = eth_hdr(skb);
900 eh->h_proto = htons(ETH_P_FCOE);
901 if (qedf->ctlr.map_dest)
902 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
903 else
904 /* insert GW address */
905 ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr);
906
907 /* Set the source MAC address */
908 fc_fcoe_set_mac(eh->h_source, fh->fh_s_id);
909
910 hp = (struct fcoe_hdr *)(eh + 1);
911 memset(hp, 0, sizeof(*hp));
912 if (FC_FCOE_VER)
913 FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
914 hp->fcoe_sof = sof;
915
916	/* Update tx stats */
917 stats = per_cpu_ptr(lport->stats, get_cpu());
918 stats->TxFrames++;
919 stats->TxWords += wlen;
920 put_cpu();
921
922 /* Get VLAN ID from skb for printing purposes */
923 __vlan_hwaccel_get_tag(skb, &vlan_tci);
924
925 /* send down to lld */
926 fr_dev(fp) = lport;
927 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame send: "
928 "src=%06x dest=%06x r_ctl=%x type=%x vlan=%04x.\n",
929 ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, fh->fh_type,
930 vlan_tci);
931 if (qedf_dump_frames)
932 print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
933 1, skb->data, skb->len, false);
934 qed_ops->ll2->start_xmit(qedf->cdev, skb);
935
936 return 0;
937}
938
939static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
940{
941 int rval = 0;
942 u32 *pbl;
943 dma_addr_t page;
944 int num_pages;
945
946 /* Calculate appropriate queue and PBL sizes */
947 fcport->sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe);
948 fcport->sq_mem_size = ALIGN(fcport->sq_mem_size, QEDF_PAGE_SIZE);
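	/* Size the PBL for one 8-byte entry per SQ page, plus one extra page */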
949 fcport->sq_pbl_size = (fcport->sq_mem_size / QEDF_PAGE_SIZE) *
950 sizeof(void *);
951 fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE;
952
953 fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
954 &fcport->sq_dma, GFP_KERNEL);
955 if (!fcport->sq) {
956 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send "
957 "queue.\n");
958 rval = 1;
959 goto out;
960 }
961 memset(fcport->sq, 0, fcport->sq_mem_size);
962
963 fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
964 fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL);
965 if (!fcport->sq_pbl) {
966 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send "
967 "queue PBL.\n");
968 rval = 1;
969 goto out_free_sq;
970 }
971 memset(fcport->sq_pbl, 0, fcport->sq_pbl_size);
972
973 /* Create PBL */
974 num_pages = fcport->sq_mem_size / QEDF_PAGE_SIZE;
975 page = fcport->sq_dma;
976 pbl = (u32 *)fcport->sq_pbl;
977
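	/* Write each page address as low then high 32-bit words */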
978 while (num_pages--) {
979 *pbl = U64_LO(page);
980 pbl++;
981 *pbl = U64_HI(page);
982 pbl++;
983 page += QEDF_PAGE_SIZE;
984 }
985
986 return rval;
987
988out_free_sq:
989 dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, fcport->sq,
990 fcport->sq_dma);
991out:
992 return rval;
993}
994
995static void qedf_free_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
996{
997 if (fcport->sq_pbl)
998 dma_free_coherent(&qedf->pdev->dev, fcport->sq_pbl_size,
999 fcport->sq_pbl, fcport->sq_pbl_dma);
1000 if (fcport->sq)
1001 dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
1002 fcport->sq, fcport->sq_dma);
1003}
1004
1005static int qedf_offload_connection(struct qedf_ctx *qedf,
1006 struct qedf_rport *fcport)
1007{
1008 struct qed_fcoe_params_offload conn_info;
1009 u32 port_id;
1010 u8 lport_src_id[3];
1011 int rval;
1012 uint16_t total_sqe = (fcport->sq_mem_size / sizeof(struct fcoe_wqe));
1013
1014 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offloading connection "
1015 "portid=%06x.\n", fcport->rdata->ids.port_id);
1016 rval = qed_ops->acquire_conn(qedf->cdev, &fcport->handle,
1017 &fcport->fw_cid, &fcport->p_doorbell);
1018 if (rval) {
1019 QEDF_WARN(&(qedf->dbg_ctx), "Could not acquire connection "
1020 "for portid=%06x.\n", fcport->rdata->ids.port_id);
1021 rval = 1; /* For some reason qed returns 0 on failure here */
1022 goto out;
1023 }
1024
1025 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "portid=%06x "
1026 "fw_cid=%08x handle=%d.\n", fcport->rdata->ids.port_id,
1027 fcport->fw_cid, fcport->handle);
1028
1029 memset(&conn_info, 0, sizeof(struct qed_fcoe_params_offload));
1030
1031 /* Fill in the offload connection info */
1032 conn_info.sq_pbl_addr = fcport->sq_pbl_dma;
1033
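	/* The first two 64-bit PBL entries hold the current and next SQ page addresses */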
1034 conn_info.sq_curr_page_addr = (dma_addr_t)(*(u64 *)fcport->sq_pbl);
1035 conn_info.sq_next_page_addr =
1036 (dma_addr_t)(*(u64 *)(fcport->sq_pbl + 8));
1037
1038 /* Need to use our FCoE MAC for the offload session */
1039 port_id = fc_host_port_id(qedf->lport->host);
1040 lport_src_id[2] = (port_id & 0x000000FF);
1041 lport_src_id[1] = (port_id & 0x0000FF00) >> 8;
1042 lport_src_id[0] = (port_id & 0x00FF0000) >> 16;
1043 fc_fcoe_set_mac(conn_info.src_mac, lport_src_id);
1044
1045 ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr);
1046
1047 conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size;
1048 conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov / 20;
1049 conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */
1050 conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size;
1051
1052 /* Set VLAN data */
1053 conn_info.vlan_tag = qedf->vlan_id <<
1054 FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT;
1055 conn_info.vlan_tag |=
1056 qedf_default_prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;
1057 conn_info.flags |= (FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK <<
1058 FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT);
1059
1060 /* Set host port source id */
1061 port_id = fc_host_port_id(qedf->lport->host);
1062 fcport->sid = port_id;
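	/* Split the 24-bit S_ID one byte per field; addr_hi takes the least significant byte */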
1063 conn_info.s_id.addr_hi = (port_id & 0x000000FF);
1064 conn_info.s_id.addr_mid = (port_id & 0x0000FF00) >> 8;
1065 conn_info.s_id.addr_lo = (port_id & 0x00FF0000) >> 16;
1066
1067 conn_info.max_conc_seqs_c3 = fcport->rdata->max_seq;
1068
1069 /* Set remote port destination id */
1070 port_id = fcport->rdata->rport->port_id;
1071 conn_info.d_id.addr_hi = (port_id & 0x000000FF);
1072 conn_info.d_id.addr_mid = (port_id & 0x0000FF00) >> 8;
1073 conn_info.d_id.addr_lo = (port_id & 0x00FF0000) >> 16;
1074
1075 conn_info.def_q_idx = 0; /* Default index for send queue? */
1076
1077 /* Set FC-TAPE specific flags if needed */
1078 if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
1079 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN,
1080 "Enable CONF, REC for portid=%06x.\n",
1081 fcport->rdata->ids.port_id);
1082 conn_info.flags |= 1 <<
1083 FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT;
1084 conn_info.flags |=
1085 ((fcport->rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
1086 FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT;
1087 }
1088
1089 rval = qed_ops->offload_conn(qedf->cdev, fcport->handle, &conn_info);
1090 if (rval) {
1091 QEDF_WARN(&(qedf->dbg_ctx), "Could not offload connection "
1092 "for portid=%06x.\n", fcport->rdata->ids.port_id);
1093 goto out_free_conn;
1094 } else
1095 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offload "
1096 "succeeded portid=%06x total_sqe=%d.\n",
1097 fcport->rdata->ids.port_id, total_sqe);
1098
1099 spin_lock_init(&fcport->rport_lock);
1100 atomic_set(&fcport->free_sqes, total_sqe);
1101 return 0;
1102out_free_conn:
1103 qed_ops->release_conn(qedf->cdev, fcport->handle);
1104out:
1105 return rval;
1106}
1107
1108#define QEDF_TERM_BUFF_SIZE 10
1109static void qedf_upload_connection(struct qedf_ctx *qedf,
1110 struct qedf_rport *fcport)
1111{
1112 void *term_params;
1113 dma_addr_t term_params_dma;
1114
1115	/* The term params buffer needs to be DMA coherent as qed shares its
1116	 * physical DMA address with the firmware. The buffer may be used in
1117 * the receive path so we may eventually have to move this.
1118 */
1119 term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE,
1120 &term_params_dma, GFP_KERNEL);
1121
1122 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection "
1123 "port_id=%06x.\n", fcport->rdata->ids.port_id);
1124
1125 qed_ops->destroy_conn(qedf->cdev, fcport->handle, term_params_dma);
1126 qed_ops->release_conn(qedf->cdev, fcport->handle);
1127
1128 dma_free_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, term_params,
1129 term_params_dma);
1130}
1131
1132static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
1133 struct qedf_rport *fcport)
1134{
1135 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n",
1136 fcport->rdata->ids.port_id);
1137
1138 /* Flush any remaining i/o's before we upload the connection */
1139 qedf_flush_active_ios(fcport, -1);
1140
1141 if (test_and_clear_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))
1142 qedf_upload_connection(qedf, fcport);
1143 qedf_free_sq(qedf, fcport);
1144 fcport->rdata = NULL;
1145 fcport->qedf = NULL;
1146}
1147
1148/**
1149 * This event_callback is called after successful completion of libfc
1150 * initiated target login. qedf can proceed with initiating the session
1151 * establishment.
1152 */
1153static void qedf_rport_event_handler(struct fc_lport *lport,
1154 struct fc_rport_priv *rdata,
1155 enum fc_rport_event event)
1156{
1157 struct qedf_ctx *qedf = lport_priv(lport);
1158 struct fc_rport *rport = rdata->rport;
1159 struct fc_rport_libfc_priv *rp;
1160 struct qedf_rport *fcport;
1161 u32 port_id;
1162 int rval;
1163 unsigned long flags;
1164
1165 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "event = %d, "
1166 "port_id = 0x%x\n", event, rdata->ids.port_id);
1167
1168 switch (event) {
1169 case RPORT_EV_READY:
1170 if (!rport) {
1171 QEDF_WARN(&(qedf->dbg_ctx), "rport is NULL.\n");
1172 break;
1173 }
1174
1175 rp = rport->dd_data;
1176 fcport = (struct qedf_rport *)&rp[1];
1177 fcport->qedf = qedf;
1178
1179 if (atomic_read(&qedf->num_offloads) >= QEDF_MAX_SESSIONS) {
1180 QEDF_ERR(&(qedf->dbg_ctx), "Not offloading "
1181 "portid=0x%x as max number of offloaded sessions "
1182 "reached.\n", rdata->ids.port_id);
1183 return;
1184 }
1185
1186 /*
1187 * Don't try to offload the session again. Can happen when we
1188 * get an ADISC
1189 */
1190 if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1191 QEDF_WARN(&(qedf->dbg_ctx), "Session already "
1192 "offloaded, portid=0x%x.\n",
1193 rdata->ids.port_id);
1194 return;
1195 }
1196
1197 if (rport->port_id == FC_FID_DIR_SERV) {
1198 /*
1199 * qedf_rport structure doesn't exist for
1200 * directory server.
1201 * We should not come here, as lport will
1202 * take care of fabric login
1203 */
1204 QEDF_WARN(&(qedf->dbg_ctx), "rport struct does not "
1205 "exist for dir server port_id=%x\n",
1206 rdata->ids.port_id);
1207 break;
1208 }
1209
1210 if (rdata->spp_type != FC_TYPE_FCP) {
1211 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1212			    "Not offloading since spp type isn't FCP\n");
1213 break;
1214 }
1215 if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
1216 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1217 "Not FCP target so not offloading\n");
1218 break;
1219 }
1220
1221 fcport->rdata = rdata;
1222 fcport->rport = rport;
1223
1224 rval = qedf_alloc_sq(qedf, fcport);
1225 if (rval) {
1226 qedf_cleanup_fcport(qedf, fcport);
1227 break;
1228 }
1229
1230 /* Set device type */
1231 if (rdata->flags & FC_RP_FLAGS_RETRY &&
1232 rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
1233 !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) {
1234 fcport->dev_type = QEDF_RPORT_TYPE_TAPE;
1235 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1236 "portid=%06x is a TAPE device.\n",
1237 rdata->ids.port_id);
1238 } else {
1239 fcport->dev_type = QEDF_RPORT_TYPE_DISK;
1240 }
1241
1242 rval = qedf_offload_connection(qedf, fcport);
1243 if (rval) {
1244 qedf_cleanup_fcport(qedf, fcport);
1245 break;
1246 }
1247
1248		/* Add fcport to the qedf_ctx list of offloaded ports */
1249 spin_lock_irqsave(&qedf->hba_lock, flags);
1250 list_add_rcu(&fcport->peers, &qedf->fcports);
1251 spin_unlock_irqrestore(&qedf->hba_lock, flags);
1252
1253 /*
1254 * Set the session ready bit to let everyone know that this
1255 * connection is ready for I/O
1256 */
1257 set_bit(QEDF_RPORT_SESSION_READY, &fcport->flags);
1258 atomic_inc(&qedf->num_offloads);
1259
1260 break;
1261 case RPORT_EV_LOGO:
1262 case RPORT_EV_FAILED:
1263 case RPORT_EV_STOP:
1264 port_id = rdata->ids.port_id;
1265 if (port_id == FC_FID_DIR_SERV)
1266 break;
1267
1268 if (!rport) {
1269 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1270			    "port_id=%x - rport not created yet.\n", port_id);
1271 break;
1272 }
1273 rp = rport->dd_data;
1274 /*
1275 * Perform session upload. Note that rdata->peers is already
1276 * removed from disc->rports list before we get this event.
1277 */
1278 fcport = (struct qedf_rport *)&rp[1];
1279
1280 /* Only free this fcport if it is offloaded already */
1281 if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1282 set_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags);
1283 qedf_cleanup_fcport(qedf, fcport);
1284
1285 /*
1286			 * Remove fcport from the qedf_ctx list of offloaded
1287			 * ports
1288 */
1289 spin_lock_irqsave(&qedf->hba_lock, flags);
1290 list_del_rcu(&fcport->peers);
1291 spin_unlock_irqrestore(&qedf->hba_lock, flags);
1292
1293 clear_bit(QEDF_RPORT_UPLOADING_CONNECTION,
1294 &fcport->flags);
1295 atomic_dec(&qedf->num_offloads);
1296 }
1297
1298 break;
1299
1300 case RPORT_EV_NONE:
1301 break;
1302 }
1303}
1304
1305static void qedf_abort_io(struct fc_lport *lport)
1306{
1307 /* NO-OP but need to fill in the template */
1308}
1309
1310static void qedf_fcp_cleanup(struct fc_lport *lport)
1311{
1312 /*
1313 * NO-OP but need to fill in template to prevent a NULL
1314 * function pointer dereference during link down. I/Os
1315 * will be flushed when port is uploaded.
1316 */
1317}
1318
1319static struct libfc_function_template qedf_lport_template = {
1320 .frame_send = qedf_xmit,
1321 .fcp_abort_io = qedf_abort_io,
1322 .fcp_cleanup = qedf_fcp_cleanup,
1323 .rport_event_callback = qedf_rport_event_handler,
1324 .elsct_send = qedf_elsct_send,
1325};
1326
1327static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
1328{
1329 fcoe_ctlr_init(&qedf->ctlr, FIP_ST_AUTO);
1330
1331 qedf->ctlr.send = qedf_fip_send;
1332 qedf->ctlr.update_mac = qedf_update_src_mac;
1333 qedf->ctlr.get_src_addr = qedf_get_src_mac;
1334 ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac);
1335}
1336
1337static int qedf_lport_setup(struct qedf_ctx *qedf)
1338{
1339 struct fc_lport *lport = qedf->lport;
1340
1341 lport->link_up = 0;
1342 lport->max_retry_count = QEDF_FLOGI_RETRY_CNT;
1343 lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT;
1344 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
1345 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
1346 lport->boot_time = jiffies;
1347 lport->e_d_tov = 2 * 1000;
1348 lport->r_a_tov = 10 * 1000;
1349
1350 /* Set NPIV support */
1351 lport->does_npiv = 1;
1352 fc_host_max_npiv_vports(lport->host) = QEDF_MAX_NPIV;
1353
1354 fc_set_wwnn(lport, qedf->wwnn);
1355 fc_set_wwpn(lport, qedf->wwpn);
1356
1357 fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0);
1358
1359 /* Allocate the exchange manager */
1360 fc_exch_mgr_alloc(lport, FC_CLASS_3, qedf->max_scsi_xid + 1,
1361 qedf->max_els_xid, NULL);
1362
1363 if (fc_lport_init_stats(lport))
1364 return -ENOMEM;
1365
1366 /* Finish lport config */
1367 fc_lport_config(lport);
1368
1369 /* Set max frame size */
1370 fc_set_mfs(lport, QEDF_MFS);
1371 fc_host_maxframe_size(lport->host) = lport->mfs;
1372
1373 /* Set default dev_loss_tmo based on module parameter */
1374 fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo;
1375
1376 /* Set symbolic node name */
1377 snprintf(fc_host_symbolic_name(lport->host), 256,
1378 "QLogic %s v%s", QEDF_MODULE_NAME, QEDF_VERSION);
1379
1380 return 0;
1381}
1382
1383/*
1384 * NPIV functions
1385 */
1386
1387static int qedf_vport_libfc_config(struct fc_vport *vport,
1388 struct fc_lport *lport)
1389{
1390 lport->link_up = 0;
1391 lport->qfull = 0;
1392 lport->max_retry_count = QEDF_FLOGI_RETRY_CNT;
1393 lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT;
1394 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
1395 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
1396 lport->boot_time = jiffies;
1397 lport->e_d_tov = 2 * 1000;
1398 lport->r_a_tov = 10 * 1000;
1399 lport->does_npiv = 1; /* Temporary until we add NPIV support */
1400
1401 /* Allocate stats for vport */
1402 if (fc_lport_init_stats(lport))
1403 return -ENOMEM;
1404
1405 /* Finish lport config */
1406 fc_lport_config(lport);
1407
1408 /* offload related configuration */
1409 lport->crc_offload = 0;
1410 lport->seq_offload = 0;
1411 lport->lro_enabled = 0;
1412 lport->lro_xid = 0;
1413 lport->lso_max = 0;
1414
1415 return 0;
1416}
1417
1418static int qedf_vport_create(struct fc_vport *vport, bool disabled)
1419{
1420 struct Scsi_Host *shost = vport_to_shost(vport);
1421 struct fc_lport *n_port = shost_priv(shost);
1422 struct fc_lport *vn_port;
1423 struct qedf_ctx *base_qedf = lport_priv(n_port);
1424 struct qedf_ctx *vport_qedf;
1425
1426 char buf[32];
1427 int rc = 0;
1428
1429 rc = fcoe_validate_vport_create(vport);
1430 if (rc) {
1431 fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
1432 QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, "
1433 "WWPN (0x%s) already exists.\n", buf);
1434 goto err1;
1435 }
1436
1437 if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) {
1438 QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport "
1439 "because link is not up.\n");
1440 rc = -EIO;
1441 goto err1;
1442 }
1443
1444 vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx));
1445 if (!vn_port) {
1446 QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport "
1447 "for vport.\n");
1448 rc = -ENOMEM;
1449 goto err1;
1450 }
1451
1452 fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
1453 QEDF_ERR(&(base_qedf->dbg_ctx), "Creating NPIV port, WWPN=%s.\n",
1454 buf);
1455
1456 /* Copy some fields from base_qedf */
1457 vport_qedf = lport_priv(vn_port);
1458 memcpy(vport_qedf, base_qedf, sizeof(struct qedf_ctx));
1459
1460 /* Set qedf data specific to this vport */
1461 vport_qedf->lport = vn_port;
1462 /* Use same hba_lock as base_qedf */
1463 vport_qedf->hba_lock = base_qedf->hba_lock;
1464 vport_qedf->pdev = base_qedf->pdev;
1465 vport_qedf->cmd_mgr = base_qedf->cmd_mgr;
1466 init_completion(&vport_qedf->flogi_compl);
1467 INIT_LIST_HEAD(&vport_qedf->fcports);
1468
1469 rc = qedf_vport_libfc_config(vport, vn_port);
1470 if (rc) {
1471 QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory "
1472 "for lport stats.\n");
1473 goto err2;
1474 }
1475
1476 fc_set_wwnn(vn_port, vport->node_name);
1477 fc_set_wwpn(vn_port, vport->port_name);
1478 vport_qedf->wwnn = vn_port->wwnn;
1479 vport_qedf->wwpn = vn_port->wwpn;
1480
1481 vn_port->host->transportt = qedf_fc_vport_transport_template;
1482 vn_port->host->can_queue = QEDF_MAX_ELS_XID;
1483 vn_port->host->max_lun = qedf_max_lun;
1484 vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD;
1485 vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN;
1486
1487 rc = scsi_add_host(vn_port->host, &vport->dev);
1488 if (rc) {
1489 QEDF_WARN(&(base_qedf->dbg_ctx), "Error adding Scsi_Host.\n");
1490 goto err2;
1491 }
1492
1493 /* Set default dev_loss_tmo based on module parameter */
1494 fc_host_dev_loss_tmo(vn_port->host) = qedf_dev_loss_tmo;
1495
1496	/* Init libfc stuff */
1497 memcpy(&vn_port->tt, &qedf_lport_template,
1498 sizeof(qedf_lport_template));
1499 fc_exch_init(vn_port);
1500 fc_elsct_init(vn_port);
1501 fc_lport_init(vn_port);
1502 fc_disc_init(vn_port);
1503 fc_disc_config(vn_port, vn_port);
1504
1505
1506	/* Clone the N_Port's exchange manager list for the vport */
1507 shost = vport_to_shost(vport);
1508 n_port = shost_priv(shost);
1509 fc_exch_mgr_list_clone(n_port, vn_port);
1510
1511 /* Set max frame size */
1512 fc_set_mfs(vn_port, QEDF_MFS);
1513
1514 fc_host_port_type(vn_port->host) = FC_PORTTYPE_UNKNOWN;
1515
1516 if (disabled) {
1517 fc_vport_set_state(vport, FC_VPORT_DISABLED);
1518 } else {
1519 vn_port->boot_time = jiffies;
1520 fc_fabric_login(vn_port);
1521 fc_vport_setlink(vn_port);
1522 }
1523
1524 QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n",
1525 vn_port);
1526
1527 /* Set up debug context for vport */
1528 vport_qedf->dbg_ctx.host_no = vn_port->host->host_no;
1529 vport_qedf->dbg_ctx.pdev = base_qedf->pdev;
1530
1531err2:
1532 scsi_host_put(vn_port->host);
1533err1:
1534 return rc;
1535}
1536
1537static int qedf_vport_destroy(struct fc_vport *vport)
1538{
1539 struct Scsi_Host *shost = vport_to_shost(vport);
1540 struct fc_lport *n_port = shost_priv(shost);
1541 struct fc_lport *vn_port = vport->dd_data;
1542
1543 mutex_lock(&n_port->lp_mutex);
1544 list_del(&vn_port->list);
1545 mutex_unlock(&n_port->lp_mutex);
1546
1547 fc_fabric_logoff(vn_port);
1548 fc_lport_destroy(vn_port);
1549
1550 /* Detach from scsi-ml */
1551 fc_remove_host(vn_port->host);
1552 scsi_remove_host(vn_port->host);
1553
1554 /*
1555 * Only try to release the exchange manager if the vn_port
1556 * configuration is complete.
1557 */
1558 if (vn_port->state == LPORT_ST_READY)
1559 fc_exch_mgr_free(vn_port);
1560
1561 /* Free memory used by statistical counters */
1562 fc_lport_free_stats(vn_port);
1563
1564 /* Release Scsi_Host */
1565 if (vn_port->host)
1566 scsi_host_put(vn_port->host);
1567
1568 return 0;
1569}
1570
1571static int qedf_vport_disable(struct fc_vport *vport, bool disable)
1572{
1573 struct fc_lport *lport = vport->dd_data;
1574
1575 if (disable) {
1576 fc_vport_set_state(vport, FC_VPORT_DISABLED);
1577 fc_fabric_logoff(lport);
1578 } else {
1579 lport->boot_time = jiffies;
1580 fc_fabric_login(lport);
1581 fc_vport_setlink(lport);
1582 }
1583 return 0;
1584}
1585
1586/*
1587 * During removal we need to wait for all the vports associated with a port
1588 * to be destroyed so we avoid a race condition where libfc is still trying
1589 * to reap vports while the driver remove function has already reaped the
1590 * driver contexts associated with the physical port.
1591 */
1592static void qedf_wait_for_vport_destroy(struct qedf_ctx *qedf)
1593{
1594 struct fc_host_attrs *fc_host = shost_to_fc_host(qedf->lport->host);
1595
1596 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
1597 "Entered.\n");
1598 while (fc_host->npiv_vports_inuse > 0) {
1599 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
1600 "Waiting for all vports to be reaped.\n");
1601 msleep(1000);
1602 }
1603}
1604
1605/**
1606 * qedf_fcoe_reset - Resets the fcoe
1607 *
1608 * @shost: shost the reset is from
1609 *
1610 * Returns: always 0
1611 */
1612static int qedf_fcoe_reset(struct Scsi_Host *shost)
1613{
1614 struct fc_lport *lport = shost_priv(shost);
1615
1616 fc_fabric_logoff(lport);
1617 fc_fabric_login(lport);
1618 return 0;
1619}
1620
1621static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
1622 *shost)
1623{
1624 struct fc_host_statistics *qedf_stats;
1625 struct fc_lport *lport = shost_priv(shost);
1626 struct qedf_ctx *qedf = lport_priv(lport);
1627 struct qed_fcoe_stats *fw_fcoe_stats;
1628
1629 qedf_stats = fc_get_host_stats(shost);
1630
1631 /* We don't collect offload stats for specific NPIV ports */
1632 if (lport->vport)
1633 goto out;
1634
1635 fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL);
1636 if (!fw_fcoe_stats) {
1637 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for "
1638 "fw_fcoe_stats.\n");
1639 goto out;
1640 }
1641
1642 /* Query firmware for offload stats */
1643 qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);
1644
1645 /*
1646 * The expectation is that we add our offload stats to the stats
1647	 * being maintained by libfc each time the fc_get_host_stats callback
1648	 * is invoked. The additions are not carried over between calls to
1649	 * the callback.
1650 */
1651 qedf_stats->tx_frames += fw_fcoe_stats->fcoe_tx_data_pkt_cnt +
1652 fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt +
1653 fw_fcoe_stats->fcoe_tx_other_pkt_cnt;
1654 qedf_stats->rx_frames += fw_fcoe_stats->fcoe_rx_data_pkt_cnt +
1655 fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt +
1656 fw_fcoe_stats->fcoe_rx_other_pkt_cnt;
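	/*
	 * Compute the word counters before do_div() below converts the byte
	 * counters to megabytes in place.
	 */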
1657	qedf_stats->rx_words += fw_fcoe_stats->fcoe_rx_byte_cnt / 4;
1658	qedf_stats->tx_words += fw_fcoe_stats->fcoe_tx_byte_cnt / 4;
1659	do_div(fw_fcoe_stats->fcoe_rx_byte_cnt, 1000000);
1660	do_div(fw_fcoe_stats->fcoe_tx_byte_cnt, 1000000);
1661	qedf_stats->fcp_input_megabytes += fw_fcoe_stats->fcoe_rx_byte_cnt;
1662	qedf_stats->fcp_output_megabytes += fw_fcoe_stats->fcoe_tx_byte_cnt;
1663 qedf_stats->invalid_crc_count +=
1664 fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt;
1665 qedf_stats->dumped_frames =
1666 fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
1667 qedf_stats->error_frames +=
1668 fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
1669 qedf_stats->fcp_input_requests += qedf->input_requests;
1670 qedf_stats->fcp_output_requests += qedf->output_requests;
1671 qedf_stats->fcp_control_requests += qedf->control_requests;
1672 qedf_stats->fcp_packet_aborts += qedf->packet_aborts;
1673 qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures;
1674
1675 kfree(fw_fcoe_stats);
1676out:
1677 return qedf_stats;
1678}
1679
1680static struct fc_function_template qedf_fc_transport_fn = {
1681 .show_host_node_name = 1,
1682 .show_host_port_name = 1,
1683 .show_host_supported_classes = 1,
1684 .show_host_supported_fc4s = 1,
1685 .show_host_active_fc4s = 1,
1686 .show_host_maxframe_size = 1,
1687
1688 .show_host_port_id = 1,
1689 .show_host_supported_speeds = 1,
1690 .get_host_speed = fc_get_host_speed,
1691 .show_host_speed = 1,
1692 .show_host_port_type = 1,
1693 .get_host_port_state = fc_get_host_port_state,
1694 .show_host_port_state = 1,
1695 .show_host_symbolic_name = 1,
1696
1697 /*
1698 * Tell FC transport to allocate enough space to store the backpointer
1699	 * for the associated qedf_rport struct.
1700 */
1701 .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
1702 sizeof(struct qedf_rport)),
1703 .show_rport_maxframe_size = 1,
1704 .show_rport_supported_classes = 1,
1705 .show_host_fabric_name = 1,
1706 .show_starget_node_name = 1,
1707 .show_starget_port_name = 1,
1708 .show_starget_port_id = 1,
1709 .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
1710 .show_rport_dev_loss_tmo = 1,
1711 .get_fc_host_stats = qedf_fc_get_host_stats,
1712 .issue_fc_host_lip = qedf_fcoe_reset,
1713 .vport_create = qedf_vport_create,
1714 .vport_delete = qedf_vport_destroy,
1715 .vport_disable = qedf_vport_disable,
1716 .bsg_request = fc_lport_bsg_request,
1717};
1718
1719static struct fc_function_template qedf_fc_vport_transport_fn = {
1720 .show_host_node_name = 1,
1721 .show_host_port_name = 1,
1722 .show_host_supported_classes = 1,
1723 .show_host_supported_fc4s = 1,
1724 .show_host_active_fc4s = 1,
1725 .show_host_maxframe_size = 1,
1726 .show_host_port_id = 1,
1727 .show_host_supported_speeds = 1,
1728 .get_host_speed = fc_get_host_speed,
1729 .show_host_speed = 1,
1730 .show_host_port_type = 1,
1731 .get_host_port_state = fc_get_host_port_state,
1732 .show_host_port_state = 1,
1733 .show_host_symbolic_name = 1,
1734 .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
1735 sizeof(struct qedf_rport)),
1736 .show_rport_maxframe_size = 1,
1737 .show_rport_supported_classes = 1,
1738 .show_host_fabric_name = 1,
1739 .show_starget_node_name = 1,
1740 .show_starget_port_name = 1,
1741 .show_starget_port_id = 1,
1742 .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
1743 .show_rport_dev_loss_tmo = 1,
1744 .get_fc_host_stats = fc_get_host_stats,
1745 .issue_fc_host_lip = qedf_fcoe_reset,
1746 .bsg_request = fc_lport_bsg_request,
1747};
1748
1749static bool qedf_fp_has_work(struct qedf_fastpath *fp)
1750{
1751 struct qedf_ctx *qedf = fp->qedf;
1752 struct global_queue *que;
1753 struct qed_sb_info *sb_info = fp->sb_info;
1754 struct status_block *sb = sb_info->sb_virt;
1755 u16 prod_idx;
1756
1757 /* Get the pointer to the global CQ this completion is on */
1758 que = qedf->global_queues[fp->sb_id];
1759
1760 /* Be sure all responses have been written to PI */
1761 rmb();
1762
1763 /* Get the current firmware producer index */
1764 prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];
1765
1766 return (que->cq_prod_idx != prod_idx);
1767}
1768
1769/*
1770 * Interrupt handler code.
1771 */
1772
1773/* Process completion queue and copy CQE contents for deferred processing
1774 *
1775 * Return true if we should wake the I/O thread, false if not.
1776 */
1777static bool qedf_process_completions(struct qedf_fastpath *fp)
1778{
1779 struct qedf_ctx *qedf = fp->qedf;
1780 struct qed_sb_info *sb_info = fp->sb_info;
1781 struct status_block *sb = sb_info->sb_virt;
1782 struct global_queue *que;
1783 u16 prod_idx;
1784 struct fcoe_cqe *cqe;
1785 struct qedf_io_work *io_work;
1786 int num_handled = 0;
1787 unsigned int cpu;
1788 struct qedf_ioreq *io_req = NULL;
1789 u16 xid;
1790 u16 new_cqes;
1791 u32 comp_type;
1792
1793 /* Get the current firmware producer index */
1794 prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];
1795
1796 /* Get the pointer to the global CQ this completion is on */
1797 que = qedf->global_queues[fp->sb_id];
1798
1799 /* Calculate the amount of new elements since last processing */
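	/* (the CQ producer index is 16 bits wide, hence the 0x10000 wrap term) */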
1800 new_cqes = (prod_idx >= que->cq_prod_idx) ?
1801 (prod_idx - que->cq_prod_idx) :
1802 0x10000 - que->cq_prod_idx + prod_idx;
1803
1804 /* Save producer index */
1805 que->cq_prod_idx = prod_idx;
1806
1807 while (new_cqes) {
1808 fp->completions++;
1809 num_handled++;
1810 cqe = &que->cq[que->cq_cons_idx];
1811
1812 comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
1813 FCOE_CQE_CQE_TYPE_MASK;
1814
1815 /*
1816 * Process unsolicited CQEs directly in the interrupt handler
1817		 * since we need the fastpath ID
1818 */
1819 if (comp_type == FCOE_UNSOLIC_CQE_TYPE) {
1820 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
1821			    "Unsolicited CQE.\n");
1822 qedf_process_unsol_compl(qedf, fp->sb_id, cqe);
1823 /*
1824			 * Don't add a work list item. Increment the
1825			 * consumer index and move on.
1826 */
1827 goto inc_idx;
1828 }
1829
1830 xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
1831 io_req = &qedf->cmd_mgr->cmds[xid];
1832
1833 /*
1834 * Figure out which percpu thread we should queue this I/O
1835 * on.
1836 */
1837 if (!io_req)
1838			/* If there is no io_req associated with this CQE,
1839			 * just queue it on CPU 0
1840 */
1841 cpu = 0;
1842 else {
1843 cpu = io_req->cpu;
1844 io_req->int_cpu = smp_processor_id();
1845 }
1846
1847 io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
1848 if (!io_work) {
1849 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
1850 "work for I/O completion.\n");
1851 continue;
1852 }
1853 memset(io_work, 0, sizeof(struct qedf_io_work));
1854
1855 INIT_WORK(&io_work->work, qedf_fp_io_handler);
1856
1857 /* Copy contents of CQE for deferred processing */
1858 memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
1859
1860 io_work->qedf = fp->qedf;
1861 io_work->fp = NULL; /* Only used for unsolicited frames */
1862
1863 queue_work_on(cpu, qedf_io_wq, &io_work->work);
1864
1865inc_idx:
1866 que->cq_cons_idx++;
1867 if (que->cq_cons_idx == fp->cq_num_entries)
1868 que->cq_cons_idx = 0;
1869 new_cqes--;
1870 }
1871
1872 return true;
1873}
1874
1875
1876/* MSI-X fastpath handler code */
1877static irqreturn_t qedf_msix_handler(int irq, void *dev_id)
1878{
1879 struct qedf_fastpath *fp = dev_id;
1880
1881 if (!fp) {
1882 QEDF_ERR(NULL, "fp is null.\n");
1883 return IRQ_HANDLED;
1884 }
1885 if (!fp->sb_info) {
1886		QEDF_ERR(NULL, "fp->sb_info is null.\n");
1887 return IRQ_HANDLED;
1888 }
1889
1890 /*
1891 * Disable interrupts for this status block while we process new
1892 * completions
1893 */
1894 qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
1895
1896 while (1) {
1897 qedf_process_completions(fp);
1898
1899 if (qedf_fp_has_work(fp) == 0) {
1900 /* Update the sb information */
1901 qed_sb_update_sb_idx(fp->sb_info);
1902
1903 /* Check for more work */
1904 rmb();
1905
1906 if (qedf_fp_has_work(fp) == 0) {
1907 /* Re-enable interrupts */
1908 qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
1909 return IRQ_HANDLED;
1910 }
1911 }
1912 }
1913
1914 /* Do we ever want to break out of above loop? */
1915 return IRQ_HANDLED;
1916}
1917
1918/* simd handler for MSI/INTa */
1919static void qedf_simd_int_handler(void *cookie)
1920{
1921 /* Cookie is qedf_ctx struct */
1922 struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
1923
1924 QEDF_WARN(&(qedf->dbg_ctx), "qedf=%p.\n", qedf);
1925}
1926
1927#define QEDF_SIMD_HANDLER_NUM 0
1928static void qedf_sync_free_irqs(struct qedf_ctx *qedf)
1929{
1930 int i;
1931
1932 if (qedf->int_info.msix_cnt) {
1933 for (i = 0; i < qedf->int_info.used_cnt; i++) {
1934 synchronize_irq(qedf->int_info.msix[i].vector);
1935 irq_set_affinity_hint(qedf->int_info.msix[i].vector,
1936 NULL);
1937 irq_set_affinity_notifier(qedf->int_info.msix[i].vector,
1938 NULL);
1939 free_irq(qedf->int_info.msix[i].vector,
1940 &qedf->fp_array[i]);
1941 }
1942 } else
1943 qed_ops->common->simd_handler_clean(qedf->cdev,
1944 QEDF_SIMD_HANDLER_NUM);
1945
1946 qedf->int_info.used_cnt = 0;
1947 qed_ops->common->set_fp_int(qedf->cdev, 0);
1948}
1949
1950static int qedf_request_msix_irq(struct qedf_ctx *qedf)
1951{
1952 int i, rc, cpu;
1953
1954 cpu = cpumask_first(cpu_online_mask);
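	/* Round-robin the MSI-X vectors across the online CPUs via affinity hints */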
1955 for (i = 0; i < qedf->num_queues; i++) {
1956 rc = request_irq(qedf->int_info.msix[i].vector,
1957 qedf_msix_handler, 0, "qedf", &qedf->fp_array[i]);
1958
1959 if (rc) {
1960 QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n");
1961 qedf_sync_free_irqs(qedf);
1962 return rc;
1963 }
1964
1965 qedf->int_info.used_cnt++;
1966 rc = irq_set_affinity_hint(qedf->int_info.msix[i].vector,
1967 get_cpu_mask(cpu));
1968 cpu = cpumask_next(cpu, cpu_online_mask);
1969 }
1970
1971 return 0;
1972}
1973
1974static int qedf_setup_int(struct qedf_ctx *qedf)
1975{
1976 int rc = 0;
1977
1978 /*
1979 * Learn interrupt configuration
1980 */
1981 rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus());
1982
1983 rc = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info);
1984 if (rc)
1985 return 0;
1986
1987 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of msix_cnt = "
1988 "0x%x num of cpus = 0x%x\n", qedf->int_info.msix_cnt,
1989 num_online_cpus());
1990
1991 if (qedf->int_info.msix_cnt)
1992 return qedf_request_msix_irq(qedf);
1993
1994 qed_ops->common->simd_handler_config(qedf->cdev, &qedf,
1995 QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler);
1996 qedf->int_info.used_cnt = 1;
1997
1998 return 0;
1999}
2000
2001/* Main function for libfc frame reception */
2002static void qedf_recv_frame(struct qedf_ctx *qedf,
2003 struct sk_buff *skb)
2004{
2005 u32 fr_len;
2006 struct fc_lport *lport;
2007 struct fc_frame_header *fh;
2008 struct fcoe_crc_eof crc_eof;
2009 struct fc_frame *fp;
2010 u8 *mac = NULL;
2011 u8 *dest_mac = NULL;
2012 struct fcoe_hdr *hp;
2013 struct qedf_rport *fcport;
2014
2015 lport = qedf->lport;
2016 if (lport == NULL || lport->state == LPORT_ST_DISABLED) {
2017 QEDF_WARN(NULL, "Invalid lport struct or lport disabled.\n");
2018 kfree_skb(skb);
2019 return;
2020 }
2021
2022 if (skb_is_nonlinear(skb))
2023 skb_linearize(skb);
2024 mac = eth_hdr(skb)->h_source;
2025 dest_mac = eth_hdr(skb)->h_dest;
2026
2027 /* Pull the header */
2028 hp = (struct fcoe_hdr *)skb->data;
2029 fh = (struct fc_frame_header *) skb_transport_header(skb);
2030 skb_pull(skb, sizeof(struct fcoe_hdr));
2031 fr_len = skb->len - sizeof(struct fcoe_crc_eof);
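	/* Frame length excludes the trailing FCoE CRC/EOF */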
2032
2033 fp = (struct fc_frame *)skb;
2034 fc_frame_init(fp);
2035 fr_dev(fp) = lport;
2036 fr_sof(fp) = hp->fcoe_sof;
2037 if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
2038 kfree_skb(skb);
2039 return;
2040 }
2041 fr_eof(fp) = crc_eof.fcoe_eof;
2042 fr_crc(fp) = crc_eof.fcoe_crc32;
2043 if (pskb_trim(skb, fr_len)) {
2044 kfree_skb(skb);
2045 return;
2046 }
2047
2048 fh = fc_frame_header_get(fp);
2049
2050 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
2051 fh->fh_type == FC_TYPE_FCP) {
2052	 /* Drop FCP data. We don't handle this in the L2 path */
2053 kfree_skb(skb);
2054 return;
2055 }
2056 if (fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
2057 fh->fh_type == FC_TYPE_ELS) {
2058 switch (fc_frame_payload_op(fp)) {
2059 case ELS_LOGO:
2060 if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
2061 /* drop non-FIP LOGO */
2062 kfree_skb(skb);
2063 return;
2064 }
2065 break;
2066 }
2067 }
2068
2069 if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) {
2070 /* Drop incoming ABTS */
2071 kfree_skb(skb);
2072 return;
2073 }
2074
2075 /*
2076 * If a connection is uploading, drop incoming FCoE frames as there
2077 * is a small window where we could try to return a frame while libfc
2078 * is trying to clean things up.
2079 */
2080
2081 /* Get fcport associated with d_id if it exists */
2082 fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));
2083
2084 if (fcport && test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
2085 &fcport->flags)) {
2086 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
2087 "Connection uploading, dropping fp=%p.\n", fp);
2088 kfree_skb(skb);
2089 return;
2090 }
2091
2092 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame receive: "
2093 "skb=%p fp=%p src=%06x dest=%06x r_ctl=%x fh_type=%x.\n", skb, fp,
2094 ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
2095 fh->fh_type);
2096 if (qedf_dump_frames)
2097 print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
2098 1, skb->data, skb->len, false);
2099 fc_exch_recv(lport, fp);
2100}
2101
2102static void qedf_ll2_process_skb(struct work_struct *work)
2103{
2104 struct qedf_skb_work *skb_work =
2105 container_of(work, struct qedf_skb_work, work);
2106 struct qedf_ctx *qedf = skb_work->qedf;
2107 struct sk_buff *skb = skb_work->skb;
2108 struct ethhdr *eh;
2109
2110 if (!qedf) {
2111 QEDF_ERR(NULL, "qedf is NULL\n");
2112 goto err_out;
2113 }
2114
2115 eh = (struct ethhdr *)skb->data;
2116
2117 /* Undo VLAN encapsulation */
2118 if (eh->h_proto == htons(ETH_P_8021Q)) {
2119 memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
2120 eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
2121 skb_reset_mac_header(skb);
2122 }
2123
2124 /*
2125 * Process either a FIP frame or FCoE frame based on the
2126 * protocol value. If it's not either just drop the
2127 * frame.
2128 */
2129 if (eh->h_proto == htons(ETH_P_FIP)) {
2130 qedf_fip_recv(qedf, skb);
2131 goto out;
2132 } else if (eh->h_proto == htons(ETH_P_FCOE)) {
2133 __skb_pull(skb, ETH_HLEN);
2134 qedf_recv_frame(qedf, skb);
2135 goto out;
2136 } else
2137 goto err_out;
2138
2139err_out:
2140 kfree_skb(skb);
2141out:
2142 kfree(skb_work);
2143 return;
2144}
2145
2146static int qedf_ll2_rx(void *cookie, struct sk_buff *skb,
2147 u32 arg1, u32 arg2)
2148{
2149 struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
2150 struct qedf_skb_work *skb_work;
2151
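	/* Defer all processing to ll2_recv_wq; allocate with GFP_ATOMIC since this rx callback may run in atomic context */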
2152 skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC);
2153 if (!skb_work) {
2154 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so "
2155 "dropping frame.\n");
2156 kfree_skb(skb);
2157 return 0;
2158 }
2159
2160 INIT_WORK(&skb_work->work, qedf_ll2_process_skb);
2161 skb_work->skb = skb;
2162 skb_work->qedf = qedf;
2163 queue_work(qedf->ll2_recv_wq, &skb_work->work);
2164
2165 return 0;
2166}
2167
2168static struct qed_ll2_cb_ops qedf_ll2_cb_ops = {
2169 .rx_cb = qedf_ll2_rx,
2170 .tx_cb = NULL,
2171};
2172
2173/* Main thread to process I/O completions */
2174void qedf_fp_io_handler(struct work_struct *work)
2175{
2176 struct qedf_io_work *io_work =
2177 container_of(work, struct qedf_io_work, work);
2178 u32 comp_type;
2179
2180 /*
2181 * Deferred part of unsolicited CQE sends
2182 * frame to libfc.
2183 */
2184 comp_type = (io_work->cqe.cqe_data >>
2185 FCOE_CQE_CQE_TYPE_SHIFT) &
2186 FCOE_CQE_CQE_TYPE_MASK;
2187 if (comp_type == FCOE_UNSOLIC_CQE_TYPE &&
2188 io_work->fp)
2189 fc_exch_recv(io_work->qedf->lport, io_work->fp);
2190 else
2191 qedf_process_cqe(io_work->qedf, &io_work->cqe);
2192
2193 kfree(io_work);
2194}
2195
2196static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
2197 struct qed_sb_info *sb_info, u16 sb_id)
2198{
2199 struct status_block *sb_virt;
2200 dma_addr_t sb_phys;
2201 int ret;
2202
2203 sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
2204 sizeof(struct status_block), &sb_phys, GFP_KERNEL);
2205
2206 if (!sb_virt) {
2207 QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed "
2208 "for id = %d.\n", sb_id);
2209 return -ENOMEM;
2210 }
2211
2212 ret = qed_ops->common->sb_init(qedf->cdev, sb_info, sb_virt, sb_phys,
2213 sb_id, QED_SB_TYPE_STORAGE);
2214
2215 if (ret) {
2216 QEDF_ERR(&(qedf->dbg_ctx), "Status block initialization "
2217 "failed for id = %d.\n", sb_id);
2218 return ret;
2219 }
2220
2221 return 0;
2222}
2223
2224static void qedf_free_sb(struct qedf_ctx *qedf, struct qed_sb_info *sb_info)
2225{
2226 if (sb_info->sb_virt)
2227 dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_info->sb_virt),
2228 (void *)sb_info->sb_virt, sb_info->sb_phys);
2229}
2230
2231static void qedf_destroy_sb(struct qedf_ctx *qedf)
2232{
2233 int id;
2234 struct qedf_fastpath *fp = NULL;
2235
2236 for (id = 0; id < qedf->num_queues; id++) {
2237 fp = &(qedf->fp_array[id]);
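		/* An sb_id still set to QEDF_SB_ID_NULL marks the first uninitialized fastpath, so stop there */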
2238 if (fp->sb_id == QEDF_SB_ID_NULL)
2239 break;
2240 qedf_free_sb(qedf, fp->sb_info);
2241 kfree(fp->sb_info);
2242 }
2243 kfree(qedf->fp_array);
2244}
2245
2246static int qedf_prepare_sb(struct qedf_ctx *qedf)
2247{
2248 int id;
2249 struct qedf_fastpath *fp;
2250 int ret;
2251
2252 qedf->fp_array =
2253 kcalloc(qedf->num_queues, sizeof(struct qedf_fastpath),
2254 GFP_KERNEL);
2255
2256 if (!qedf->fp_array) {
2257 QEDF_ERR(&(qedf->dbg_ctx), "fastpath array allocation "
2258 "failed.\n");
2259 return -ENOMEM;
2260 }
2261
2262 for (id = 0; id < qedf->num_queues; id++) {
2263 fp = &(qedf->fp_array[id]);
2264 fp->sb_id = QEDF_SB_ID_NULL;
2265 fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
2266 if (!fp->sb_info) {
2267 QEDF_ERR(&(qedf->dbg_ctx), "SB info struct "
2268 "allocation failed.\n");
2269 goto err;
2270 }
2271 ret = qedf_alloc_and_init_sb(qedf, fp->sb_info, id);
2272 if (ret) {
2273 QEDF_ERR(&(qedf->dbg_ctx), "SB allocation and "
2274 "initialization failed.\n");
2275 goto err;
2276 }
2277 fp->sb_id = id;
2278 fp->qedf = qedf;
2279 fp->cq_num_entries =
2280 qedf->global_queues[id]->cq_mem_size /
2281 sizeof(struct fcoe_cqe);
2282 }
2283err:
2284 return 0;
2285}
2286
2287void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
2288{
2289 u16 xid;
2290 struct qedf_ioreq *io_req;
2291 struct qedf_rport *fcport;
2292 u32 comp_type;
2293
2294 comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
2295 FCOE_CQE_CQE_TYPE_MASK;
2296
2297 xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
2298 io_req = &qedf->cmd_mgr->cmds[xid];
2299
2300 /* Completion not for a valid I/O anymore so just return */
2301 if (!io_req)
2302 return;
2303
2304 fcport = io_req->fcport;
2305
2306 if (fcport == NULL) {
2307 QEDF_ERR(&(qedf->dbg_ctx), "fcport is NULL.\n");
2308 return;
2309 }
2310
2311 /*
2312 * Check that fcport is offloaded. If it isn't then the spinlock
2313 * isn't valid and shouldn't be taken. We should just return.
2314 */
2315 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2316 QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
2317 return;
2318 }
2319
2320
2321 switch (comp_type) {
2322 case FCOE_GOOD_COMPLETION_CQE_TYPE:
2323 atomic_inc(&fcport->free_sqes);
2324 switch (io_req->cmd_type) {
2325 case QEDF_SCSI_CMD:
2326 qedf_scsi_completion(qedf, cqe, io_req);
2327 break;
2328 case QEDF_ELS:
2329 qedf_process_els_compl(qedf, cqe, io_req);
2330 break;
2331 case QEDF_TASK_MGMT_CMD:
2332 qedf_process_tmf_compl(qedf, cqe, io_req);
2333 break;
2334 case QEDF_SEQ_CLEANUP:
2335 qedf_process_seq_cleanup_compl(qedf, cqe, io_req);
2336 break;
2337 }
2338 break;
2339 case FCOE_ERROR_DETECTION_CQE_TYPE:
2340 atomic_inc(&fcport->free_sqes);
2341 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2342 "Error detect CQE.\n");
2343 qedf_process_error_detect(qedf, cqe, io_req);
2344 break;
2345 case FCOE_EXCH_CLEANUP_CQE_TYPE:
2346 atomic_inc(&fcport->free_sqes);
2347 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2348 "Cleanup CQE.\n");
2349 qedf_process_cleanup_compl(qedf, cqe, io_req);
2350 break;
2351 case FCOE_ABTS_CQE_TYPE:
2352 atomic_inc(&fcport->free_sqes);
2353 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2354 "Abort CQE.\n");
2355 qedf_process_abts_compl(qedf, cqe, io_req);
2356 break;
2357 case FCOE_DUMMY_CQE_TYPE:
2358 atomic_inc(&fcport->free_sqes);
2359 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2360 "Dummy CQE.\n");
2361 break;
2362 case FCOE_LOCAL_COMP_CQE_TYPE:
2363 atomic_inc(&fcport->free_sqes);
2364 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2365 "Local completion CQE.\n");
2366 break;
2367 case FCOE_WARNING_CQE_TYPE:
2368 atomic_inc(&fcport->free_sqes);
2369 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2370 "Warning CQE.\n");
2371 qedf_process_warning_compl(qedf, cqe, io_req);
2372 break;
2373 case MAX_FCOE_CQE_TYPE:
2374 atomic_inc(&fcport->free_sqes);
2375 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2376 "Max FCoE CQE.\n");
2377 break;
2378 default:
2379 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2380 "Default CQE.\n");
2381 break;
2382 }
2383}
2384
2385static void qedf_free_bdq(struct qedf_ctx *qedf)
2386{
2387 int i;
2388
2389 if (qedf->bdq_pbl_list)
2390 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
2391 qedf->bdq_pbl_list, qedf->bdq_pbl_list_dma);
2392
2393 if (qedf->bdq_pbl)
2394 dma_free_coherent(&qedf->pdev->dev, qedf->bdq_pbl_mem_size,
2395 qedf->bdq_pbl, qedf->bdq_pbl_dma);
2396
2397 for (i = 0; i < QEDF_BDQ_SIZE; i++) {
2398 if (qedf->bdq[i].buf_addr) {
2399 dma_free_coherent(&qedf->pdev->dev, QEDF_BDQ_BUF_SIZE,
2400 qedf->bdq[i].buf_addr, qedf->bdq[i].buf_dma);
2401 }
2402 }
2403}
2404
2405static void qedf_free_global_queues(struct qedf_ctx *qedf)
2406{
2407 int i;
2408 struct global_queue **gl = qedf->global_queues;
2409
2410 for (i = 0; i < qedf->num_queues; i++) {
2411 if (!gl[i])
2412 continue;
2413
2414 if (gl[i]->cq)
2415 dma_free_coherent(&qedf->pdev->dev,
2416 gl[i]->cq_mem_size, gl[i]->cq, gl[i]->cq_dma);
2417 if (gl[i]->cq_pbl)
2418 dma_free_coherent(&qedf->pdev->dev, gl[i]->cq_pbl_size,
2419 gl[i]->cq_pbl, gl[i]->cq_pbl_dma);
2420
2421 kfree(gl[i]);
2422 }
2423
2424 qedf_free_bdq(qedf);
2425}
2426
2427static int qedf_alloc_bdq(struct qedf_ctx *qedf)
2428{
2429 int i;
2430 struct scsi_bd *pbl;
2431 u64 *list;
2432 dma_addr_t page;
2433
2434 /* Alloc dma memory for BDQ buffers */
2435 for (i = 0; i < QEDF_BDQ_SIZE; i++) {
2436 qedf->bdq[i].buf_addr = dma_alloc_coherent(&qedf->pdev->dev,
2437 QEDF_BDQ_BUF_SIZE, &qedf->bdq[i].buf_dma, GFP_KERNEL);
2438 if (!qedf->bdq[i].buf_addr) {
2439 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ "
2440 "buffer %d.\n", i);
2441 return -ENOMEM;
2442 }
2443 }
2444
2445 /* Alloc dma memory for BDQ page buffer list */
2446 qedf->bdq_pbl_mem_size =
2447 QEDF_BDQ_SIZE * sizeof(struct scsi_bd);
2448 qedf->bdq_pbl_mem_size =
2449 ALIGN(qedf->bdq_pbl_mem_size, QEDF_PAGE_SIZE);
2450
2451 qedf->bdq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
2452 qedf->bdq_pbl_mem_size, &qedf->bdq_pbl_dma, GFP_KERNEL);
2453 if (!qedf->bdq_pbl) {
2454 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ PBL.\n");
2455 return -ENOMEM;
2456 }
2457
2458 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
2459 "BDQ PBL addr=0x%p dma=0x%llx.\n", qedf->bdq_pbl,
2460 qedf->bdq_pbl_dma);
2461
2462 /*
2463 * Populate BDQ PBL with physical and virtual address of individual
2464 * BDQ buffers
2465 */
2466 pbl = (struct scsi_bd *)qedf->bdq_pbl;
2467 for (i = 0; i < QEDF_BDQ_SIZE; i++) {
2468 pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
2469 pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
2470 pbl->opaque.hi = 0;
2471 /* Opaque lo data is an index into the BDQ array */
2472 pbl->opaque.lo = cpu_to_le32(i);
2473 pbl++;
2474 }
2475
2476 /* Allocate list of PBL pages */
2477 qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
2478 QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL);
2479 if (!qedf->bdq_pbl_list) {
2480 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL "
2481 "pages.\n");
2482 return -ENOMEM;
2483 }
2484 memset(qedf->bdq_pbl_list, 0, QEDF_PAGE_SIZE);
2485
2486 /*
2487 * Now populate PBL list with pages that contain pointers to the
2488 * individual buffers.
2489 */
2490 qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size /
2491 QEDF_PAGE_SIZE;
2492 list = (u64 *)qedf->bdq_pbl_list;
2493 page = qedf->bdq_pbl_list_dma;
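	/* Point each page-list entry at the BDQ PBL allocated above */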
2494 for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) {
2495 *list = qedf->bdq_pbl_dma;
2496 list++;
2497 page += QEDF_PAGE_SIZE;
2498 }
2499
2500 return 0;
2501}
2502
2503static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
2504{
2505 u32 *list;
2506 int i;
2507 int status = 0, rc;
2508 u32 *pbl;
2509 dma_addr_t page;
2510 int num_pages;
2511
2512 /* Allocate and map CQs, RQs */
2513 /*
2514 * Number of global queues (CQ / RQ). This should
2515 * be <= number of available MSIX vectors for the PF
2516 */
2517 if (!qedf->num_queues) {
2518 QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
2519 return 1;
2520 }
2521
2522 /*
2523 * Make sure we allocated the PBL that will contain the physical
2524 * addresses of our queues
2525 */
2526 if (!qedf->p_cpuq) {
2527 status = 1;
2528 goto mem_alloc_failure;
2529 }
2530
2531 qedf->global_queues = kzalloc((sizeof(struct global_queue *)
2532 * qedf->num_queues), GFP_KERNEL);
2533 if (!qedf->global_queues) {
2534 QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate global "
2535 "queues array ptr memory\n");
2536 return -ENOMEM;
2537 }
2538 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
2539 "qedf->global_queues=%p.\n", qedf->global_queues);
2540
2541 /* Allocate DMA coherent buffers for BDQ */
2542 rc = qedf_alloc_bdq(qedf);
2543 if (rc)
2544 goto mem_alloc_failure;
2545
2546 /* Allocate a CQ and an associated PBL for each MSI-X vector */
2547 for (i = 0; i < qedf->num_queues; i++) {
2548 qedf->global_queues[i] = kzalloc(sizeof(struct global_queue),
2549 GFP_KERNEL);
2550 if (!qedf->global_queues[i]) {
2551	 QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocate "
2552 "global queue %d.\n", i);
2553 goto mem_alloc_failure;
2554 }
2555
2556 qedf->global_queues[i]->cq_mem_size =
2557 FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe);
2558 qedf->global_queues[i]->cq_mem_size =
2559 ALIGN(qedf->global_queues[i]->cq_mem_size, QEDF_PAGE_SIZE);
2560
2561 qedf->global_queues[i]->cq_pbl_size =
2562 (qedf->global_queues[i]->cq_mem_size /
2563 PAGE_SIZE) * sizeof(void *);
2564 qedf->global_queues[i]->cq_pbl_size =
2565 ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);
2566
2567 qedf->global_queues[i]->cq =
2568 dma_alloc_coherent(&qedf->pdev->dev,
2569 qedf->global_queues[i]->cq_mem_size,
2570 &qedf->global_queues[i]->cq_dma, GFP_KERNEL);
2571
2572 if (!qedf->global_queues[i]->cq) {
2573 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
2574 "cq.\n");
2575 status = -ENOMEM;
2576 goto mem_alloc_failure;
2577 }
2578 memset(qedf->global_queues[i]->cq, 0,
2579 qedf->global_queues[i]->cq_mem_size);
2580
2581 qedf->global_queues[i]->cq_pbl =
2582 dma_alloc_coherent(&qedf->pdev->dev,
2583 qedf->global_queues[i]->cq_pbl_size,
2584 &qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL);
2585
2586 if (!qedf->global_queues[i]->cq_pbl) {
2587 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
2588 "cq PBL.\n");
2589 status = -ENOMEM;
2590 goto mem_alloc_failure;
2591 }
2592 memset(qedf->global_queues[i]->cq_pbl, 0,
2593 qedf->global_queues[i]->cq_pbl_size);
2594
2595 /* Create PBL */
2596 num_pages = qedf->global_queues[i]->cq_mem_size /
2597 QEDF_PAGE_SIZE;
2598 page = qedf->global_queues[i]->cq_dma;
2599 pbl = (u32 *)qedf->global_queues[i]->cq_pbl;
2600
2601 while (num_pages--) {
2602 *pbl = U64_LO(page);
2603 pbl++;
2604 *pbl = U64_HI(page);
2605 pbl++;
2606 page += QEDF_PAGE_SIZE;
2607 }
2608 /* Set the initial consumer index for cq */
2609 qedf->global_queues[i]->cq_cons_idx = 0;
2610 }
2611
2612 list = (u32 *)qedf->p_cpuq;
2613
2614 /*
2615 * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
2616 * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points
2617 * to the physical address which contains an array of pointers to
2618 * the physical addresses of the specific queue pages.
2619 */
2620 for (i = 0; i < qedf->num_queues; i++) {
2621 *list = U64_LO(qedf->global_queues[i]->cq_pbl_dma);
2622 list++;
2623 *list = U64_HI(qedf->global_queues[i]->cq_pbl_dma);
2624 list++;
2625 *list = U64_LO(0);
2626 list++;
2627 *list = U64_HI(0);
2628 list++;
2629 }
2630
2631 return 0;
2632
2633mem_alloc_failure:
2634 qedf_free_global_queues(qedf);
2635 return status;
2636}
2637
2638static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
2639{
2640 u8 sq_num_pbl_pages;
2641 u32 sq_mem_size;
2642 u32 cq_mem_size;
2643 u32 cq_num_entries;
2644 int rval;
2645
2646 /*
2647 * The number of completion queues/fastpath interrupts/status blocks
2648	 * we allocate is the minimum of:
2649 *
2650 * Number of CPUs
2651 * Number of MSI-X vectors
2652 * Max number allocated in hardware (QEDF_MAX_NUM_CQS)
2653 */
2654 qedf->num_queues = min((unsigned int)QEDF_MAX_NUM_CQS,
2655 num_online_cpus());
2656
2657 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
2658 qedf->num_queues);
2659
2660 qedf->p_cpuq = pci_alloc_consistent(qedf->pdev,
2661 qedf->num_queues * sizeof(struct qedf_glbl_q_params),
2662 &qedf->hw_p_cpuq);
2663
2664 if (!qedf->p_cpuq) {
2665 QEDF_ERR(&(qedf->dbg_ctx), "pci_alloc_consistent failed.\n");
2666 return 1;
2667 }
2668
2669 rval = qedf_alloc_global_queues(qedf);
2670 if (rval) {
2671 QEDF_ERR(&(qedf->dbg_ctx), "Global queue allocation "
2672 "failed.\n");
2673 return 1;
2674 }
2675
2676 /* Calculate SQ PBL size in the same manner as in qedf_sq_alloc() */
2677 sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe);
2678 sq_mem_size = ALIGN(sq_mem_size, QEDF_PAGE_SIZE);
2679 sq_num_pbl_pages = (sq_mem_size / QEDF_PAGE_SIZE);
2680
2681 /* Calculate CQ num entries */
2682 cq_mem_size = FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe);
2683 cq_mem_size = ALIGN(cq_mem_size, QEDF_PAGE_SIZE);
2684 cq_num_entries = cq_mem_size / sizeof(struct fcoe_cqe);
2685
2686 memset(&(qedf->pf_params), 0,
2687 sizeof(qedf->pf_params));
2688
2689 /* Setup the value for fcoe PF */
2690 qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS;
2691 qedf->pf_params.fcoe_pf_params.num_tasks = FCOE_PARAMS_NUM_TASKS;
2692 qedf->pf_params.fcoe_pf_params.glbl_q_params_addr =
2693 (u64)qedf->hw_p_cpuq;
2694 qedf->pf_params.fcoe_pf_params.sq_num_pbl_pages = sq_num_pbl_pages;
2695
2696 qedf->pf_params.fcoe_pf_params.rq_buffer_log_size = 0;
2697
2698 qedf->pf_params.fcoe_pf_params.cq_num_entries = cq_num_entries;
2699 qedf->pf_params.fcoe_pf_params.num_cqs = qedf->num_queues;
2700
2701 /* log_page_size: 12 for 4KB pages */
2702 qedf->pf_params.fcoe_pf_params.log_page_size = ilog2(QEDF_PAGE_SIZE);
2703
2704 qedf->pf_params.fcoe_pf_params.mtu = 9000;
2705 qedf->pf_params.fcoe_pf_params.gl_rq_pi = QEDF_FCOE_PARAMS_GL_RQ_PI;
2706 qedf->pf_params.fcoe_pf_params.gl_cmd_pi = QEDF_FCOE_PARAMS_GL_CMD_PI;
2707
2708 /* BDQ address and size */
2709 qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0] =
2710 qedf->bdq_pbl_list_dma;
2711 qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0] =
2712 qedf->bdq_pbl_list_num_entries;
2713 qedf->pf_params.fcoe_pf_params.rq_buffer_size = QEDF_BDQ_BUF_SIZE;
2714
2715 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
2716 "bdq_list=%p bdq_pbl_list_dma=%llx bdq_pbl_list_entries=%d.\n",
2717 qedf->bdq_pbl_list,
2718 qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0],
2719 qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0]);
2720
2721 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
2722 "cq_num_entries=%d.\n",
2723 qedf->pf_params.fcoe_pf_params.cq_num_entries);
2724
2725 return 0;
2726}
2727
2728/* Free DMA coherent memory for array of queue pointers we pass to qed */
2729static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf)
2730{
2731 size_t size = 0;
2732
2733 if (qedf->p_cpuq) {
2734 size = qedf->num_queues * sizeof(struct qedf_glbl_q_params);
2735 pci_free_consistent(qedf->pdev, size, qedf->p_cpuq,
2736 qedf->hw_p_cpuq);
2737 }
2738
2739 qedf_free_global_queues(qedf);
2740
2741 if (qedf->global_queues)
2742 kfree(qedf->global_queues);
2743}
2744
2745/*
2746 * PCI driver functions
2747 */
2748
2749static const struct pci_device_id qedf_pci_tbl[] = {
2750 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165c) },
2751 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8080) },
2752 {0}
2753};
2754MODULE_DEVICE_TABLE(pci, qedf_pci_tbl);
2755
2756static struct pci_driver qedf_pci_driver = {
2757 .name = QEDF_MODULE_NAME,
2758 .id_table = qedf_pci_tbl,
2759 .probe = qedf_probe,
2760 .remove = qedf_remove,
2761};
2762
2763static int __qedf_probe(struct pci_dev *pdev, int mode)
2764{
2765 int rc = -EINVAL;
2766 struct fc_lport *lport;
2767 struct qedf_ctx *qedf;
2768 struct Scsi_Host *host;
2769 bool is_vf = false;
2770 struct qed_ll2_params params;
2771 char host_buf[20];
2772 struct qed_link_params link_params;
2773 int status;
2774 void *task_start, *task_end;
2775 struct qed_slowpath_params slowpath_params;
2776 struct qed_probe_params qed_params;
2777 u16 tmp;
2778
2779 /*
2780 * When doing error recovery we didn't reap the lport so don't try
2781 * to reallocate it.
2782 */
2783 if (mode != QEDF_MODE_RECOVERY) {
2784 lport = libfc_host_alloc(&qedf_host_template,
2785 sizeof(struct qedf_ctx));
2786
2787 if (!lport) {
2788 QEDF_ERR(NULL, "Could not allocate lport.\n");
2789 rc = -ENOMEM;
2790 goto err0;
2791 }
2792
2793 /* Initialize qedf_ctx */
2794 qedf = lport_priv(lport);
2795 qedf->lport = lport;
2796 qedf->ctlr.lp = lport;
2797 qedf->pdev = pdev;
2798 qedf->dbg_ctx.pdev = pdev;
2799 qedf->dbg_ctx.host_no = lport->host->host_no;
2800 spin_lock_init(&qedf->hba_lock);
2801 INIT_LIST_HEAD(&qedf->fcports);
2802 qedf->curr_conn_id = QEDF_MAX_SESSIONS - 1;
2803 atomic_set(&qedf->num_offloads, 0);
2804 qedf->stop_io_on_error = false;
2805 pci_set_drvdata(pdev, qedf);
2806
2807 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
2808 "QLogic FastLinQ FCoE Module qedf %s, "
2809 "FW %d.%d.%d.%d\n", QEDF_VERSION,
2810 FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
2811 FW_ENGINEERING_VERSION);
2812 } else {
2813 /* Init pointers during recovery */
2814 qedf = pci_get_drvdata(pdev);
2815 lport = qedf->lport;
2816 }
2817
2818 host = lport->host;
2819
2820 /* Allocate mempool for qedf_io_work structs */
2821 qedf->io_mempool = mempool_create_slab_pool(QEDF_IO_WORK_MIN,
2822 qedf_io_work_cache);
2823 if (qedf->io_mempool == NULL) {
2824 QEDF_ERR(&(qedf->dbg_ctx), "qedf->io_mempool is NULL.\n");
2825 goto err1;
2826 }
2827 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n",
2828 qedf->io_mempool);
2829
2830 sprintf(host_buf, "qedf_%u_link",
2831 qedf->lport->host->host_no);
2832 qedf->link_update_wq = create_singlethread_workqueue(host_buf);
2833 INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
2834 INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
2835
2836 qedf->fipvlan_retries = qedf_fipvlan_retries;
2837
2838 /*
2839 * Common probe. Takes care of basic hardware init and pci_*
2840 * functions.
2841 */
2842 memset(&qed_params, 0, sizeof(qed_params));
2843 qed_params.protocol = QED_PROTOCOL_FCOE;
2844 qed_params.dp_module = qedf_dp_module;
2845 qed_params.dp_level = qedf_dp_level;
2846 qed_params.is_vf = is_vf;
2847 qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
2848 if (!qedf->cdev) {
2849 rc = -ENODEV;
2850 goto err1;
2851 }
2852
2853 /* queue allocation code should come here
2854 * order should be
2855 * slowpath_start
2856 * status block allocation
2857 * interrupt registration (to get min number of queues)
2858 * set_fcoe_pf_param
2859 * qed_sp_fcoe_func_start
2860 */
2861 rc = qedf_set_fcoe_pf_param(qedf);
2862 if (rc) {
2863 QEDF_ERR(&(qedf->dbg_ctx), "Cannot set fcoe pf param.\n");
2864 goto err2;
2865 }
2866 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
2867
2868 /* Learn information crucial for qedf to progress */
2869 rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
2870 if (rc) {
2871	 QEDF_ERR(&(qedf->dbg_ctx), "Failed to fill dev info.\n");
2872 goto err1;
2873 }
2874
2875 /* Record BDQ producer doorbell addresses */
2876 qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
2877 qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
2878 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
2879 "BDQ primary_prod=%p secondary_prod=%p.\n", qedf->bdq_primary_prod,
2880 qedf->bdq_secondary_prod);
2881
2882 qed_ops->register_ops(qedf->cdev, &qedf_cb_ops, qedf);
2883
2884 rc = qedf_prepare_sb(qedf);
2885 if (rc) {
2886
2887	 QEDF_ERR(&(qedf->dbg_ctx), "Cannot prepare status blocks.\n");
2888 goto err2;
2889 }
2890
2891 /* Start the Slowpath-process */
2892 slowpath_params.int_mode = QED_INT_MODE_MSIX;
2893 slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER;
2894 slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;
2895 slowpath_params.drv_rev = QEDF_DRIVER_REV_VER;
2896 slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER;
2897 memcpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE);
2898 rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params);
2899 if (rc) {
2900 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n");
2901 goto err2;
2902 }
2903
2904 /*
2905 * update_pf_params needs to be called before and after slowpath
2906 * start
2907 */
2908 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
2909
2910 /* Setup interrupts */
2911 rc = qedf_setup_int(qedf);
2912 if (rc)
2913 goto err3;
2914
2915 rc = qed_ops->start(qedf->cdev, &qedf->tasks);
2916 if (rc) {
2917 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start FCoE function.\n");
2918 goto err4;
2919 }
2920 task_start = qedf_get_task_mem(&qedf->tasks, 0);
2921 task_end = qedf_get_task_mem(&qedf->tasks, MAX_TID_BLOCKS_FCOE - 1);
2922 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Task context start=%p, "
2923 "end=%p block_size=%u.\n", task_start, task_end,
2924 qedf->tasks.size);
2925
2926 /*
2927 * We need to write the number of BDs in the BDQ we've preallocated so
2928 * the f/w will do a prefetch and we'll get an unsolicited CQE when a
2929 * packet arrives.
2930 */
2931 qedf->bdq_prod_idx = QEDF_BDQ_SIZE;
2932 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
2933 "Writing %d to primary and secondary BDQ doorbell registers.\n",
2934 qedf->bdq_prod_idx);
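	/* Read back each producer value to flush the posted doorbell write */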
2935 writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
2936 tmp = readw(qedf->bdq_primary_prod);
2937 writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
2938 tmp = readw(qedf->bdq_secondary_prod);
2939
2940 qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
2941
2942	 /* Now that the dev_info struct has been filled in, set the MAC
2943 * address
2944 */
2945 ether_addr_copy(qedf->mac, qedf->dev_info.common.hw_mac);
2946 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n",
2947 qedf->mac);
2948
2949 /* Set the WWNN and WWPN based on the MAC address */
2950 qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0);
2951 qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0);
2952 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "WWNN=%016llx "
2953 "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn);
2954
2955 sprintf(host_buf, "host_%d", host->host_no);
2956 qed_ops->common->set_id(qedf->cdev, host_buf, QEDF_VERSION);
2957
2958
2959 /* Set xid max values */
2960 qedf->max_scsi_xid = QEDF_MAX_SCSI_XID;
2961 qedf->max_els_xid = QEDF_MAX_ELS_XID;
2962
2963 /* Allocate cmd mgr */
2964 qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
2965 if (!qedf->cmd_mgr) {
2966 QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n");
2967 goto err5;
2968 }
2969
2970 if (mode != QEDF_MODE_RECOVERY) {
2971 host->transportt = qedf_fc_transport_template;
2972 host->can_queue = QEDF_MAX_ELS_XID;
2973 host->max_lun = qedf_max_lun;
2974 host->max_cmd_len = QEDF_MAX_CDB_LEN;
2975 rc = scsi_add_host(host, &pdev->dev);
2976 if (rc)
2977 goto err6;
2978 }
2979
2980 memset(&params, 0, sizeof(params));
2981 params.mtu = 9000;
2982 ether_addr_copy(params.ll2_mac_address, qedf->mac);
2983
2984 /* Start LL2 processing thread */
2985 snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no);
2986 qedf->ll2_recv_wq =
2987 create_singlethread_workqueue(host_buf);
2988 if (!qedf->ll2_recv_wq) {
2989	 QEDF_ERR(&(qedf->dbg_ctx), "Failed to create LL2 workqueue.\n");
2990 goto err7;
2991 }
2992
2993#ifdef CONFIG_DEBUG_FS
2994 qedf_dbg_host_init(&(qedf->dbg_ctx), &qedf_debugfs_ops,
2995 &qedf_dbg_fops);
2996#endif
2997
2998 /* Start LL2 */
2999 qed_ops->ll2->register_cb_ops(qedf->cdev, &qedf_ll2_cb_ops, qedf);
3000 rc = qed_ops->ll2->start(qedf->cdev, &params);
3001 if (rc) {
3002 QEDF_ERR(&(qedf->dbg_ctx), "Could not start Light L2.\n");
3003 goto err7;
3004 }
3005 set_bit(QEDF_LL2_STARTED, &qedf->flags);
3006
3007	 /* hw will be inserting the vlan tag */
3008 qedf->vlan_hw_insert = 1;
3009 qedf->vlan_id = 0;
3010
3011 /*
3012 * No need to setup fcoe_ctlr or fc_lport objects during recovery since
3013 * they were not reaped during the unload process.
3014 */
3015 if (mode != QEDF_MODE_RECOVERY) {
3016	 /* Setup embedded fcoe controller */
3017 qedf_fcoe_ctlr_setup(qedf);
3018
3019 /* Setup lport */
3020 rc = qedf_lport_setup(qedf);
3021 if (rc) {
3022 QEDF_ERR(&(qedf->dbg_ctx),
3023 "qedf_lport_setup failed.\n");
3024 goto err7;
3025 }
3026 }
3027
3028 sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no);
3029 qedf->timer_work_queue =
3030 create_singlethread_workqueue(host_buf);
3031 if (!qedf->timer_work_queue) {
3032 QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
3033 "workqueue.\n");
3034 goto err7;
3035 }
3036
3037 /* DPC workqueue is not reaped during recovery unload */
3038 if (mode != QEDF_MODE_RECOVERY) {
3039 sprintf(host_buf, "qedf_%u_dpc",
3040 qedf->lport->host->host_no);
3041 qedf->dpc_wq = create_singlethread_workqueue(host_buf);
3042 }
3043
3044 /*
3045 * GRC dump and sysfs parameters are not reaped during the recovery
3046 * unload process.
3047 */
3048 if (mode != QEDF_MODE_RECOVERY) {
3049 qedf->grcdump_size = qed_ops->common->dbg_grc_size(qedf->cdev);
3050 if (qedf->grcdump_size) {
3051 rc = qedf_alloc_grc_dump_buf(&qedf->grcdump,
3052 qedf->grcdump_size);
3053 if (rc) {
3054 QEDF_ERR(&(qedf->dbg_ctx),
3055 "GRC Dump buffer alloc failed.\n");
3056 qedf->grcdump = NULL;
3057 }
3058
3059 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3060 "grcdump: addr=%p, size=%u.\n",
3061 qedf->grcdump, qedf->grcdump_size);
3062 }
3063 qedf_create_sysfs_ctx_attr(qedf);
3064
3065 /* Initialize I/O tracing for this adapter */
3066 spin_lock_init(&qedf->io_trace_lock);
3067 qedf->io_trace_idx = 0;
3068 }
3069
3070 init_completion(&qedf->flogi_compl);
3071
3072 memset(&link_params, 0, sizeof(struct qed_link_params));
3073 link_params.link_up = true;
3074 status = qed_ops->common->set_link(qedf->cdev, &link_params);
3075 if (status)
3076 QEDF_WARN(&(qedf->dbg_ctx), "set_link failed.\n");
3077
3078 /* Start/restart discovery */
3079 if (mode == QEDF_MODE_RECOVERY)
3080 fcoe_ctlr_link_up(&qedf->ctlr);
3081 else
3082 fc_fabric_login(lport);
3083
3084 /* All good */
3085 return 0;
3086
3087err7:
3088 if (qedf->ll2_recv_wq)
3089 destroy_workqueue(qedf->ll2_recv_wq);
3090 fc_remove_host(qedf->lport->host);
3091 scsi_remove_host(qedf->lport->host);
3092#ifdef CONFIG_DEBUG_FS
3093 qedf_dbg_host_exit(&(qedf->dbg_ctx));
3094#endif
3095err6:
3096 qedf_cmd_mgr_free(qedf->cmd_mgr);
3097err5:
3098 qed_ops->stop(qedf->cdev);
3099err4:
3100 qedf_free_fcoe_pf_param(qedf);
3101 qedf_sync_free_irqs(qedf);
3102err3:
3103 qed_ops->common->slowpath_stop(qedf->cdev);
3104err2:
3105 qed_ops->common->remove(qedf->cdev);
3106err1:
3107 scsi_host_put(lport->host);
3108err0:
3109 return rc;
3110}
3111
3112static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3113{
3114 return __qedf_probe(pdev, QEDF_MODE_NORMAL);
3115}
3116
3117static void __qedf_remove(struct pci_dev *pdev, int mode)
3118{
3119 struct qedf_ctx *qedf;
3120
3121 if (!pdev) {
3122 QEDF_ERR(NULL, "pdev is NULL.\n");
3123 return;
3124 }
3125
3126 qedf = pci_get_drvdata(pdev);
3127
3128 /*
3129 * Prevent race where we're in board disable work and then try to
3130 * rmmod the module.
3131 */
3132 if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
3133 QEDF_ERR(&qedf->dbg_ctx, "Already removing PCI function.\n");
3134 return;
3135 }
3136
3137 if (mode != QEDF_MODE_RECOVERY)
3138 set_bit(QEDF_UNLOADING, &qedf->flags);
3139
3140 /* Logoff the fabric to upload all connections */
3141 if (mode == QEDF_MODE_RECOVERY)
3142 fcoe_ctlr_link_down(&qedf->ctlr);
3143 else
3144 fc_fabric_logoff(qedf->lport);
3145 qedf_wait_for_upload(qedf);
3146
3147#ifdef CONFIG_DEBUG_FS
3148 qedf_dbg_host_exit(&(qedf->dbg_ctx));
3149#endif
3150
3151 /* Stop any link update handling */
3152 cancel_delayed_work_sync(&qedf->link_update);
3153 destroy_workqueue(qedf->link_update_wq);
3154 qedf->link_update_wq = NULL;
3155
3156 if (qedf->timer_work_queue)
3157 destroy_workqueue(qedf->timer_work_queue);
3158
3159 /* Stop Light L2 */
3160 clear_bit(QEDF_LL2_STARTED, &qedf->flags);
3161 qed_ops->ll2->stop(qedf->cdev);
3162 if (qedf->ll2_recv_wq)
3163 destroy_workqueue(qedf->ll2_recv_wq);
3164
3165 /* Stop fastpath */
3166 qedf_sync_free_irqs(qedf);
3167 qedf_destroy_sb(qedf);
3168
3169 /*
3170 * During recovery don't destroy OS constructs that represent the
3171 * physical port.
3172 */
3173 if (mode != QEDF_MODE_RECOVERY) {
3174 qedf_free_grc_dump_buf(&qedf->grcdump);
3175 qedf_remove_sysfs_ctx_attr(qedf);
3176
3177 /* Remove all SCSI/libfc/libfcoe structures */
3178 fcoe_ctlr_destroy(&qedf->ctlr);
3179 fc_lport_destroy(qedf->lport);
3180 fc_remove_host(qedf->lport->host);
3181 scsi_remove_host(qedf->lport->host);
3182 }
3183
3184 qedf_cmd_mgr_free(qedf->cmd_mgr);
3185
3186 if (mode != QEDF_MODE_RECOVERY) {
3187 fc_exch_mgr_free(qedf->lport);
3188 fc_lport_free_stats(qedf->lport);
3189
3190 /* Wait for all vports to be reaped */
3191 qedf_wait_for_vport_destroy(qedf);
3192 }
3193
3194 /*
3195 * Now that all connections have been uploaded we can stop the
3196 * rest of the qed operations
3197 */
3198 qed_ops->stop(qedf->cdev);
3199
3200 if (mode != QEDF_MODE_RECOVERY) {
3201 if (qedf->dpc_wq) {
3202 /* Stop general DPC handling */
3203 destroy_workqueue(qedf->dpc_wq);
3204 qedf->dpc_wq = NULL;
3205 }
3206 }
3207
3208 /* Final shutdown for the board */
3209 qedf_free_fcoe_pf_param(qedf);
3210 if (mode != QEDF_MODE_RECOVERY) {
3211 qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
3212 pci_set_drvdata(pdev, NULL);
3213 }
3214 qed_ops->common->slowpath_stop(qedf->cdev);
3215 qed_ops->common->remove(qedf->cdev);
3216
3217 mempool_destroy(qedf->io_mempool);
3218
3219 /* Only reap the Scsi_host on a real removal */
3220 if (mode != QEDF_MODE_RECOVERY)
3221 scsi_host_put(qedf->lport->host);
3222}
3223
3224static void qedf_remove(struct pci_dev *pdev)
3225{
3226 /* Check to make sure this function wasn't already disabled */
3227 if (!atomic_read(&pdev->enable_cnt))
3228 return;
3229
3230 __qedf_remove(pdev, QEDF_MODE_NORMAL);
3231}
3232
3233/*
3234 * Module Init/Remove
3235 */
3236
3237static int __init qedf_init(void)
3238{
3239 int ret;
3240
3241 /* If debug=1 passed, set the default log mask */
3242 if (qedf_debug == QEDF_LOG_DEFAULT)
3243 qedf_debug = QEDF_DEFAULT_LOG_MASK;
3244
3245 /* Print driver banner */
3246 QEDF_INFO(NULL, QEDF_LOG_INFO, "%s v%s.\n", QEDF_DESCR,
3247 QEDF_VERSION);
3248
3249 /* Create kmem_cache for qedf_io_work structs */
3250 qedf_io_work_cache = kmem_cache_create("qedf_io_work_cache",
3251 sizeof(struct qedf_io_work), 0, SLAB_HWCACHE_ALIGN, NULL);
3252 if (qedf_io_work_cache == NULL) {
3253 QEDF_ERR(NULL, "qedf_io_work_cache is NULL.\n");
3254 goto err1;
3255 }
3256 QEDF_INFO(NULL, QEDF_LOG_DISC, "qedf_io_work_cache=%p.\n",
3257 qedf_io_work_cache);
3258
3259 qed_ops = qed_get_fcoe_ops();
3260 if (!qed_ops) {
3261 QEDF_ERR(NULL, "Failed to get qed fcoe operations\n");
3262 goto err1;
3263 }
3264
3265#ifdef CONFIG_DEBUG_FS
3266 qedf_dbg_init("qedf");
3267#endif
3268
3269 qedf_fc_transport_template =
3270 fc_attach_transport(&qedf_fc_transport_fn);
3271 if (!qedf_fc_transport_template) {
3272 QEDF_ERR(NULL, "Could not register with FC transport\n");
3273 goto err2;
3274 }
3275
3276 qedf_fc_vport_transport_template =
3277 fc_attach_transport(&qedf_fc_vport_transport_fn);
3278 if (!qedf_fc_vport_transport_template) {
3279 QEDF_ERR(NULL, "Could not register vport template with FC "
3280 "transport\n");
3281 goto err3;
3282 }
3283
3284 qedf_io_wq = create_workqueue("qedf_io_wq");
3285 if (!qedf_io_wq) {
3286 QEDF_ERR(NULL, "Could not create qedf_io_wq.\n");
3287 goto err4;
3288 }
3289
3290 qedf_cb_ops.get_login_failures = qedf_get_login_failures;
3291
3292 ret = pci_register_driver(&qedf_pci_driver);
3293 if (ret) {
3294 QEDF_ERR(NULL, "Failed to register driver\n");
3295 goto err5;
3296 }
3297
3298 return 0;
3299
3300err5:
3301 destroy_workqueue(qedf_io_wq);
3302err4:
3303 fc_release_transport(qedf_fc_vport_transport_template);
3304err3:
3305 fc_release_transport(qedf_fc_transport_template);
3306err2:
3307#ifdef CONFIG_DEBUG_FS
3308 qedf_dbg_exit();
3309#endif
3310 qed_put_fcoe_ops();
3311err1:
3312 return -EINVAL;
3313}
3314
3315static void __exit qedf_cleanup(void)
3316{
3317 pci_unregister_driver(&qedf_pci_driver);
3318
3319 destroy_workqueue(qedf_io_wq);
3320
3321 fc_release_transport(qedf_fc_vport_transport_template);
3322 fc_release_transport(qedf_fc_transport_template);
3323#ifdef CONFIG_DEBUG_FS
3324 qedf_dbg_exit();
3325#endif
3326 qed_put_fcoe_ops();
3327
3328 kmem_cache_destroy(qedf_io_work_cache);
3329}
3330
3331MODULE_LICENSE("GPL");
3332MODULE_DESCRIPTION("QLogic QEDF 25/40/50/100Gb FCoE Driver");
3333MODULE_AUTHOR("QLogic Corporation");
3334MODULE_VERSION(QEDF_VERSION);
3335module_init(qedf_init);
3336module_exit(qedf_cleanup);
diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h
new file mode 100644
index 000000000000..4ae5f537a440
--- /dev/null
+++ b/drivers/scsi/qedf/qedf_version.h
@@ -0,0 +1,15 @@
1/*
2 * QLogic FCoE Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#define QEDF_VERSION "8.10.7.0"
11#define QEDF_DRIVER_MAJOR_VER 8
12#define QEDF_DRIVER_MINOR_VER 10
13#define QEDF_DRIVER_REV_VER 7
14#define QEDF_DRIVER_ENG_VER 0
15