author    Bhanu Gollapudi <bprakash@broadcom.com>    2011-02-04 15:10:34 -0500
committer James Bottomley <James.Bottomley@suse.de>  2011-02-28 19:40:27 -0500
commit    853e2bd2103aaa91d1ba1c0b57ba17628d836f03 (patch)
tree      a8e0cae98856eac066cb9e598a5b9693c27cb2da /drivers/scsi/bnx2fc
parent    d2f809528a3534ea295b6d855c33cbbb3369d8c9 (diff)
[SCSI] bnx2fc: Broadcom FCoE offload driver
This driver is for the Broadcom NetXtreme II 57712 chip. This patch contains the driver sources for the bnx2fc driver. The libfc/libfcoe changes needed to enable bnx2fc have already gone through the fcoe tree. bnx2fc is a SCSI low-level driver that interfaces with the SCSI midlayer and the libfc, libfcoe, and cnic modules. The driver uses the services of libfc for slow path operations such as FIP and fabric discovery; fast path I/O operations are performed after offloading the session information to the underlying FCoE firmware.

Signed-off-by: Bhanu Prakash Gollapudi <bprakash@broadcom.com>
Reviewed-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi/bnx2fc')
-rw-r--r--  drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h   | 1080
-rw-r--r--  drivers/scsi/bnx2fc/Kconfig             |   11
-rw-r--r--  drivers/scsi/bnx2fc/Makefile            |    3
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc.h            |  511
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_constants.h  |  206
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_debug.h      |   70
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_els.c        |  515
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c       | 2535
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c        | 1868
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c         | 1833
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_tgt.c        |  844
11 files changed, 9476 insertions, 0 deletions
diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
new file mode 100644
index 000000000000..69d031d98469
--- /dev/null
+++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
@@ -0,0 +1,1080 @@
1#ifndef __57XX_FCOE_HSI_LINUX_LE__
2#define __57XX_FCOE_HSI_LINUX_LE__
3
4/*
5 * common data for all protocols
6 */
7struct b577xx_doorbell_hdr {
8 u8 header;
9#define B577XX_DOORBELL_HDR_RX (0x1<<0)
10#define B577XX_DOORBELL_HDR_RX_SHIFT 0
11#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1)
12#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1
13#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2)
14#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2
15#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4)
16#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4
17};
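/*
 * All flag bytes in this header follow the same convention: a field MASK
 * macro already shifted into position, plus a matching _SHIFT macro. A
 * minimal usage sketch (illustrative only, not part of the original patch;
 * B577XX_FCOE_CONNECTION_TYPE is defined in bnx2fc_constants.h):
 *
 *	hdr.header |= (B577XX_FCOE_CONNECTION_TYPE <<
 *		       B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT) &
 *		      B577XX_DOORBELL_HDR_CONN_TYPE;
 *	conn_type = (hdr.header & B577XX_DOORBELL_HDR_CONN_TYPE) >>
 *		    B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
 */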
18
19/*
20 * doorbell message sent to the chip
21 */
22struct b577xx_doorbell_set_prod {
23#if defined(__BIG_ENDIAN)
24 u16 prod;
25 u8 zero_fill1;
26 struct b577xx_doorbell_hdr header;
27#elif defined(__LITTLE_ENDIAN)
28 struct b577xx_doorbell_hdr header;
29 u8 zero_fill1;
30 u16 prod;
31#endif
32};
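/*
 * Note on the __BIG_ENDIAN/__LITTLE_ENDIAN pairs used throughout this file:
 * fields within each 32-bit word are declared in reverse order on big-endian
 * hosts so that the in-memory layout seen by the (little-endian) chip is
 * identical on either host type; only the declaration order changes, never
 * the field sizes.
 */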
33
34
35struct regpair {
36 __le32 lo;
37 __le32 hi;
38};
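/*
 * regpair, and the many *_addr_lo/*_addr_hi pairs below, carry 64-bit DMA
 * addresses as two 32-bit halves. An illustrative way to split an address
 * obtained from dma_alloc_coherent() (variable names are hypothetical):
 *
 *	addr_lo = (u32) dma_addr;
 *	addr_hi = (u32) ((u64) dma_addr >> 32);
 */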
39
40
41/*
42 * Fixed-size structure so that it can be embedded in a union
43 */
44struct fcoe_abts_rsp_union {
45 u32 r_ctl;
46 u32 abts_rsp_payload[7];
47};
48
49
50/*
51 * 4 regs size
52 */
53struct fcoe_bd_ctx {
54 u32 buf_addr_hi;
55 u32 buf_addr_lo;
56#if defined(__BIG_ENDIAN)
57 u16 rsrv0;
58 u16 buf_len;
59#elif defined(__LITTLE_ENDIAN)
60 u16 buf_len;
61 u16 rsrv0;
62#endif
63#if defined(__BIG_ENDIAN)
64 u16 rsrv1;
65 u16 flags;
66#elif defined(__LITTLE_ENDIAN)
67 u16 flags;
68 u16 rsrv1;
69#endif
70};
71
72
73struct fcoe_cleanup_flow_info {
74#if defined(__BIG_ENDIAN)
75 u16 reserved1;
76 u16 task_id;
77#elif defined(__LITTLE_ENDIAN)
78 u16 task_id;
79 u16 reserved1;
80#endif
81 u32 reserved2[7];
82};
83
84
85struct fcoe_fcp_cmd_payload {
86 u32 opaque[8];
87};
88
89struct fcoe_fc_hdr {
90#if defined(__BIG_ENDIAN)
91 u8 cs_ctl;
92 u8 s_id[3];
93#elif defined(__LITTLE_ENDIAN)
94 u8 s_id[3];
95 u8 cs_ctl;
96#endif
97#if defined(__BIG_ENDIAN)
98 u8 r_ctl;
99 u8 d_id[3];
100#elif defined(__LITTLE_ENDIAN)
101 u8 d_id[3];
102 u8 r_ctl;
103#endif
104#if defined(__BIG_ENDIAN)
105 u8 seq_id;
106 u8 df_ctl;
107 u16 seq_cnt;
108#elif defined(__LITTLE_ENDIAN)
109 u16 seq_cnt;
110 u8 df_ctl;
111 u8 seq_id;
112#endif
113#if defined(__BIG_ENDIAN)
114 u8 type;
115 u8 f_ctl[3];
116#elif defined(__LITTLE_ENDIAN)
117 u8 f_ctl[3];
118 u8 type;
119#endif
120 u32 parameters;
121#if defined(__BIG_ENDIAN)
122 u16 ox_id;
123 u16 rx_id;
124#elif defined(__LITTLE_ENDIAN)
125 u16 rx_id;
126 u16 ox_id;
127#endif
128};
129
130struct fcoe_fc_frame {
131 struct fcoe_fc_hdr fc_hdr;
132 u32 reserved0[2];
133};
134
135union fcoe_cmd_flow_info {
136 struct fcoe_fcp_cmd_payload fcp_cmd_payload;
137 struct fcoe_fc_frame mp_fc_frame;
138};
139
140
141
142struct fcoe_fcp_rsp_flags {
143 u8 flags;
144#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
145#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0
146#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
147#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1
148#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
149#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2
150#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
151#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3
152#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4)
153#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4
154#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5)
155#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5
156};
157
158
159struct fcoe_fcp_rsp_payload {
160 struct regpair reserved0;
161 u32 fcp_resid;
162#if defined(__BIG_ENDIAN)
163 u16 retry_delay_timer;
164 struct fcoe_fcp_rsp_flags fcp_flags;
165 u8 scsi_status_code;
166#elif defined(__LITTLE_ENDIAN)
167 u8 scsi_status_code;
168 struct fcoe_fcp_rsp_flags fcp_flags;
169 u16 retry_delay_timer;
170#endif
171 u32 fcp_rsp_len;
172 u32 fcp_sns_len;
173};
174
175
176/*
177 * Fixed-size structure so that it can be embedded in a union
178 */
179struct fcoe_fcp_rsp_union {
180 struct fcoe_fcp_rsp_payload payload;
181 struct regpair reserved0;
182};
183
184
185struct fcoe_fcp_xfr_rdy_payload {
186 u32 burst_len;
187 u32 data_ro;
188};
189
190struct fcoe_read_flow_info {
191 struct fcoe_fc_hdr fc_data_in_hdr;
192 u32 reserved[2];
193};
194
195struct fcoe_write_flow_info {
196 struct fcoe_fc_hdr fc_data_out_hdr;
197 struct fcoe_fcp_xfr_rdy_payload fcp_xfr_payload;
198};
199
200union fcoe_rsp_flow_info {
201 struct fcoe_fcp_rsp_union fcp_rsp;
202 struct fcoe_abts_rsp_union abts_rsp;
203};
204
205/*
206 * 32 bytes used for general purposes
207 */
208union fcoe_general_task_ctx {
209 union fcoe_cmd_flow_info cmd_info;
210 struct fcoe_read_flow_info read_info;
211 struct fcoe_write_flow_info write_info;
212 union fcoe_rsp_flow_info rsp_info;
213 struct fcoe_cleanup_flow_info cleanup_info;
214 u32 comp_info[8];
215};
216
217
218/*
219 * FCoE KCQ CQE parameters
220 */
221union fcoe_kcqe_params {
222 u32 reserved0[4];
223};
224
225/*
226 * FCoE KCQ CQE
227 */
228struct fcoe_kcqe {
229 u32 fcoe_conn_id;
230 u32 completion_status;
231 u32 fcoe_conn_context_id;
232 union fcoe_kcqe_params params;
233#if defined(__BIG_ENDIAN)
234 u8 flags;
235#define FCOE_KCQE_RESERVED0 (0x7<<0)
236#define FCOE_KCQE_RESERVED0_SHIFT 0
237#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
238#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
239#define FCOE_KCQE_LAYER_CODE (0x7<<4)
240#define FCOE_KCQE_LAYER_CODE_SHIFT 4
241#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
242#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
243 u8 op_code;
244 u16 qe_self_seq;
245#elif defined(__LITTLE_ENDIAN)
246 u16 qe_self_seq;
247 u8 op_code;
248 u8 flags;
249#define FCOE_KCQE_RESERVED0 (0x7<<0)
250#define FCOE_KCQE_RESERVED0_SHIFT 0
251#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
252#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
253#define FCOE_KCQE_LAYER_CODE (0x7<<4)
254#define FCOE_KCQE_LAYER_CODE_SHIFT 4
255#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
256#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
257#endif
258};
259
260/*
261 * FCoE KWQE header
262 */
263struct fcoe_kwqe_header {
264#if defined(__BIG_ENDIAN)
265 u8 flags;
266#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
267#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
268#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
269#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
270#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
271#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
272 u8 op_code;
273#elif defined(__LITTLE_ENDIAN)
274 u8 op_code;
275 u8 flags;
276#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
277#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
278#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
279#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
280#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
281#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
282#endif
283};
284
285/*
286 * FCoE firmware init request 1
287 */
288struct fcoe_kwqe_init1 {
289#if defined(__BIG_ENDIAN)
290 struct fcoe_kwqe_header hdr;
291 u16 num_tasks;
292#elif defined(__LITTLE_ENDIAN)
293 u16 num_tasks;
294 struct fcoe_kwqe_header hdr;
295#endif
296 u32 task_list_pbl_addr_lo;
297 u32 task_list_pbl_addr_hi;
298 u32 dummy_buffer_addr_lo;
299 u32 dummy_buffer_addr_hi;
300#if defined(__BIG_ENDIAN)
301 u16 rq_num_wqes;
302 u16 sq_num_wqes;
303#elif defined(__LITTLE_ENDIAN)
304 u16 sq_num_wqes;
305 u16 rq_num_wqes;
306#endif
307#if defined(__BIG_ENDIAN)
308 u16 cq_num_wqes;
309 u16 rq_buffer_log_size;
310#elif defined(__LITTLE_ENDIAN)
311 u16 rq_buffer_log_size;
312 u16 cq_num_wqes;
313#endif
314#if defined(__BIG_ENDIAN)
315 u8 flags;
316#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
317#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
318#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
319#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
320#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
321#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
322 u8 num_sessions_log;
323 u16 mtu;
324#elif defined(__LITTLE_ENDIAN)
325 u16 mtu;
326 u8 num_sessions_log;
327 u8 flags;
328#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
329#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
330#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
331#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
332#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
333#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
334#endif
335};
336
337/*
338 * FCoE firmware init request 2
339 */
340struct fcoe_kwqe_init2 {
341#if defined(__BIG_ENDIAN)
342 struct fcoe_kwqe_header hdr;
343 u16 reserved0;
344#elif defined(__LITTLE_ENDIAN)
345 u16 reserved0;
346 struct fcoe_kwqe_header hdr;
347#endif
348 u32 hash_tbl_pbl_addr_lo;
349 u32 hash_tbl_pbl_addr_hi;
350 u32 t2_hash_tbl_addr_lo;
351 u32 t2_hash_tbl_addr_hi;
352 u32 t2_ptr_hash_tbl_addr_lo;
353 u32 t2_ptr_hash_tbl_addr_hi;
354 u32 free_list_count;
355};
356
357/*
358 * FCoE firmware init request 3
359 */
360struct fcoe_kwqe_init3 {
361#if defined(__BIG_ENDIAN)
362 struct fcoe_kwqe_header hdr;
363 u16 reserved0;
364#elif defined(__LITTLE_ENDIAN)
365 u16 reserved0;
366 struct fcoe_kwqe_header hdr;
367#endif
368 u32 error_bit_map_lo;
369 u32 error_bit_map_hi;
370#if defined(__BIG_ENDIAN)
371 u8 reserved21[3];
372 u8 cached_session_enable;
373#elif defined(__LITTLE_ENDIAN)
374 u8 cached_session_enable;
375 u8 reserved21[3];
376#endif
377 u32 reserved2[4];
378};
379
380/*
381 * FCoE connection offload request 1
382 */
383struct fcoe_kwqe_conn_offload1 {
384#if defined(__BIG_ENDIAN)
385 struct fcoe_kwqe_header hdr;
386 u16 fcoe_conn_id;
387#elif defined(__LITTLE_ENDIAN)
388 u16 fcoe_conn_id;
389 struct fcoe_kwqe_header hdr;
390#endif
391 u32 sq_addr_lo;
392 u32 sq_addr_hi;
393 u32 rq_pbl_addr_lo;
394 u32 rq_pbl_addr_hi;
395 u32 rq_first_pbe_addr_lo;
396 u32 rq_first_pbe_addr_hi;
397#if defined(__BIG_ENDIAN)
398 u16 reserved0;
399 u16 rq_prod;
400#elif defined(__LITTLE_ENDIAN)
401 u16 rq_prod;
402 u16 reserved0;
403#endif
404};
405
406/*
407 * FCoE connection offload request 2
408 */
409struct fcoe_kwqe_conn_offload2 {
410#if defined(__BIG_ENDIAN)
411 struct fcoe_kwqe_header hdr;
412 u16 tx_max_fc_pay_len;
413#elif defined(__LITTLE_ENDIAN)
414 u16 tx_max_fc_pay_len;
415 struct fcoe_kwqe_header hdr;
416#endif
417 u32 cq_addr_lo;
418 u32 cq_addr_hi;
419 u32 xferq_addr_lo;
420 u32 xferq_addr_hi;
421 u32 conn_db_addr_lo;
422 u32 conn_db_addr_hi;
423 u32 reserved1;
424};
425
426/*
427 * FCoE connection offload request 3
428 */
429struct fcoe_kwqe_conn_offload3 {
430#if defined(__BIG_ENDIAN)
431 struct fcoe_kwqe_header hdr;
432 u16 vlan_tag;
433#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
434#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
435#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
436#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
437#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
438#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
439#elif defined(__LITTLE_ENDIAN)
440 u16 vlan_tag;
441#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
442#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
443#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
444#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
445#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
446#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
447 struct fcoe_kwqe_header hdr;
448#endif
449#if defined(__BIG_ENDIAN)
450 u8 tx_max_conc_seqs_c3;
451 u8 s_id[3];
452#elif defined(__LITTLE_ENDIAN)
453 u8 s_id[3];
454 u8 tx_max_conc_seqs_c3;
455#endif
456#if defined(__BIG_ENDIAN)
457 u8 flags;
458#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
459#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
460#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
461#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
462#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
463#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
464#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
465#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
466#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
467#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
468#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
469#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
470#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
471#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
472#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
473#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
474 u8 d_id[3];
475#elif defined(__LITTLE_ENDIAN)
476 u8 d_id[3];
477 u8 flags;
478#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
479#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
480#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
481#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
482#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
483#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
484#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
485#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
486#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
487#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
488#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
489#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
490#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
491#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
492#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
493#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
494#endif
495 u32 reserved;
496 u32 confq_first_pbe_addr_lo;
497 u32 confq_first_pbe_addr_hi;
498#if defined(__BIG_ENDIAN)
499 u16 rx_max_fc_pay_len;
500 u16 tx_total_conc_seqs;
501#elif defined(__LITTLE_ENDIAN)
502 u16 tx_total_conc_seqs;
503 u16 rx_max_fc_pay_len;
504#endif
505#if defined(__BIG_ENDIAN)
506 u8 rx_open_seqs_exch_c3;
507 u8 rx_max_conc_seqs_c3;
508 u16 rx_total_conc_seqs;
509#elif defined(__LITTLE_ENDIAN)
510 u16 rx_total_conc_seqs;
511 u8 rx_max_conc_seqs_c3;
512 u8 rx_open_seqs_exch_c3;
513#endif
514};
515
516/*
517 * FCoE connection offload request 4
518 */
519struct fcoe_kwqe_conn_offload4 {
520#if defined(__BIG_ENDIAN)
521 struct fcoe_kwqe_header hdr;
522 u8 reserved2;
523 u8 e_d_tov_timer_val;
524#elif defined(__LITTLE_ENDIAN)
525 u8 e_d_tov_timer_val;
526 u8 reserved2;
527 struct fcoe_kwqe_header hdr;
528#endif
529 u8 src_mac_addr_lo32[4];
530#if defined(__BIG_ENDIAN)
531 u8 dst_mac_addr_hi16[2];
532 u8 src_mac_addr_hi16[2];
533#elif defined(__LITTLE_ENDIAN)
534 u8 src_mac_addr_hi16[2];
535 u8 dst_mac_addr_hi16[2];
536#endif
537 u8 dst_mac_addr_lo32[4];
538 u32 lcq_addr_lo;
539 u32 lcq_addr_hi;
540 u32 confq_pbl_base_addr_lo;
541 u32 confq_pbl_base_addr_hi;
542};
543
544/*
545 * FCoE connection enable request
546 */
547struct fcoe_kwqe_conn_enable_disable {
548#if defined(__BIG_ENDIAN)
549 struct fcoe_kwqe_header hdr;
550 u16 reserved0;
551#elif defined(__LITTLE_ENDIAN)
552 u16 reserved0;
553 struct fcoe_kwqe_header hdr;
554#endif
555 u8 src_mac_addr_lo32[4];
556#if defined(__BIG_ENDIAN)
557 u16 vlan_tag;
558#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
559#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
560#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
561#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
562#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
563#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
564 u8 src_mac_addr_hi16[2];
565#elif defined(__LITTLE_ENDIAN)
566 u8 src_mac_addr_hi16[2];
567 u16 vlan_tag;
568#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
569#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
570#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
571#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
572#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
573#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
574#endif
575 u8 dst_mac_addr_lo32[4];
576#if defined(__BIG_ENDIAN)
577 u16 reserved1;
578 u8 dst_mac_addr_hi16[2];
579#elif defined(__LITTLE_ENDIAN)
580 u8 dst_mac_addr_hi16[2];
581 u16 reserved1;
582#endif
583#if defined(__BIG_ENDIAN)
584 u8 vlan_flag;
585 u8 s_id[3];
586#elif defined(__LITTLE_ENDIAN)
587 u8 s_id[3];
588 u8 vlan_flag;
589#endif
590#if defined(__BIG_ENDIAN)
591 u8 reserved3;
592 u8 d_id[3];
593#elif defined(__LITTLE_ENDIAN)
594 u8 d_id[3];
595 u8 reserved3;
596#endif
597 u32 context_id;
598 u32 conn_id;
599 u32 reserved4;
600};
601
602/*
603 * FCoE connection destroy request
604 */
605struct fcoe_kwqe_conn_destroy {
606#if defined(__BIG_ENDIAN)
607 struct fcoe_kwqe_header hdr;
608 u16 reserved0;
609#elif defined(__LITTLE_ENDIAN)
610 u16 reserved0;
611 struct fcoe_kwqe_header hdr;
612#endif
613 u32 context_id;
614 u32 conn_id;
615 u32 reserved1[5];
616};
617
618/*
619 * FCoE destroy request
620 */
621struct fcoe_kwqe_destroy {
622#if defined(__BIG_ENDIAN)
623 struct fcoe_kwqe_header hdr;
624 u16 reserved0;
625#elif defined(__LITTLE_ENDIAN)
626 u16 reserved0;
627 struct fcoe_kwqe_header hdr;
628#endif
629 u32 reserved1[7];
630};
631
632/*
633 * FCoE statistics request
634 */
635struct fcoe_kwqe_stat {
636#if defined(__BIG_ENDIAN)
637 struct fcoe_kwqe_header hdr;
638 u16 reserved0;
639#elif defined(__LITTLE_ENDIAN)
640 u16 reserved0;
641 struct fcoe_kwqe_header hdr;
642#endif
643 u32 stat_params_addr_lo;
644 u32 stat_params_addr_hi;
645 u32 reserved1[5];
646};
647
648/*
649 * FCoE KWQ WQE
650 */
651union fcoe_kwqe {
652 struct fcoe_kwqe_init1 init1;
653 struct fcoe_kwqe_init2 init2;
654 struct fcoe_kwqe_init3 init3;
655 struct fcoe_kwqe_conn_offload1 conn_offload1;
656 struct fcoe_kwqe_conn_offload2 conn_offload2;
657 struct fcoe_kwqe_conn_offload3 conn_offload3;
658 struct fcoe_kwqe_conn_offload4 conn_offload4;
659 struct fcoe_kwqe_conn_enable_disable conn_enable_disable;
660 struct fcoe_kwqe_conn_destroy conn_destroy;
661 struct fcoe_kwqe_destroy destroy;
662 struct fcoe_kwqe_stat statistics;
663};
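/*
 * KWQEs are not written to the hardware directly; the driver builds an
 * array of them and submits it through the cnic ops (see
 * ../../net/cnic_if.h). A hedged sketch of the call pattern used later in
 * this patch (bnx2fc_hwi.c); the stat request is just an example:
 *
 *	struct fcoe_kwqe_stat stat_req;
 *	struct kwqe *kwqe_arr[1] = { (struct kwqe *) &stat_req };
 *
 *	if (hba->cnic && hba->cnic->submit_kwqes)
 *		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
 */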
664
665struct fcoe_mul_sges_ctx {
666 struct regpair cur_sge_addr;
667#if defined(__BIG_ENDIAN)
668 u8 sgl_size;
669 u8 cur_sge_idx;
670 u16 cur_sge_off;
671#elif defined(__LITTLE_ENDIAN)
672 u16 cur_sge_off;
673 u8 cur_sge_idx;
674 u8 sgl_size;
675#endif
676};
677
678struct fcoe_s_stat_ctx {
679 u8 flags;
680#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0)
681#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0
682#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1)
683#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1
684#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2)
685#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2
686#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3)
687#define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3
688#define FCOE_S_STAT_CTX_P_RJT (0x1<<4)
689#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4
690#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5)
691#define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5
692#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6)
693#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6
694};
695
696struct fcoe_seq_ctx {
697#if defined(__BIG_ENDIAN)
698 u16 low_seq_cnt;
699 struct fcoe_s_stat_ctx s_stat;
700 u8 seq_id;
701#elif defined(__LITTLE_ENDIAN)
702 u8 seq_id;
703 struct fcoe_s_stat_ctx s_stat;
704 u16 low_seq_cnt;
705#endif
706#if defined(__BIG_ENDIAN)
707 u16 err_seq_cnt;
708 u16 high_seq_cnt;
709#elif defined(__LITTLE_ENDIAN)
710 u16 high_seq_cnt;
711 u16 err_seq_cnt;
712#endif
713 u32 low_exp_ro;
714 u32 high_exp_ro;
715};
716
717
718struct fcoe_single_sge_ctx {
719 struct regpair cur_buf_addr;
720#if defined(__BIG_ENDIAN)
721 u16 reserved0;
722 u16 cur_buf_rem;
723#elif defined(__LITTLE_ENDIAN)
724 u16 cur_buf_rem;
725 u16 reserved0;
726#endif
727};
728
729union fcoe_sgl_ctx {
730 struct fcoe_single_sge_ctx single_sge;
731 struct fcoe_mul_sges_ctx mul_sges;
732};
733
734
735
736/*
737 * FCoE SQ element
738 */
739struct fcoe_sqe {
740 u16 wqe;
741#define FCOE_SQE_TASK_ID (0x7FFF<<0)
742#define FCOE_SQE_TASK_ID_SHIFT 0
743#define FCOE_SQE_TOGGLE_BIT (0x1<<15)
744#define FCOE_SQE_TOGGLE_BIT_SHIFT 15
745};
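/*
 * The 15-bit task id occupies bits 0..14 of the WQE, and the toggle bit,
 * which the driver flips each time the SQ producer index wraps (see
 * sq_curr_toggle_bit in struct bnx2fc_rport and bnx2fc_add_2_sq() later in
 * this patch), occupies bit 15. An illustrative composition (xid and
 * toggle are hypothetical locals):
 *
 *	sqe->wqe = xid & FCOE_SQE_TASK_ID;
 *	sqe->wqe |= (toggle << FCOE_SQE_TOGGLE_BIT_SHIFT);
 */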
746
747
748
749struct fcoe_task_ctx_entry_tx_only {
750 union fcoe_sgl_ctx sgl_ctx;
751};
752
753struct fcoe_task_ctx_entry_txwr_rxrd {
754#if defined(__BIG_ENDIAN)
755 u16 verify_tx_seq;
756 u8 init_flags;
757#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
758#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
759#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
760#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
761#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
762#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
763#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
764#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
765#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
766#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
767 u8 tx_flags;
768#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
769#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
770#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
771#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
772#elif defined(__LITTLE_ENDIAN)
773 u8 tx_flags;
774#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
775#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
776#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
777#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
778 u8 init_flags;
779#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
780#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
781#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
782#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
783#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
784#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
785#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
786#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
787#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
788#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
789 u16 verify_tx_seq;
790#endif
791};
792
793/*
794 * Common section. Both TX and RX processing might write and read from it in
795 * different flows
796 */
797struct fcoe_task_ctx_entry_tx_rx_cmn {
798 u32 data_2_trns;
799 union fcoe_general_task_ctx general;
800#if defined(__BIG_ENDIAN)
801 u16 tx_low_seq_cnt;
802 struct fcoe_s_stat_ctx tx_s_stat;
803 u8 tx_seq_id;
804#elif defined(__LITTLE_ENDIAN)
805 u8 tx_seq_id;
806 struct fcoe_s_stat_ctx tx_s_stat;
807 u16 tx_low_seq_cnt;
808#endif
809 u32 common_flags;
810#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID (0xFFFFFF<<0)
811#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT 0
812#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID (0x1<<24)
813#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT 24
814#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT (0x1<<25)
815#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT_SHIFT 25
816#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER (0x1<<26)
817#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER_SHIFT 26
818#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF (0x1<<27)
819#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF_SHIFT 27
820#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME (0x1<<28)
821#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT 28
822#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV (0x7<<29)
823#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV_SHIFT 29
824};
825
826struct fcoe_task_ctx_entry_rxwr_txrd {
827#if defined(__BIG_ENDIAN)
828 u16 rx_id;
829 u16 rx_flags;
830#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
831#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
832#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
833#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
834#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
835#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
836#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
837#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
838#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
839#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
840#elif defined(__LITTLE_ENDIAN)
841 u16 rx_flags;
842#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
843#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
844#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
845#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
846#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
847#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
848#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
849#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
850#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
851#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
852 u16 rx_id;
853#endif
854};
855
856struct fcoe_task_ctx_entry_rx_only {
857 struct fcoe_seq_ctx seq_ctx;
858 struct fcoe_seq_ctx ooo_seq_ctx;
859 u32 rsrv3;
860 union fcoe_sgl_ctx sgl_ctx;
861};
862
863struct fcoe_task_ctx_entry {
864 struct fcoe_task_ctx_entry_tx_only tx_wr_only;
865 struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd;
866 struct fcoe_task_ctx_entry_tx_rx_cmn cmn;
867 struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd;
868 struct fcoe_task_ctx_entry_rx_only rx_wr_only;
869 u32 reserved[4];
870};
871
872
873/*
874 * FCoE XFRQ element
875 */
876struct fcoe_xfrqe {
877 u16 wqe;
878#define FCOE_XFRQE_TASK_ID (0x7FFF<<0)
879#define FCOE_XFRQE_TASK_ID_SHIFT 0
880#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15)
881#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15
882};
883
884
885/*
886 * FCoE CONFQ element
887 */
888struct fcoe_confqe {
889#if defined(__BIG_ENDIAN)
890 u16 rx_id;
891 u16 ox_id;
892#elif defined(__LITTLE_ENDIAN)
893 u16 ox_id;
894 u16 rx_id;
895#endif
896 u32 param;
897};
898
899
900/*
901 * FCoE connection database
902 */
903struct fcoe_conn_db {
904#if defined(__BIG_ENDIAN)
905 u16 rsrv0;
906 u16 rq_prod;
907#elif defined(__LITTLE_ENDIAN)
908 u16 rq_prod;
909 u16 rsrv0;
910#endif
911 u32 rsrv1;
912 struct regpair cq_arm;
913};
914
915
916/*
917 * FCoE CQ element
918 */
919struct fcoe_cqe {
920 u16 wqe;
921#define FCOE_CQE_CQE_INFO (0x3FFF<<0)
922#define FCOE_CQE_CQE_INFO_SHIFT 0
923#define FCOE_CQE_CQE_TYPE (0x1<<14)
924#define FCOE_CQE_CQE_TYPE_SHIFT 14
925#define FCOE_CQE_TOGGLE_BIT (0x1<<15)
926#define FCOE_CQE_TOGGLE_BIT_SHIFT 15
927};
928
929
930/*
931 * FCoE error/warning reporting entry
932 */
933struct fcoe_err_report_entry {
934 u32 err_warn_bitmap_lo;
935 u32 err_warn_bitmap_hi;
936 u32 tx_buf_off;
937 u32 rx_buf_off;
938 struct fcoe_fc_hdr fc_hdr;
939};
940
941
942/*
943 * FCoE hash table entry (32 bytes)
944 */
945struct fcoe_hash_table_entry {
946#if defined(__BIG_ENDIAN)
947 u8 d_id_0;
948 u8 s_id_2;
949 u8 s_id_1;
950 u8 s_id_0;
951#elif defined(__LITTLE_ENDIAN)
952 u8 s_id_0;
953 u8 s_id_1;
954 u8 s_id_2;
955 u8 d_id_0;
956#endif
957#if defined(__BIG_ENDIAN)
958 u16 dst_mac_addr_hi;
959 u8 d_id_2;
960 u8 d_id_1;
961#elif defined(__LITTLE_ENDIAN)
962 u8 d_id_1;
963 u8 d_id_2;
964 u16 dst_mac_addr_hi;
965#endif
966 u32 dst_mac_addr_lo;
967#if defined(__BIG_ENDIAN)
968 u16 vlan_id;
969 u16 src_mac_addr_hi;
970#elif defined(__LITTLE_ENDIAN)
971 u16 src_mac_addr_hi;
972 u16 vlan_id;
973#endif
974 u32 src_mac_addr_lo;
975#if defined(__BIG_ENDIAN)
976 u16 reserved1;
977 u8 reserved0;
978 u8 vlan_flag;
979#elif defined(__LITTLE_ENDIAN)
980 u8 vlan_flag;
981 u8 reserved0;
982 u16 reserved1;
983#endif
984 u32 reserved2;
985 u32 field_id;
986#define FCOE_HASH_TABLE_ENTRY_CID (0xFFFFFF<<0)
987#define FCOE_HASH_TABLE_ENTRY_CID_SHIFT 0
988#define FCOE_HASH_TABLE_ENTRY_RESERVED3 (0x7F<<24)
989#define FCOE_HASH_TABLE_ENTRY_RESERVED3_SHIFT 24
990#define FCOE_HASH_TABLE_ENTRY_VALID (0x1<<31)
991#define FCOE_HASH_TABLE_ENTRY_VALID_SHIFT 31
992};
993
994/*
995 * FCoE pending work request CQE
996 */
997struct fcoe_pend_wq_cqe {
998 u16 wqe;
999#define FCOE_PEND_WQ_CQE_TASK_ID (0x3FFF<<0)
1000#define FCOE_PEND_WQ_CQE_TASK_ID_SHIFT 0
1001#define FCOE_PEND_WQ_CQE_CQE_TYPE (0x1<<14)
1002#define FCOE_PEND_WQ_CQE_CQE_TYPE_SHIFT 14
1003#define FCOE_PEND_WQ_CQE_TOGGLE_BIT (0x1<<15)
1004#define FCOE_PEND_WQ_CQE_TOGGLE_BIT_SHIFT 15
1005};
1006
1007
1008/*
1009 * FCoE RX statistics parameters section#0
1010 */
1011struct fcoe_rx_stat_params_section0 {
1012 u32 fcoe_ver_cnt;
1013 u32 fcoe_rx_pkt_cnt;
1014 u32 fcoe_rx_byte_cnt;
1015 u32 fcoe_rx_drop_pkt_cnt;
1016};
1017
1018
1019/*
1020 * FCoE RX statistics parameters section#1
1021 */
1022struct fcoe_rx_stat_params_section1 {
1023 u32 fc_crc_cnt;
1024 u32 eofa_del_cnt;
1025 u32 miss_frame_cnt;
1026 u32 seq_timeout_cnt;
1027 u32 drop_seq_cnt;
1028 u32 fcoe_rx_drop_pkt_cnt;
1029 u32 fcp_rx_pkt_cnt;
1030 u32 reserved0;
1031};
1032
1033
1034/*
1035 * FCoE TX statistics parameters
1036 */
1037struct fcoe_tx_stat_params {
1038 u32 fcoe_tx_pkt_cnt;
1039 u32 fcoe_tx_byte_cnt;
1040 u32 fcp_tx_pkt_cnt;
1041 u32 reserved0;
1042};
1043
1044/*
1045 * FCoE statistics parameters
1046 */
1047struct fcoe_statistics_params {
1048 struct fcoe_tx_stat_params tx_stat;
1049 struct fcoe_rx_stat_params_section0 rx_stat0;
1050 struct fcoe_rx_stat_params_section1 rx_stat1;
1051};
1052
1053
1054/*
1055 * FCoE t2 hash table entry (64 bytes)
1056 */
1057struct fcoe_t2_hash_table_entry {
1058 struct fcoe_hash_table_entry data;
1059 struct regpair next;
1060 struct regpair reserved0[3];
1061};
1062
1063/*
1064 * FCoE unsolicited CQE
1065 */
1066struct fcoe_unsolicited_cqe {
1067 u16 wqe;
1068#define FCOE_UNSOLICITED_CQE_SUBTYPE (0x3<<0)
1069#define FCOE_UNSOLICITED_CQE_SUBTYPE_SHIFT 0
1070#define FCOE_UNSOLICITED_CQE_PKT_LEN (0xFFF<<2)
1071#define FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT 2
1072#define FCOE_UNSOLICITED_CQE_CQE_TYPE (0x1<<14)
1073#define FCOE_UNSOLICITED_CQE_CQE_TYPE_SHIFT 14
1074#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT (0x1<<15)
1075#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT_SHIFT 15
1076};
1077
1078
1079
1080#endif /* __57XX_FCOE_HSI_LINUX_LE__ */
diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig
new file mode 100644
index 000000000000..6a38080e35ed
--- /dev/null
+++ b/drivers/scsi/bnx2fc/Kconfig
@@ -0,0 +1,11 @@
1config SCSI_BNX2X_FCOE
2 tristate "Broadcom NetXtreme II FCoE support"
3 depends on PCI
4 select NETDEVICES
5 select NETDEV_1000
6 select LIBFC
7 select LIBFCOE
8 select CNIC
9 ---help---
10 This driver supports FCoE offload for the Broadcom NetXtreme II
11 devices.
diff --git a/drivers/scsi/bnx2fc/Makefile b/drivers/scsi/bnx2fc/Makefile
new file mode 100644
index 000000000000..a92695a25176
--- /dev/null
+++ b/drivers/scsi/bnx2fc/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_SCSI_BNX2X_FCOE) += bnx2fc.o
2
3bnx2fc-y := bnx2fc_els.o bnx2fc_fcoe.o bnx2fc_hwi.o bnx2fc_io.o bnx2fc_tgt.o
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
new file mode 100644
index 000000000000..df2fc09ba479
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -0,0 +1,511 @@
1#ifndef _BNX2FC_H_
2#define _BNX2FC_H_
3/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver.
4 *
5 * Copyright (c) 2008 - 2010 Broadcom Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
12 */
13
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/kernel.h>
17#include <linux/skbuff.h>
18#include <linux/netdevice.h>
19#include <linux/etherdevice.h>
20#include <linux/if_ether.h>
21#include <linux/if_vlan.h>
22#include <linux/kthread.h>
23#include <linux/crc32.h>
24#include <linux/cpu.h>
25#include <linux/types.h>
26#include <linux/list.h>
27#include <linux/delay.h>
28#include <linux/timer.h>
29#include <linux/errno.h>
30#include <linux/pci.h>
31#include <linux/init.h>
32#include <linux/dma-mapping.h>
33#include <linux/workqueue.h>
34#include <linux/mutex.h>
35#include <linux/spinlock.h>
36#include <linux/bitops.h>
37#include <linux/log2.h>
38#include <linux/interrupt.h>
39#include <linux/sched.h>
40#include <linux/io.h>
41
42#include <scsi/scsi.h>
43#include <scsi/scsi_host.h>
44#include <scsi/scsi_device.h>
45#include <scsi/scsi_cmnd.h>
46#include <scsi/scsi_eh.h>
47#include <scsi/scsi_tcq.h>
48#include <scsi/libfc.h>
49#include <scsi/libfcoe.h>
50#include <scsi/fc_encode.h>
51#include <scsi/scsi_transport.h>
52#include <scsi/scsi_transport_fc.h>
53#include <scsi/fc/fc_fip.h>
54#include <scsi/fc/fc_fc2.h>
55#include <scsi/fc_frame.h>
56#include <scsi/fc/fc_fcoe.h>
57#include <scsi/fc/fc_fcp.h>
58
59#include "57xx_hsi_bnx2fc.h"
60#include "bnx2fc_debug.h"
61#include "../../net/cnic_if.h"
62#include "bnx2fc_constants.h"
63
64#define BNX2FC_NAME "bnx2fc"
65#define BNX2FC_VERSION "1.0.0"
66
67#define PFX "bnx2fc: "
68
69#define BNX2X_DOORBELL_PCI_BAR 2
70
71#define BNX2FC_MAX_BD_LEN 0xffff
72#define BNX2FC_BD_SPLIT_SZ 0x8000
73#define BNX2FC_MAX_BDS_PER_CMD 256
74
75#define BNX2FC_SQ_WQES_MAX 256
76
77#define BNX2FC_SCSI_MAX_SQES ((3 * BNX2FC_SQ_WQES_MAX) / 8)
78#define BNX2FC_TM_MAX_SQES ((BNX2FC_SQ_WQES_MAX) / 2)
79#define BNX2FC_ELS_MAX_SQES (BNX2FC_TM_MAX_SQES - 1)
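/*
 * With BNX2FC_SQ_WQES_MAX = 256, the budgets above work out to 96 SQEs for
 * SCSI commands ((3 * 256) / 8), 128 for task management (256 / 2), and
 * 127 for ELS requests (128 - 1).
 */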
80
81#define BNX2FC_RQ_WQES_MAX 16
82#define BNX2FC_CQ_WQES_MAX (BNX2FC_SQ_WQES_MAX + BNX2FC_RQ_WQES_MAX)
83
84#define BNX2FC_NUM_MAX_SESS 128
85#define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS))
86
87#define BNX2FC_MAX_OUTSTANDING_CMNDS 4096
88#define BNX2FC_MIN_PAYLOAD 256
89#define BNX2FC_MAX_PAYLOAD 2048
90
91#define BNX2FC_RQ_BUF_SZ 256
92#define BNX2FC_RQ_BUF_LOG_SZ (ilog2(BNX2FC_RQ_BUF_SZ))
93
94#define BNX2FC_SQ_WQE_SIZE (sizeof(struct fcoe_sqe))
95#define BNX2FC_CQ_WQE_SIZE (sizeof(struct fcoe_cqe))
96#define BNX2FC_RQ_WQE_SIZE (BNX2FC_RQ_BUF_SZ)
97#define BNX2FC_XFERQ_WQE_SIZE (sizeof(struct fcoe_xfrqe))
98#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe))
99#define BNX2FC_5771X_DB_PAGE_SIZE 128
100
101#define BNX2FC_MAX_TASKS BNX2FC_MAX_OUTSTANDING_CMNDS
102#define BNX2FC_TASK_SIZE 128
103#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE)
104#define BNX2FC_TASK_CTX_ARR_SZ (BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE)
105
106#define BNX2FC_MAX_ROWS_IN_HASH_TBL 8
107#define BNX2FC_HASH_TBL_CHUNK_SIZE (16 * 1024)
108
109#define BNX2FC_MAX_SEQS 255
110
111#define BNX2FC_READ (1 << 1)
112#define BNX2FC_WRITE (1 << 0)
113
114#define BNX2FC_MIN_XID 0
115#define BNX2FC_MAX_XID (BNX2FC_MAX_OUTSTANDING_CMNDS - 1)
116#define FCOE_MIN_XID (BNX2FC_MAX_OUTSTANDING_CMNDS)
117#define FCOE_MAX_XID \
118 (BNX2FC_MAX_OUTSTANDING_CMNDS + (nr_cpu_ids * 256))
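/*
 * The xid space is partitioned: xids 0 through BNX2FC_MAX_XID are managed
 * by the bnx2fc command manager for offloaded I/O, while FCOE_MIN_XID and
 * above are left to the libfc exchange manager (per-cpu pools of 256).
 */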
119#define BNX2FC_MAX_LUN 0xFFFF
120#define BNX2FC_MAX_FCP_TGT 256
121#define BNX2FC_MAX_CMD_LEN 16
122
123#define BNX2FC_TM_TIMEOUT 60 /* secs */
124#define BNX2FC_IO_TIMEOUT 20000UL /* msecs */
125
126#define BNX2FC_WAIT_CNT 120
127#define BNX2FC_FW_TIMEOUT (3 * HZ)
128
129#define PORT_MAX 2
130
131#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
132
133/* FC FCP Status */
134#define FC_GOOD 0
135
136#define BNX2FC_RNID_HBA 0x7
137
138/* bnx2fc driver uses only one instance of fcoe_percpu_s */
139extern struct fcoe_percpu_s bnx2fc_global;
140
141extern struct workqueue_struct *bnx2fc_wq;
142
143struct bnx2fc_percpu_s {
144 struct task_struct *iothread;
145 struct list_head work_list;
146 spinlock_t fp_work_lock;
147};
148
149
150struct bnx2fc_hba {
151 struct list_head link;
152 struct cnic_dev *cnic;
153 struct pci_dev *pcidev;
154 struct net_device *netdev;
155 struct net_device *phys_dev;
156 unsigned long reg_with_cnic;
157 #define BNX2FC_CNIC_REGISTERED 1
158 struct packet_type fcoe_packet_type;
159 struct packet_type fip_packet_type;
160 struct bnx2fc_cmd_mgr *cmd_mgr;
161 struct workqueue_struct *timer_work_queue;
162 struct kref kref;
163 spinlock_t hba_lock;
164 struct mutex hba_mutex;
165 unsigned long adapter_state;
166 #define ADAPTER_STATE_UP 0
167 #define ADAPTER_STATE_GOING_DOWN 1
168 #define ADAPTER_STATE_LINK_DOWN 2
169 #define ADAPTER_STATE_READY 3
170 u32 flags;
171 unsigned long init_done;
172 #define BNX2FC_FW_INIT_DONE 0
173 #define BNX2FC_CTLR_INIT_DONE 1
174 #define BNX2FC_CREATE_DONE 2
175 struct fcoe_ctlr ctlr;
176 u8 vlan_enabled;
177 int vlan_id;
178 u32 next_conn_id;
179 struct fcoe_task_ctx_entry **task_ctx;
180 dma_addr_t *task_ctx_dma;
181 struct regpair *task_ctx_bd_tbl;
182 dma_addr_t task_ctx_bd_dma;
183
184 int hash_tbl_segment_count;
185 void **hash_tbl_segments;
186 void *hash_tbl_pbl;
187 dma_addr_t hash_tbl_pbl_dma;
188 struct fcoe_t2_hash_table_entry *t2_hash_tbl;
189 dma_addr_t t2_hash_tbl_dma;
190 char *t2_hash_tbl_ptr;
191 dma_addr_t t2_hash_tbl_ptr_dma;
192
193 char *dummy_buffer;
194 dma_addr_t dummy_buf_dma;
195
196 struct fcoe_statistics_params *stats_buffer;
197 dma_addr_t stats_buf_dma;
198
199 /*
200 * PCI related info.
201 */
202 u16 pci_did;
203 u16 pci_vid;
204 u16 pci_sdid;
205 u16 pci_svid;
206 u16 pci_func;
207 u16 pci_devno;
208
209 struct task_struct *l2_thread;
210
211 /* linkdown handling */
212 wait_queue_head_t shutdown_wait;
213 int wait_for_link_down;
214
215 /* destroy handling */
216 struct timer_list destroy_timer;
217 wait_queue_head_t destroy_wait;
218
219 /* Active list of offloaded sessions */
220 struct bnx2fc_rport *tgt_ofld_list[BNX2FC_NUM_MAX_SESS];
221 int num_ofld_sess;
222
223 /* statistics */
224 struct completion stat_req_done;
225};
226
227#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_hba, ctlr)
228
229struct bnx2fc_cmd_mgr {
230 struct bnx2fc_hba *hba;
231 u16 next_idx;
232 struct list_head *free_list;
233 spinlock_t *free_list_lock;
234 struct io_bdt **io_bdt_pool;
235 struct bnx2fc_cmd **cmds;
236};
237
238struct bnx2fc_rport {
239 struct fcoe_port *port;
240 struct fc_rport *rport;
241 struct fc_rport_priv *rdata;
242 void __iomem *ctx_base;
243#define DPM_TRIGER_TYPE 0x40
244 u32 fcoe_conn_id;
245 u32 context_id;
246 u32 sid;
247
248 unsigned long flags;
249#define BNX2FC_FLAG_SESSION_READY 0x1
250#define BNX2FC_FLAG_OFFLOADED 0x2
251#define BNX2FC_FLAG_DISABLED 0x3
252#define BNX2FC_FLAG_DESTROYED 0x4
253#define BNX2FC_FLAG_OFLD_REQ_CMPL 0x5
254#define BNX2FC_FLAG_DESTROY_CMPL 0x6
255#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x7
256#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x8
257#define BNX2FC_FLAG_EXPL_LOGO 0x9
258
259 u32 max_sqes;
260 u32 max_rqes;
261 u32 max_cqes;
262
263 struct fcoe_sqe *sq;
264 dma_addr_t sq_dma;
265 u16 sq_prod_idx;
266 u8 sq_curr_toggle_bit;
267 u32 sq_mem_size;
268
269 struct fcoe_cqe *cq;
270 dma_addr_t cq_dma;
271 u32 cq_cons_idx;
272 u8 cq_curr_toggle_bit;
273 u32 cq_mem_size;
274
275 void *rq;
276 dma_addr_t rq_dma;
277 u32 rq_prod_idx;
278 u32 rq_cons_idx;
279 u32 rq_mem_size;
280
281 void *rq_pbl;
282 dma_addr_t rq_pbl_dma;
283 u32 rq_pbl_size;
284
285 struct fcoe_xfrqe *xferq;
286 dma_addr_t xferq_dma;
287 u32 xferq_mem_size;
288
289 struct fcoe_confqe *confq;
290 dma_addr_t confq_dma;
291 u32 confq_mem_size;
292
293 void *confq_pbl;
294 dma_addr_t confq_pbl_dma;
295 u32 confq_pbl_size;
296
297 struct fcoe_conn_db *conn_db;
298 dma_addr_t conn_db_dma;
299 u32 conn_db_mem_size;
300
301 struct fcoe_sqe *lcq;
302 dma_addr_t lcq_dma;
303 u32 lcq_mem_size;
304
305 void *ofld_req[4];
306 dma_addr_t ofld_req_dma[4];
307 void *enbl_req;
308 dma_addr_t enbl_req_dma;
309
310 spinlock_t tgt_lock;
311 spinlock_t cq_lock;
312 atomic_t num_active_ios;
313 u32 flush_in_prog;
314 unsigned long work_time_slice;
315 unsigned long timestamp;
316 struct list_head free_task_list;
317 struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
318 atomic_t pi;
319 atomic_t ci;
320 struct list_head active_cmd_queue;
321 struct list_head els_queue;
322 struct list_head io_retire_queue;
323 struct list_head active_tm_queue;
324
325 struct timer_list ofld_timer;
326 wait_queue_head_t ofld_wait;
327
328 struct timer_list upld_timer;
329 wait_queue_head_t upld_wait;
330};
331
332struct bnx2fc_mp_req {
333 u8 tm_flags;
334
335 u32 req_len;
336 void *req_buf;
337 dma_addr_t req_buf_dma;
338 struct fcoe_bd_ctx *mp_req_bd;
339 dma_addr_t mp_req_bd_dma;
340 struct fc_frame_header req_fc_hdr;
341
342 u32 resp_len;
343 void *resp_buf;
344 dma_addr_t resp_buf_dma;
345 struct fcoe_bd_ctx *mp_resp_bd;
346 dma_addr_t mp_resp_bd_dma;
347 struct fc_frame_header resp_fc_hdr;
348};
349
350struct bnx2fc_els_cb_arg {
351 struct bnx2fc_cmd *aborted_io_req;
352 struct bnx2fc_cmd *io_req;
353 u16 l2_oxid;
354};
355
356/* bnx2fc command structure */
357struct bnx2fc_cmd {
358 struct list_head link;
359 u8 on_active_queue;
360 u8 on_tmf_queue;
361 u8 cmd_type;
362#define BNX2FC_SCSI_CMD 1
363#define BNX2FC_TASK_MGMT_CMD 2
364#define BNX2FC_ABTS 3
365#define BNX2FC_ELS 4
366#define BNX2FC_CLEANUP 5
367 u8 io_req_flags;
368 struct kref refcount;
369 struct fcoe_port *port;
370 struct bnx2fc_rport *tgt;
371 struct scsi_cmnd *sc_cmd;
372 struct bnx2fc_cmd_mgr *cmd_mgr;
373 struct bnx2fc_mp_req mp_req;
374 void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg);
375 struct bnx2fc_els_cb_arg *cb_arg;
376 struct delayed_work timeout_work; /* timer for ULP timeouts */
377 struct completion tm_done;
378 int wait_for_comp;
379 u16 xid;
380 struct fcoe_task_ctx_entry *task;
381 struct io_bdt *bd_tbl;
382 struct fcp_rsp *rsp;
383 size_t data_xfer_len;
384 unsigned long req_flags;
385#define BNX2FC_FLAG_ISSUE_RRQ 0x1
386#define BNX2FC_FLAG_ISSUE_ABTS 0x2
387#define BNX2FC_FLAG_ABTS_DONE 0x3
388#define BNX2FC_FLAG_TM_COMPL 0x4
389#define BNX2FC_FLAG_TM_TIMEOUT 0x5
390#define BNX2FC_FLAG_IO_CLEANUP 0x6
391#define BNX2FC_FLAG_RETIRE_OXID 0x7
392#define BNX2FC_FLAG_EH_ABORT 0x8
393#define BNX2FC_FLAG_IO_COMPL 0x9
394#define BNX2FC_FLAG_ELS_DONE 0xa
395#define BNX2FC_FLAG_ELS_TIMEOUT 0xb
396 u32 fcp_resid;
397 u32 fcp_rsp_len;
398 u32 fcp_sns_len;
399 u8 cdb_status; /* SCSI IO status */
400 u8 fcp_status; /* FCP IO status */
401 u8 fcp_rsp_code;
402 u8 scsi_comp_flags;
403};
404
405struct io_bdt {
406 struct bnx2fc_cmd *io_req;
407 struct fcoe_bd_ctx *bd_tbl;
408 dma_addr_t bd_tbl_dma;
409 u16 bd_valid;
410};
411
412struct bnx2fc_work {
413 struct list_head list;
414 struct bnx2fc_rport *tgt;
415 u16 wqe;
416};
417struct bnx2fc_unsol_els {
418 struct fc_lport *lport;
419 struct fc_frame *fp;
420 struct work_struct unsol_els_work;
421};
422
423
424
425struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type);
426void bnx2fc_cmd_release(struct kref *ref);
427int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd);
428int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba);
429int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba);
430int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
431 struct bnx2fc_rport *tgt);
432int bnx2fc_send_session_disable_req(struct fcoe_port *port,
433 struct bnx2fc_rport *tgt);
434int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
435 struct bnx2fc_rport *tgt);
436int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt);
437void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
438 u32 num_cqe);
439int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba);
440void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba);
441int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba);
442void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba);
443struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
444 u16 min_xid, u16 max_xid);
445void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr);
446void bnx2fc_get_link_state(struct bnx2fc_hba *hba);
447char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items);
448void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items);
449int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen);
450int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req);
451int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp);
452int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp);
453int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp);
454int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req);
455int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req);
456void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
457 unsigned int timer_msec);
458int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req);
459void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
460 struct fcoe_task_ctx_entry *task,
461 u16 orig_xid);
462void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
463 struct fcoe_task_ctx_entry *task);
464void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
465 struct fcoe_task_ctx_entry *task);
466void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid);
467void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt);
468int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd);
469int bnx2fc_eh_host_reset(struct scsi_cmnd *sc_cmd);
470int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd);
471int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd);
472void bnx2fc_rport_event_handler(struct fc_lport *lport,
473 struct fc_rport_priv *rport,
474 enum fc_rport_event event);
475void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
476 struct fcoe_task_ctx_entry *task,
477 u8 num_rq);
478void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
479 struct fcoe_task_ctx_entry *task,
480 u8 num_rq);
481void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
482 struct fcoe_task_ctx_entry *task,
483 u8 num_rq);
484void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
485 struct fcoe_task_ctx_entry *task,
486 u8 num_rq);
487void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
488 struct fcoe_task_ctx_entry *task,
489 u8 num_rq);
490void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
491 struct fcp_cmnd *fcp_cmnd);
492
493
494
495void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt);
496struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
497 struct fc_frame *fp, unsigned int op,
498 void (*resp)(struct fc_seq *,
499 struct fc_frame *,
500 void *),
501 void *arg, u32 timeout);
502int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt);
503void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe);
504struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
505 u32 port_id);
506void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
507 unsigned char *buf,
508 u32 frame_len, u16 l2_oxid);
509int bnx2fc_send_stat_req(struct bnx2fc_hba *hba);
510
511#endif
diff --git a/drivers/scsi/bnx2fc/bnx2fc_constants.h b/drivers/scsi/bnx2fc/bnx2fc_constants.h
new file mode 100644
index 000000000000..fe7769173c43
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_constants.h
@@ -0,0 +1,206 @@
1#ifndef __BNX2FC_CONSTANTS_H_
2#define __BNX2FC_CONSTANTS_H_
3
4/*
5 * This file defines HSI constants for the FCoE flows
6 */
7
8/* KWQ/KCQ FCoE layer code */
9#define FCOE_KWQE_LAYER_CODE (7)
10
11/* KWQ (kernel work queue) request op codes */
12#define FCOE_KWQE_OPCODE_INIT1 (0)
13#define FCOE_KWQE_OPCODE_INIT2 (1)
14#define FCOE_KWQE_OPCODE_INIT3 (2)
15#define FCOE_KWQE_OPCODE_OFFLOAD_CONN1 (3)
16#define FCOE_KWQE_OPCODE_OFFLOAD_CONN2 (4)
17#define FCOE_KWQE_OPCODE_OFFLOAD_CONN3 (5)
18#define FCOE_KWQE_OPCODE_OFFLOAD_CONN4 (6)
19#define FCOE_KWQE_OPCODE_ENABLE_CONN (7)
20#define FCOE_KWQE_OPCODE_DISABLE_CONN (8)
21#define FCOE_KWQE_OPCODE_DESTROY_CONN (9)
22#define FCOE_KWQE_OPCODE_DESTROY (10)
23#define FCOE_KWQE_OPCODE_STAT (11)
24
25/* KCQ (kernel completion queue) response op codes */
26#define FCOE_KCQE_OPCODE_INIT_FUNC (0x10)
27#define FCOE_KCQE_OPCODE_DESTROY_FUNC (0x11)
28#define FCOE_KCQE_OPCODE_STAT_FUNC (0x12)
29#define FCOE_KCQE_OPCODE_OFFLOAD_CONN (0x15)
30#define FCOE_KCQE_OPCODE_ENABLE_CONN (0x16)
31#define FCOE_KCQE_OPCODE_DISABLE_CONN (0x17)
32#define FCOE_KCQE_OPCODE_DESTROY_CONN (0x18)
33#define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
34#define FCOE_KCQE_OPCODE_FCOE_ERROR (0x21)
35
36/* KCQ (kernel completion queue) completion status */
37#define FCOE_KCQE_COMPLETION_STATUS_SUCCESS (0x0)
38#define FCOE_KCQE_COMPLETION_STATUS_ERROR (0x1)
39#define FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x2)
40#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3)
41#define FCOE_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x4)
42#define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR (0x5)
43
44/* Unsolicited CQE type */
45#define FCOE_UNSOLICITED_FRAME_CQE_TYPE 0
46#define FCOE_ERROR_DETECTION_CQE_TYPE 1
47#define FCOE_WARNING_DETECTION_CQE_TYPE 2
48
49/* Task context constants */
50/* State after the driver has initialized the task, in case timer services are
51 * required */
51#define FCOE_TASK_TX_STATE_INIT 0
52/* If timer services are required, this is updated by Xstorm after it starts
53 * processing the task. If no timer facilities are required, the driver
54 * initializes the state to this value */
55#define FCOE_TASK_TX_STATE_NORMAL 1
56/* Task is under abort procedure. Updated in order to stop processing of
57 * pending WQEs on this task */
58#define FCOE_TASK_TX_STATE_ABORT 2
59/* For E_D_T_TOV timer expiration in Xstorm (Class 2 only) */
60#define FCOE_TASK_TX_STATE_ERROR 3
61/* For REC_TOV timer expiration indication received from Xstorm */
62#define FCOE_TASK_TX_STATE_WARNING 4
63/* For completed unsolicited task */
64#define FCOE_TASK_TX_STATE_UNSOLICITED_COMPLETED 5
65/* For exchange cleanup request task */
66#define FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP 6
67/* For sequence cleanup request task */
68#define FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP 7
69/* Mark task as aborted and indicate that ABTS was not transmitted */
70#define FCOE_TASK_TX_STATE_BEFORE_ABTS_TX 8
71/* Mark task as aborted and indicate that ABTS was transmitted */
72#define FCOE_TASK_TX_STATE_AFTER_ABTS_TX 9
73/* For completion of the ABTS task. */
74#define FCOE_TASK_TX_STATE_ABTS_TX_COMPLETED 10
75/* Mark task as aborted and indicate that Exchange cleanup was not transmitted
76 */
77#define FCOE_TASK_TX_STATE_BEFORE_EXCHANGE_CLEANUP_TX 11
78/* Mark task as aborted and indicate that Exchange cleanup was transmitted */
79#define FCOE_TASK_TX_STATE_AFTER_EXCHANGE_CLEANUP_TX 12
80
81#define FCOE_TASK_RX_STATE_NORMAL 0
82#define FCOE_TASK_RX_STATE_COMPLETED 1
83/* Obsolete: Intermediate completion (middle path with local completion) */
84#define FCOE_TASK_RX_STATE_INTER_COMP 2
85/* For REC_TOV timer expiration indication received from Xstorm */
86#define FCOE_TASK_RX_STATE_WARNING 3
87/* For E_D_T_TOV timer expiration in Ustorm */
88#define FCOE_TASK_RX_STATE_ERROR 4
89/* ABTS ACC arrived; wait for local completion to finally complete the task. */
90#define FCOE_TASK_RX_STATE_ABTS_ACC_ARRIVED 5
91/* Local completion arrived; wait for ABTS ACC to finally complete the task. */
92#define FCOE_TASK_RX_STATE_ABTS_LOCAL_COMP_ARRIVED 6
93/* Special completion indication in case the task was aborted. */
94#define FCOE_TASK_RX_STATE_ABTS_COMPLETED 7
95/* Special completion indication in case the task was cleaned. */
96#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED 8
97/* Special completion indication (in the task that requested the exchange
98 * cleanup) in case the cleaned task is non-valid. */
99#define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED 9
100/* Special completion indication (in the task that requested the sequence
101 * cleanup) in case the cleaned task was already returned to normal. */
102#define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP 10
103/* Exchange cleanup arrived; wait until the xfer is handled to finally
104 * complete the task. */
105#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_ARRIVED 11
106/* Xfer handled, wait for exchange cleanup to finally complete the task. */
107#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_HANDLED_XFER 12
108
109#define FCOE_TASK_TYPE_WRITE 0
110#define FCOE_TASK_TYPE_READ 1
111#define FCOE_TASK_TYPE_MIDPATH 2
112#define FCOE_TASK_TYPE_UNSOLICITED 3
113#define FCOE_TASK_TYPE_ABTS 4
114#define FCOE_TASK_TYPE_EXCHANGE_CLEANUP 5
115#define FCOE_TASK_TYPE_SEQUENCE_CLEANUP 6
116
117#define FCOE_TASK_DEV_TYPE_DISK 0
118#define FCOE_TASK_DEV_TYPE_TAPE 1
119
120#define FCOE_TASK_CLASS_TYPE_3 0
121#define FCOE_TASK_CLASS_TYPE_2 1
122
123/* Everest FCoE connection type */
124#define B577XX_FCOE_CONNECTION_TYPE 4
125
126/* Error codes for Error Reporting in fast path flows */
127/* XFER error codes */
128#define FCOE_ERROR_CODE_XFER_OOO_RO 0
129#define FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED 1
130#define FCOE_ERROR_CODE_XFER_NULL_BURST_LEN 2
131#define FCOE_ERROR_CODE_XFER_RO_GREATER_THAN_DATA2TRNS 3
132#define FCOE_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE 4
133#define FCOE_ERROR_CODE_XFER_TASK_TYPE_NOT_WRITE 5
134#define FCOE_ERROR_CODE_XFER_PEND_XFER_SET 6
135#define FCOE_ERROR_CODE_XFER_OPENED_SEQ 7
136#define FCOE_ERROR_CODE_XFER_FCTL 8
137
138/* FCP RSP error codes */
139#define FCOE_ERROR_CODE_FCP_RSP_BIDI_FLAGS_SET 9
140#define FCOE_ERROR_CODE_FCP_RSP_UNDERFLOW 10
141#define FCOE_ERROR_CODE_FCP_RSP_OVERFLOW 11
142#define FCOE_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD 12
143#define FCOE_ERROR_CODE_FCP_RSP_INVALID_SNS_FIELD 13
144#define FCOE_ERROR_CODE_FCP_RSP_INVALID_PAYLOAD_SIZE 14
145#define FCOE_ERROR_CODE_FCP_RSP_PEND_XFER_SET 15
146#define FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ 16
147#define FCOE_ERROR_CODE_FCP_RSP_FCTL 17
148#define FCOE_ERROR_CODE_FCP_RSP_LAST_SEQ_RESET 18
149#define FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET 19
150
151/* FCP DATA error codes */
152#define FCOE_ERROR_CODE_DATA_OOO_RO 20
153#define FCOE_ERROR_CODE_DATA_EXCEEDS_DEFINED_MAX_FRAME_SIZE 21
154#define FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS 22
155#define FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET 23
156#define FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET 24
157#define FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET 25
158#define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET 26
159#define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ 27
160#define FCOE_ERROR_CODE_DATA_FCTL 28
161
162/* Middle path error codes */
163#define FCOE_ERROR_CODE_MIDPATH_TYPE_NOT_ELS 29
164#define FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET 30
165#define FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET 31
166#define FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET 32
167#define FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET 33
168#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_FCTL 34
169#define FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY 35
170#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL 36
171
172/* ABTS error codes */
173#define FCOE_ERROR_CODE_ABTS_REPLY_F_CTL 37
174#define FCOE_ERROR_CODE_ABTS_REPLY_DDF_RCTL_FIELD 38
175#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_BLS_RCTL 39
176#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL 40
177#define FCOE_ERROR_CODE_ABTS_REPLY_RCTL_GENERAL_MISMATCH 41
178
179/* Common error codes */
180#define FCOE_ERROR_CODE_COMMON_MIDDLE_FRAME_WITH_PAD 42
181#define FCOE_ERROR_CODE_COMMON_SEQ_INIT_IN_TCE 43
182#define FCOE_ERROR_CODE_COMMON_FC_HDR_RX_ID_MISMATCH 44
183#define FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT 45
184#define FCOE_ERROR_CODE_COMMON_DATA_FC_HDR_FCP_TYPE_MISMATCH 46
185#define FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES 47
186#define FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR 48
187#define FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG 49
188#define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED 50
189
190/* Unsolicited Rx error codes */
191#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_ELS 51
192#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_BLS 52
193#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_ELS 53
194#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_BLS 54
195#define FCOE_ERROR_CODE_UNSOLICITED_R_CTL 55
196
197#define FCOE_ERROR_CODE_RW_TASK_DDF_RCTL_INFO_FIELD 56
198#define FCOE_ERROR_CODE_RW_TASK_INVALID_RCTL 57
199#define FCOE_ERROR_CODE_RW_TASK_RCTL_GENERAL_MISMATCH 58
200
201/* Timer error codes */
202#define FCOE_ERROR_CODE_E_D_TOV_TIMER_EXPIRATION 60
203#define FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION 61
204
205
206#endif /* BNX2FC_CONSTANTS_H_ */
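
The codes above sit in contiguous numeric ranges per flow, so a completion handler can bucket an error with simple comparisons. A minimal sketch under that observation (classify_fcoe_error() is a hypothetical helper, not part of this patch):

    static const char *classify_fcoe_error(u32 err_code)
    {
            if (err_code <= FCOE_ERROR_CODE_XFER_FCTL)
                    return "XFER";
            if (err_code <= FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET)
                    return "FCP_RSP";
            if (err_code <= FCOE_ERROR_CODE_DATA_FCTL)
                    return "FCP_DATA";
            if (err_code <= FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL)
                    return "MIDPATH";
            if (err_code <= FCOE_ERROR_CODE_ABTS_REPLY_RCTL_GENERAL_MISMATCH)
                    return "ABTS";
            if (err_code <= FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED)
                    return "COMMON";
            if (err_code <= FCOE_ERROR_CODE_UNSOLICITED_R_CTL)
                    return "UNSOLICITED";
            return "RW_TASK/TIMER";    /* codes 56 - 61 */
    }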
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h
new file mode 100644
index 000000000000..7f6aff68cc53
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h
@@ -0,0 +1,70 @@
1#ifndef __BNX2FC_DEBUG__
2#define __BNX2FC_DEBUG__
3
4/* Log level bit mask */
5#define LOG_IO 0x01 /* scsi cmd error, cleanup */
6#define LOG_TGT 0x02 /* Session setup, cleanup, etc. */
7#define LOG_HBA 0x04 /* lport events, link, MTU, etc. */
8#define LOG_ELS 0x08 /* ELS logs */
9#define LOG_MISC 0x10 /* FCoE L2 frame related logs */
10#define LOG_ALL 0xff /* LOG all messages */
11
12extern unsigned int bnx2fc_debug_level;
13
14#define BNX2FC_CHK_LOGGING(LEVEL, CMD) \
15 do { \
16 if (unlikely(bnx2fc_debug_level & LEVEL)) \
17 do { \
18 CMD; \
19 } while (0); \
20 } while (0)
21
22#define BNX2FC_ELS_DBG(fmt, arg...) \
23 BNX2FC_CHK_LOGGING(LOG_ELS, \
24 printk(KERN_ALERT PFX fmt, ##arg))
25
26#define BNX2FC_MISC_DBG(fmt, arg...) \
27 BNX2FC_CHK_LOGGING(LOG_MISC, \
28 printk(KERN_ALERT PFX fmt, ##arg))
29
30#define BNX2FC_IO_DBG(io_req, fmt, arg...) \
31 do { \
32 if (!io_req || !io_req->port || !io_req->port->lport || \
33 !io_req->port->lport->host) \
34 BNX2FC_CHK_LOGGING(LOG_IO, \
35 printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
36 else \
37 BNX2FC_CHK_LOGGING(LOG_IO, \
38 shost_printk(KERN_ALERT, \
39 (io_req)->port->lport->host, \
40 PFX "xid:0x%x " fmt, \
41 (io_req)->xid, ##arg)); \
42 } while (0)
43
44#define BNX2FC_TGT_DBG(tgt, fmt, arg...) \
45 do { \
46 if (!tgt || !tgt->port || !tgt->port->lport || \
47 !tgt->port->lport->host || !tgt->rport) \
48 BNX2FC_CHK_LOGGING(LOG_TGT, \
49 printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
50 else \
51 BNX2FC_CHK_LOGGING(LOG_TGT, \
52 shost_printk(KERN_ALERT, \
53 (tgt)->port->lport->host, \
54 PFX "port:%x " fmt, \
55 (tgt)->rport->port_id, ##arg)); \
56 } while (0)
57
58
59#define BNX2FC_HBA_DBG(lport, fmt, arg...) \
60 do { \
61 if (!lport || !lport->host) \
62 BNX2FC_CHK_LOGGING(LOG_HBA, \
63 printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
64 else \
65 BNX2FC_CHK_LOGGING(LOG_HBA, \
66 shost_printk(KERN_ALERT, lport->host, \
67 PFX fmt, ##arg)); \
68 } while (0)
69
70#endif
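
Usage sketch, assuming a valid bnx2fc_rport pointer named tgt: with the module loaded as modprobe bnx2fc debug_logging=0x0a (LOG_TGT | LOG_ELS), the first two calls below are emitted and the LOG_MISC one is filtered out by BNX2FC_CHK_LOGGING:

    BNX2FC_TGT_DBG(tgt, "session offloaded, sid = 0x%x\n", tgt->sid);
    BNX2FC_ELS_DBG("sending RRQ\n");
    BNX2FC_MISC_DBG("dropped L2 frame\n");    /* masked at 0x0a */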
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
new file mode 100644
index 000000000000..7a11a255157f
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -0,0 +1,515 @@
1/*
2 * bnx2fc_els.c: Broadcom NetXtreme II Linux FCoE offload driver.
3 * This file contains helper routines that handle ELS requests
4 * and responses.
5 *
6 * Copyright (c) 2008 - 2010 Broadcom Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation.
11 *
12 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
13 */
14
15#include "bnx2fc.h"
16
17static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
18 void *arg);
19static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
20 void *arg);
21static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
22 void *data, u32 data_len,
23 void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
24 struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec);
25
26static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg)
27{
28 struct bnx2fc_cmd *orig_io_req;
29 struct bnx2fc_cmd *rrq_req;
30 int rc = 0;
31
32 BUG_ON(!cb_arg);
33 rrq_req = cb_arg->io_req;
34 orig_io_req = cb_arg->aborted_io_req;
35 BUG_ON(!orig_io_req);
36 BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n",
37 orig_io_req->xid, rrq_req->xid);
38
39 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
40
41 if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) {
42 /*
43 * els req is timed out. cleanup the IO with FW and
44 * drop the completion. Remove from active_cmd_queue.
45 */
46 BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n",
47 rrq_req->xid);
48
49 if (rrq_req->on_active_queue) {
50 list_del_init(&rrq_req->link);
51 rrq_req->on_active_queue = 0;
52 rc = bnx2fc_initiate_cleanup(rrq_req);
53 BUG_ON(rc);
54 }
55 }
56 kfree(cb_arg);
57}
58int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
59{
60
61 struct fc_els_rrq rrq;
62 struct bnx2fc_rport *tgt = aborted_io_req->tgt;
63 struct fc_lport *lport = tgt->rdata->local_port;
64 struct bnx2fc_els_cb_arg *cb_arg = NULL;
65 u32 sid = tgt->sid;
66 u32 r_a_tov = lport->r_a_tov;
67 unsigned long start = jiffies;
68 int rc;
69
70 BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
71 aborted_io_req->xid);
72 memset(&rrq, 0, sizeof(rrq));
73
74 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO);
75 if (!cb_arg) {
76 printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n");
77 rc = -ENOMEM;
78 goto rrq_err;
79 }
80
81 cb_arg->aborted_io_req = aborted_io_req;
82
83 rrq.rrq_cmd = ELS_RRQ;
84 hton24(rrq.rrq_s_id, sid);
85 rrq.rrq_ox_id = htons(aborted_io_req->xid);
86 rrq.rrq_rx_id = htons(aborted_io_req->task->rx_wr_tx_rd.rx_id);
87
88retry_rrq:
89 rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
90 bnx2fc_rrq_compl, cb_arg,
91 r_a_tov);
92 if (rc == -ENOMEM) {
93 if (time_after(jiffies, start + (10 * HZ))) {
94 BNX2FC_ELS_DBG("rrq Failed\n");
95 rc = FAILED;
96 goto rrq_err;
97 }
98 msleep(20);
99 goto retry_rrq;
100 }
101rrq_err:
102 if (rc) {
103 BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n",
104 aborted_io_req->xid);
105 kfree(cb_arg);
106 spin_lock_bh(&tgt->tgt_lock);
107 kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release);
108 spin_unlock_bh(&tgt->tgt_lock);
109 }
110 return rc;
111}
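
Note the retry policy above: on -ENOMEM the RRQ send retries within a fixed time budget rather than a fixed attempt count. time_after(jiffies, start + 10 * HZ) bounds the loop to roughly 10 seconds, and with msleep(20) between attempts that allows on the order of 10000 ms / 20 ms = 500 allocation retries before the RRQ is failed.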
112
113static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg)
114{
115 struct bnx2fc_cmd *els_req;
116 struct bnx2fc_rport *tgt;
117 struct bnx2fc_mp_req *mp_req;
118 struct fc_frame_header *fc_hdr;
119 unsigned char *buf;
120 void *resp_buf;
121 u32 resp_len, hdr_len;
122 u16 l2_oxid;
123 int frame_len;
124 int rc = 0;
125
126 l2_oxid = cb_arg->l2_oxid;
127 BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid);
128
129 els_req = cb_arg->io_req;
130 if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) {
131 /*
132 * els req is timed out. cleanup the IO with FW and
133 * drop the completion. libfc will handle the els timeout
134 */
135 if (els_req->on_active_queue) {
136 list_del_init(&els_req->link);
137 els_req->on_active_queue = 0;
138 rc = bnx2fc_initiate_cleanup(els_req);
139 BUG_ON(rc);
140 }
141 goto free_arg;
142 }
143
144 tgt = els_req->tgt;
145 mp_req = &(els_req->mp_req);
146 fc_hdr = &(mp_req->resp_fc_hdr);
147 resp_len = mp_req->resp_len;
148 resp_buf = mp_req->resp_buf;
149
150 buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
151 if (!buf) {
152 printk(KERN_ERR PFX "Unable to alloc mp buf\n");
153 goto free_arg;
154 }
155 hdr_len = sizeof(*fc_hdr);
156 if (hdr_len + resp_len > PAGE_SIZE) {
157 printk(KERN_ERR PFX "l2_els_compl: resp len is "
158 "beyond page size\n");
159 goto free_buf;
160 }
161 memcpy(buf, fc_hdr, hdr_len);
162 memcpy(buf + hdr_len, resp_buf, resp_len);
163 frame_len = hdr_len + resp_len;
164
165 bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);
166
167free_buf:
168 kfree(buf);
169free_arg:
170 kfree(cb_arg);
171}
172
173int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
174{
175 struct fc_els_adisc *adisc;
176 struct fc_frame_header *fh;
177 struct bnx2fc_els_cb_arg *cb_arg;
178 struct fc_lport *lport = tgt->rdata->local_port;
179 u32 r_a_tov = lport->r_a_tov;
180 int rc;
181
182 fh = fc_frame_header_get(fp);
183 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
184 if (!cb_arg) {
185 printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n");
186 return -ENOMEM;
187 }
188
189 cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
190
191 BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
192 adisc = fc_frame_payload_get(fp, sizeof(*adisc));
193 /* adisc is initialized by libfc */
194 rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
195 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
196 if (rc)
197 kfree(cb_arg);
198 return rc;
199}
200
201int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
202{
203 struct fc_els_logo *logo;
204 struct fc_frame_header *fh;
205 struct bnx2fc_els_cb_arg *cb_arg;
206 struct fc_lport *lport = tgt->rdata->local_port;
207 u32 r_a_tov = lport->r_a_tov;
208 int rc;
209
210 fh = fc_frame_header_get(fp);
211 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
212 if (!cb_arg) {
213 printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
214 return -ENOMEM;
215 }
216
217 cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
218
219 BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
220 logo = fc_frame_payload_get(fp, sizeof(*logo));
221 /* logo is initialized by libfc */
222 rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
223 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
224 if (rc)
225 kfree(cb_arg);
226 return rc;
227}
228
229int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
230{
231 struct fc_els_rls *rls;
232 struct fc_frame_header *fh;
233 struct bnx2fc_els_cb_arg *cb_arg;
234 struct fc_lport *lport = tgt->rdata->local_port;
235 u32 r_a_tov = lport->r_a_tov;
236 int rc;
237
238 fh = fc_frame_header_get(fp);
239 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
240 if (!cb_arg) {
241 printk(KERN_ERR PFX "Unable to allocate cb_arg for RLS\n");
242 return -ENOMEM;
243 }
244
245 cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
246
247 rls = fc_frame_payload_get(fp, sizeof(*rls));
248 /* rls is initialized by libfc */
249 rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
250 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
251 if (rc)
252 kfree(cb_arg);
253 return rc;
254}
255
256static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
257 void *data, u32 data_len,
258 void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
259 struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
260{
261 struct fcoe_port *port = tgt->port;
262 struct bnx2fc_hba *hba = port->priv;
263 struct fc_rport *rport = tgt->rport;
264 struct fc_lport *lport = port->lport;
265 struct bnx2fc_cmd *els_req;
266 struct bnx2fc_mp_req *mp_req;
267 struct fc_frame_header *fc_hdr;
268 struct fcoe_task_ctx_entry *task;
269 struct fcoe_task_ctx_entry *task_page;
270 int rc = 0;
271 int task_idx, index;
272 u32 did, sid;
273 u16 xid;
274
275 rc = fc_remote_port_chkready(rport);
276 if (rc) {
277 printk(KERN_ALERT PFX "els 0x%x: rport not ready\n", op);
278 rc = -EINVAL;
279 goto els_err;
280 }
281 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
282 printk(KERN_ALERT PFX "els 0x%x: link is not ready\n", op);
283 rc = -EINVAL;
284 goto els_err;
285 }
286 if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) ||
287 (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags))) {
288 printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
289 rc = -EINVAL;
290 goto els_err;
291 }
292 els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
293 if (!els_req) {
294 rc = -ENOMEM;
295 goto els_err;
296 }
297
298 els_req->sc_cmd = NULL;
299 els_req->port = port;
300 els_req->tgt = tgt;
301 els_req->cb_func = cb_func;
302 cb_arg->io_req = els_req;
303 els_req->cb_arg = cb_arg;
304
305 mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
306 rc = bnx2fc_init_mp_req(els_req);
307 if (rc == FAILED) {
308 printk(KERN_ALERT PFX "ELS MP request init failed\n");
309 spin_lock_bh(&tgt->tgt_lock);
310 kref_put(&els_req->refcount, bnx2fc_cmd_release);
311 spin_unlock_bh(&tgt->tgt_lock);
312 rc = -ENOMEM;
313 goto els_err;
314 } else {
315 /* rc SUCCESS */
316 rc = 0;
317 }
318
319 /* Set the data_xfer_len to the size of ELS payload */
320 mp_req->req_len = data_len;
321 els_req->data_xfer_len = mp_req->req_len;
322
323 /* Fill ELS Payload */
324 if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
325 memcpy(mp_req->req_buf, data, data_len);
326 } else {
327 printk(KERN_ALERT PFX "Invalid ELS op 0x%x\n", op);
328 els_req->cb_func = NULL;
329 els_req->cb_arg = NULL;
330 spin_lock_bh(&tgt->tgt_lock);
331 kref_put(&els_req->refcount, bnx2fc_cmd_release);
332 spin_unlock_bh(&tgt->tgt_lock);
333 rc = -EINVAL;
334 }
335
336 if (rc)
337 goto els_err;
338
339 /* Fill FC header */
340 fc_hdr = &(mp_req->req_fc_hdr);
341
342 did = tgt->rport->port_id;
343 sid = tgt->sid;
344
345 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
346 FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
347 FC_FC_SEQ_INIT, 0);
348
349 /* Obtain exchange id */
350 xid = els_req->xid;
351 task_idx = xid/BNX2FC_TASKS_PER_PAGE;
352 index = xid % BNX2FC_TASKS_PER_PAGE;
353
354 /* Initialize task context for this IO request */
355 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
356 task = &(task_page[index]);
357 bnx2fc_init_mp_task(els_req, task);
358
359 spin_lock_bh(&tgt->tgt_lock);
360
361 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
362 printk(KERN_ERR PFX "initiate_els.. session not ready\n");
363 els_req->cb_func = NULL;
364 els_req->cb_arg = NULL;
365 kref_put(&els_req->refcount, bnx2fc_cmd_release);
366 spin_unlock_bh(&tgt->tgt_lock);
367 return -EINVAL;
368 }
369
370 if (timer_msec)
371 bnx2fc_cmd_timer_set(els_req, timer_msec);
372 bnx2fc_add_2_sq(tgt, xid);
373
374 els_req->on_active_queue = 1;
375 list_add_tail(&els_req->link, &tgt->els_queue);
376
377 /* Ring doorbell */
378 bnx2fc_ring_doorbell(tgt);
379 spin_unlock_bh(&tgt->tgt_lock);
380
381els_err:
382 return rc;
383}
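
The task-context lookup above maps an exchange id to a page and an offset within that page. As a worked example, assuming BNX2FC_TASKS_PER_PAGE were 256 (the real constant is defined in bnx2fc.h): xid 0x412 gives task_idx = 0x412 / 256 = 4 and index = 0x412 % 256 = 0x12, i.e. entry 0x12 of hba->task_ctx[4].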
384
385void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
386 struct fcoe_task_ctx_entry *task, u8 num_rq)
387{
388 struct bnx2fc_mp_req *mp_req;
389 struct fc_frame_header *fc_hdr;
390 u64 *hdr;
391 u64 *temp_hdr;
392
393 BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x, "
394 "cmd_type = %d\n", els_req->xid, els_req->cmd_type);
395
396 if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
397 &els_req->req_flags)) {
398 BNX2FC_ELS_DBG("Timer context finished processing this "
399 "els - 0x%x\n", els_req->xid);
400 /* This IO doesn't receive a cleanup completion */
401 kref_put(&els_req->refcount, bnx2fc_cmd_release);
402 return;
403 }
404
405 /* Cancel the timeout_work, as we received the response */
406 if (cancel_delayed_work(&els_req->timeout_work))
407 kref_put(&els_req->refcount,
408 bnx2fc_cmd_release); /* drop timer hold */
409
410 if (els_req->on_active_queue) {
411 list_del_init(&els_req->link);
412 els_req->on_active_queue = 0;
413 }
414
415 mp_req = &(els_req->mp_req);
416 fc_hdr = &(mp_req->resp_fc_hdr);
417
418 hdr = (u64 *)fc_hdr;
419 temp_hdr = (u64 *)
420 &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
421 hdr[0] = cpu_to_be64(temp_hdr[0]);
422 hdr[1] = cpu_to_be64(temp_hdr[1]);
423 hdr[2] = cpu_to_be64(temp_hdr[2]);
424
425 mp_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off;
426
427 /* Parse ELS response */
428 if ((els_req->cb_func) && (els_req->cb_arg)) {
429 els_req->cb_func(els_req->cb_arg);
430 els_req->cb_arg = NULL;
431 }
432
433 kref_put(&els_req->refcount, bnx2fc_cmd_release);
434}
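
The three cpu_to_be64() copies above cover the entire response header: an FC frame header is exactly 24 bytes, i.e. three 64-bit words. The header is read out of the (little-endian) task context and swapped back to the big-endian wire layout that libfc expects when the callback parses the response.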
435
436static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
437 void *arg)
438{
439 struct fcoe_ctlr *fip = arg;
440 struct fc_exch *exch = fc_seq_exch(seq);
441 struct fc_lport *lport = exch->lp;
442 u8 *mac;
443 struct fc_frame_header *fh;
444 u8 op;
445
446 if (IS_ERR(fp))
447 goto done;
448
449 mac = fr_cb(fp)->granted_mac;
450 if (is_zero_ether_addr(mac)) {
451 fh = fc_frame_header_get(fp);
452 if (fh->fh_type != FC_TYPE_ELS) {
453 printk(KERN_ERR PFX "bnx2fc_flogi_resp: "
454 "fh_type != FC_TYPE_ELS\n");
455 fc_frame_free(fp);
456 return;
457 }
458 op = fc_frame_payload_op(fp);
459 if (lport->vport) {
460 if (op == ELS_LS_RJT) {
461 printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n");
462 fc_vport_terminate(lport->vport);
463 fc_frame_free(fp);
464 return;
465 }
466 }
467 if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
468 fc_frame_free(fp);
469 return;
470 }
471 }
472 fip->update_mac(lport, mac);
473done:
474 fc_lport_flogi_resp(seq, fp, lport);
475}
476
477static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
478 void *arg)
479{
480 struct fcoe_ctlr *fip = arg;
481 struct fc_exch *exch = fc_seq_exch(seq);
482 struct fc_lport *lport = exch->lp;
483 static u8 zero_mac[ETH_ALEN] = { 0 };
484
485 if (!IS_ERR(fp))
486 fip->update_mac(lport, zero_mac);
487 fc_lport_logo_resp(seq, fp, lport);
488}
489
490struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
491 struct fc_frame *fp, unsigned int op,
492 void (*resp)(struct fc_seq *,
493 struct fc_frame *,
494 void *),
495 void *arg, u32 timeout)
496{
497 struct fcoe_port *port = lport_priv(lport);
498 struct bnx2fc_hba *hba = port->priv;
499 struct fcoe_ctlr *fip = &hba->ctlr;
500 struct fc_frame_header *fh = fc_frame_header_get(fp);
501
502 switch (op) {
503 case ELS_FLOGI:
504 case ELS_FDISC:
505 return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp,
506 fip, timeout);
507 case ELS_LOGO:
508 /* only hook onto fabric logouts, not port logouts */
509 if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
510 break;
511 return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp,
512 fip, timeout);
513 }
514 return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
515}
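
bnx2fc_elsct_send() is the driver's hook into libfc's ELS/CT send path. It is presumably installed through the libfc function template set up in bnx2fc_fcoe.c, along the lines of this sketch (the actual initializer is not shown here and may set more fields):

    static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
            .elsct_send = bnx2fc_elsct_send,
            /* other handlers elided */
    };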
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
new file mode 100644
index 000000000000..e476e8753079
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -0,0 +1,2535 @@
1/* bnx2fc_fcoe.c: Broadcom NetXtreme II Linux FCoE offload driver.
2 * This file contains the code that interacts with libfc, libfcoe,
3 * cnic modules to create FCoE instances, send/receive non-offloaded
4 * FIP/FCoE packets, listen to link events etc.
5 *
6 * Copyright (c) 2008 - 2010 Broadcom Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation.
11 *
12 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
13 */
14
15#include "bnx2fc.h"
16
17static struct list_head adapter_list;
18static u32 adapter_count;
19static DEFINE_MUTEX(bnx2fc_dev_lock);
20DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
21
22#define DRV_MODULE_NAME "bnx2fc"
23#define DRV_MODULE_VERSION BNX2FC_VERSION
24#define DRV_MODULE_RELDATE "Jan 25, 2011"
25
26
27static char version[] __devinitdata =
28 "Broadcom NetXtreme II FCoE Driver " DRV_MODULE_NAME \
29 " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
30
31
32MODULE_AUTHOR("Bhanu Prakash Gollapudi <bprakash@broadcom.com>");
33MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 FCoE Driver");
34MODULE_LICENSE("GPL");
35MODULE_VERSION(DRV_MODULE_VERSION);
36
37#define BNX2FC_MAX_QUEUE_DEPTH 256
38#define BNX2FC_MIN_QUEUE_DEPTH 32
39#define FCOE_WORD_TO_BYTE 4
40
41static struct scsi_transport_template *bnx2fc_transport_template;
42static struct scsi_transport_template *bnx2fc_vport_xport_template;
43
44struct workqueue_struct *bnx2fc_wq;
45
46/* bnx2fc needs only one instance of the fcoe_percpu_s structure:
47 * the IO threads are per-CPU, but there is a single L2 thread.
48 */
49struct fcoe_percpu_s bnx2fc_global;
50DEFINE_SPINLOCK(bnx2fc_global_lock);
51
52static struct cnic_ulp_ops bnx2fc_cnic_cb;
53static struct libfc_function_template bnx2fc_libfc_fcn_templ;
54static struct scsi_host_template bnx2fc_shost_template;
55static struct fc_function_template bnx2fc_transport_function;
56static struct fc_function_template bnx2fc_vport_xport_function;
57static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
58static int bnx2fc_destroy(struct net_device *net_device);
59static int bnx2fc_enable(struct net_device *netdev);
60static int bnx2fc_disable(struct net_device *netdev);
61
62static void bnx2fc_recv_frame(struct sk_buff *skb);
63
64static void bnx2fc_start_disc(struct bnx2fc_hba *hba);
65static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
66static int bnx2fc_net_config(struct fc_lport *lp);
67static int bnx2fc_lport_config(struct fc_lport *lport);
68static int bnx2fc_em_config(struct fc_lport *lport);
69static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
70static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba);
71static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
72static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
73static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
74 struct device *parent, int npiv);
75static void bnx2fc_destroy_work(struct work_struct *work);
76
77static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
78static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic);
79
80static int bnx2fc_fw_init(struct bnx2fc_hba *hba);
81static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba);
82
83static void bnx2fc_port_shutdown(struct fc_lport *lport);
84static void bnx2fc_stop(struct bnx2fc_hba *hba);
85static int __init bnx2fc_mod_init(void);
86static void __exit bnx2fc_mod_exit(void);
87
88unsigned int bnx2fc_debug_level;
89module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
90
91static int bnx2fc_cpu_callback(struct notifier_block *nfb,
92 unsigned long action, void *hcpu);
93/* notification function for CPU hotplug events */
94static struct notifier_block bnx2fc_cpu_notifier = {
95 .notifier_call = bnx2fc_cpu_callback,
96};
97
98static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
99{
100 struct fcoe_percpu_s *bg;
101 struct fcoe_rcv_info *fr;
102 struct sk_buff_head *list;
103 struct sk_buff *skb, *next;
104 struct sk_buff *head;
105
106 bg = &bnx2fc_global;
107 spin_lock_bh(&bg->fcoe_rx_list.lock);
108 list = &bg->fcoe_rx_list;
109 head = list->next;
110 for (skb = head; skb != (struct sk_buff *)list;
111 skb = next) {
112 next = skb->next;
113 fr = fcoe_dev_from_skb(skb);
114 if (fr->fr_dev == lp) {
115 __skb_unlink(skb, list);
116 kfree_skb(skb);
117 }
118 }
119 spin_unlock_bh(&bg->fcoe_rx_list.lock);
120}
121
122int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen)
123{
124 int rc;
125 spin_lock(&bnx2fc_global_lock);
126 rc = fcoe_get_paged_crc_eof(skb, tlen, &bnx2fc_global);
127 spin_unlock(&bnx2fc_global_lock);
128
129 return rc;
130}
131
132static void bnx2fc_abort_io(struct fc_lport *lport)
133{
134 /*
135 * This function is no-op for bnx2fc, but we do
136 * not want to leave it as NULL either, as libfc
137 * can call the default function which is
138 * fc_fcp_abort_io.
139 */
140}
141
142static void bnx2fc_cleanup(struct fc_lport *lport)
143{
144 struct fcoe_port *port = lport_priv(lport);
145 struct bnx2fc_hba *hba = port->priv;
146 struct bnx2fc_rport *tgt;
147 int i;
148
149 BNX2FC_MISC_DBG("Entered %s\n", __func__);
150 mutex_lock(&hba->hba_mutex);
151 spin_lock_bh(&hba->hba_lock);
152 for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
153 tgt = hba->tgt_ofld_list[i];
154 if (tgt) {
155 /* Cleanup IOs belonging to requested vport */
156 if (tgt->port == port) {
157 spin_unlock_bh(&hba->hba_lock);
158 BNX2FC_TGT_DBG(tgt, "flush/cleanup\n");
159 bnx2fc_flush_active_ios(tgt);
160 spin_lock_bh(&hba->hba_lock);
161 }
162 }
163 }
164 spin_unlock_bh(&hba->hba_lock);
165 mutex_unlock(&hba->hba_mutex);
166}
167
168static int bnx2fc_xmit_l2_frame(struct bnx2fc_rport *tgt,
169 struct fc_frame *fp)
170{
171 struct fc_rport_priv *rdata = tgt->rdata;
172 struct fc_frame_header *fh;
173 int rc = 0;
174
175 fh = fc_frame_header_get(fp);
176 BNX2FC_TGT_DBG(tgt, "Xmit L2 frame rport = 0x%x, oxid = 0x%x, "
177 "r_ctl = 0x%x\n", rdata->ids.port_id,
178 ntohs(fh->fh_ox_id), fh->fh_r_ctl);
179 if ((fh->fh_type == FC_TYPE_ELS) &&
180 (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
181
182 switch (fc_frame_payload_op(fp)) {
183 case ELS_ADISC:
184 rc = bnx2fc_send_adisc(tgt, fp);
185 break;
186 case ELS_LOGO:
187 rc = bnx2fc_send_logo(tgt, fp);
188 break;
189 case ELS_RLS:
190 rc = bnx2fc_send_rls(tgt, fp);
191 break;
192 default:
193 break;
194 }
195 } else if ((fh->fh_type == FC_TYPE_BLS) &&
196 (fh->fh_r_ctl == FC_RCTL_BA_ABTS))
197 BNX2FC_TGT_DBG(tgt, "ABTS frame\n");
198 else {
199 BNX2FC_TGT_DBG(tgt, "Send L2 frame type 0x%x "
200 "rctl 0x%x thru non-offload path\n",
201 fh->fh_type, fh->fh_r_ctl);
202 return -ENODEV;
203 }
204 if (rc)
205 return -ENOMEM;
206 else
207 return 0;
208}
209
210/**
211 * bnx2fc_xmit - bnx2fc's FCoE frame transmit function
212 *
213 * @lport: the associated local port
214 * @fp: the fc_frame to be transmitted
215 */
216static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
217{
218 struct ethhdr *eh;
219 struct fcoe_crc_eof *cp;
220 struct sk_buff *skb;
221 struct fc_frame_header *fh;
222 struct bnx2fc_hba *hba;
223 struct fcoe_port *port;
224 struct fcoe_hdr *hp;
225 struct bnx2fc_rport *tgt;
226 struct fcoe_dev_stats *stats;
227 u8 sof, eof;
228 u32 crc;
229 unsigned int hlen, tlen, elen;
230 int wlen, rc = 0;
231
232 port = (struct fcoe_port *)lport_priv(lport);
233 hba = port->priv;
234
235 fh = fc_frame_header_get(fp);
236
237 skb = fp_skb(fp);
238 if (!lport->link_up) {
239 BNX2FC_HBA_DBG(lport, "bnx2fc_xmit link down\n");
240 kfree_skb(skb);
241 return 0;
242 }
243
244 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
245 if (!hba->ctlr.sel_fcf) {
246 BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
247 kfree_skb(skb);
248 return -EINVAL;
249 }
250 if (fcoe_ctlr_els_send(&hba->ctlr, lport, skb))
251 return 0;
252 }
253
254 sof = fr_sof(fp);
255 eof = fr_eof(fp);
256
257 /*
258 * Snoop the frame header to check if the frame is for
259 * an offloaded session
260 */
261 /*
262 * tgt_ofld_list access is synchronized using
264 * both hba mutex and hba lock. At least hba mutex or
264 * hba lock needs to be held for read access.
265 */
266
267 spin_lock_bh(&hba->hba_lock);
268 tgt = bnx2fc_tgt_lookup(port, ntoh24(fh->fh_d_id));
269 if (tgt && (test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
270 /* This frame is for offloaded session */
271 BNX2FC_HBA_DBG(lport, "xmit: Frame is for offloaded session "
272 "port_id = 0x%x\n", ntoh24(fh->fh_d_id));
273 spin_unlock_bh(&hba->hba_lock);
274 rc = bnx2fc_xmit_l2_frame(tgt, fp);
275 if (rc != -ENODEV) {
276 kfree_skb(skb);
277 return rc;
278 }
279 } else {
280 spin_unlock_bh(&hba->hba_lock);
281 }
282
283 elen = sizeof(struct ethhdr);
284 hlen = sizeof(struct fcoe_hdr);
285 tlen = sizeof(struct fcoe_crc_eof);
286 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
287
288 skb->ip_summed = CHECKSUM_NONE;
289 crc = fcoe_fc_crc(fp);
290
291 /* copy port crc and eof to the skb buff */
292 if (skb_is_nonlinear(skb)) {
293 skb_frag_t *frag;
294 if (bnx2fc_get_paged_crc_eof(skb, tlen)) {
295 kfree_skb(skb);
296 return -ENOMEM;
297 }
298 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
299 cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
300 + frag->page_offset;
301 } else {
302 cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
303 }
304
305 memset(cp, 0, sizeof(*cp));
306 cp->fcoe_eof = eof;
307 cp->fcoe_crc32 = cpu_to_le32(~crc);
308 if (skb_is_nonlinear(skb)) {
309 kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
310 cp = NULL;
311 }
312
313 /* adjust skb network/transport offsets to match mac/fcoe/port */
314 skb_push(skb, elen + hlen);
315 skb_reset_mac_header(skb);
316 skb_reset_network_header(skb);
317 skb->mac_len = elen;
318 skb->protocol = htons(ETH_P_FCOE);
319 skb->dev = hba->netdev;
320
321 /* fill up mac and fcoe headers */
322 eh = eth_hdr(skb);
323 eh->h_proto = htons(ETH_P_FCOE);
324 if (hba->ctlr.map_dest)
325 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
326 else
327 /* insert GW address */
328 memcpy(eh->h_dest, hba->ctlr.dest_addr, ETH_ALEN);
329
330 if (unlikely(hba->ctlr.flogi_oxid != FC_XID_UNKNOWN))
331 memcpy(eh->h_source, hba->ctlr.ctl_src_addr, ETH_ALEN);
332 else
333 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
334
335 hp = (struct fcoe_hdr *)(eh + 1);
336 memset(hp, 0, sizeof(*hp));
337 if (FC_FCOE_VER)
338 FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
339 hp->fcoe_sof = sof;
340
341 /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
342 if (lport->seq_offload && fr_max_payload(fp)) {
343 skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
344 skb_shinfo(skb)->gso_size = fr_max_payload(fp);
345 } else {
346 skb_shinfo(skb)->gso_type = 0;
347 skb_shinfo(skb)->gso_size = 0;
348 }
349
350 /* update tx stats */
351 stats = per_cpu_ptr(lport->dev_stats, get_cpu());
352 stats->TxFrames++;
353 stats->TxWords += wlen;
354 put_cpu();
355
356 /* send down to lld */
357 fr_dev(fp) = lport;
358 if (port->fcoe_pending_queue.qlen)
359 fcoe_check_wait_queue(lport, skb);
360 else if (fcoe_start_io(skb))
361 fcoe_check_wait_queue(lport, skb);
362
363 return 0;
364}
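
After the skb_push() and header fills above, the transmitted frame is laid out as:

    | ethhdr | fcoe_hdr | FC header (24 bytes) + payload | fcoe_crc_eof |

with the complemented CRC stored in the trailer, placed inline for linear skbs or in the last page fragment otherwise.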
365
366/**
367 * bnx2fc_rcv - This is bnx2fc's receive function called by NET_RX_SOFTIRQ
368 *
369 * @skb: the receive socket buffer
370 * @dev: associated net device
371 * @ptype: context
372 * @olddev: last device
373 *
374 * This function receives the packet, builds an FC frame and passes it up
375 */
376static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
377 struct packet_type *ptype, struct net_device *olddev)
378{
379 struct fc_lport *lport;
380 struct bnx2fc_hba *hba;
381 struct fc_frame_header *fh;
382 struct fcoe_rcv_info *fr;
383 struct fcoe_percpu_s *bg;
384 unsigned short oxid;
385
386 hba = container_of(ptype, struct bnx2fc_hba, fcoe_packet_type);
387 lport = hba->ctlr.lp;
388
389 if (unlikely(lport == NULL)) {
390 printk(KERN_ALERT PFX "bnx2fc_rcv: lport is NULL\n");
391 goto err;
392 }
393
394 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
395 printk(KERN_ALERT PFX "bnx2fc_rcv: Wrong FC type frame\n");
396 goto err;
397 }
398
399 /*
400 * Check for minimum frame length, and make sure required FCoE
401 * and FC headers are pulled into the linear data area.
402 */
403 if (unlikely((skb->len < FCOE_MIN_FRAME) ||
404 !pskb_may_pull(skb, FCOE_HEADER_LEN)))
405 goto err;
406
407 skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
408 fh = (struct fc_frame_header *) skb_transport_header(skb);
409
410 oxid = ntohs(fh->fh_ox_id);
411
412 fr = fcoe_dev_from_skb(skb);
413 fr->fr_dev = lport;
414 fr->ptype = ptype;
415
416 bg = &bnx2fc_global;
417 spin_lock_bh(&bg->fcoe_rx_list.lock);
418
419 __skb_queue_tail(&bg->fcoe_rx_list, skb);
420 if (bg->fcoe_rx_list.qlen == 1)
421 wake_up_process(bg->thread);
422
423 spin_unlock_bh(&bg->fcoe_rx_list.lock);
424
425 return 0;
426err:
427 kfree_skb(skb);
428 return -1;
429}
430
431static int bnx2fc_l2_rcv_thread(void *arg)
432{
433 struct fcoe_percpu_s *bg = arg;
434 struct sk_buff *skb;
435
436 set_user_nice(current, -20);
437 set_current_state(TASK_INTERRUPTIBLE);
438 while (!kthread_should_stop()) {
439 schedule();
440 set_current_state(TASK_RUNNING);
441 spin_lock_bh(&bg->fcoe_rx_list.lock);
442 while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) {
443 spin_unlock_bh(&bg->fcoe_rx_list.lock);
444 bnx2fc_recv_frame(skb);
445 spin_lock_bh(&bg->fcoe_rx_list.lock);
446 }
447 spin_unlock_bh(&bg->fcoe_rx_list.lock);
448 set_current_state(TASK_INTERRUPTIBLE);
449 }
450 set_current_state(TASK_RUNNING);
451 return 0;
452}
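
bnx2fc_rcv() and this thread form a simple producer/consumer pair over bg->fcoe_rx_list: the receive softirq enqueues under the list lock and calls wake_up_process() only when the queue transitions from empty (qlen == 1), while the thread dequeues one skb at a time and drops the lock around each bnx2fc_recv_frame() call, so reception never blocks behind frame processing.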
453
454
455static void bnx2fc_recv_frame(struct sk_buff *skb)
456{
457 u32 fr_len;
458 struct fc_lport *lport;
459 struct fcoe_rcv_info *fr;
460 struct fcoe_dev_stats *stats;
461 struct fc_frame_header *fh;
462 struct fcoe_crc_eof crc_eof;
463 struct fc_frame *fp;
464 struct fc_lport *vn_port;
465 struct fcoe_port *port;
466 u8 *mac = NULL;
467 u8 *dest_mac = NULL;
468 struct fcoe_hdr *hp;
469
470 fr = fcoe_dev_from_skb(skb);
471 lport = fr->fr_dev;
472 if (unlikely(lport == NULL)) {
473 printk(KERN_ALERT PFX "Invalid lport struct\n");
474 kfree_skb(skb);
475 return;
476 }
477
478 if (skb_is_nonlinear(skb))
479 skb_linearize(skb);
480 mac = eth_hdr(skb)->h_source;
481 dest_mac = eth_hdr(skb)->h_dest;
482
483 /* Pull the header */
484 hp = (struct fcoe_hdr *) skb_network_header(skb);
485 fh = (struct fc_frame_header *) skb_transport_header(skb);
486 skb_pull(skb, sizeof(struct fcoe_hdr));
487 fr_len = skb->len - sizeof(struct fcoe_crc_eof);
488
489 stats = per_cpu_ptr(lport->dev_stats, get_cpu());
490 stats->RxFrames++;
491 stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
492
493 fp = (struct fc_frame *)skb;
494 fc_frame_init(fp);
495 fr_dev(fp) = lport;
496 fr_sof(fp) = hp->fcoe_sof;
497 if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
498 put_cpu();
499 kfree_skb(skb);
500 return;
501 }
502 fr_eof(fp) = crc_eof.fcoe_eof;
503 fr_crc(fp) = crc_eof.fcoe_crc32;
504 if (pskb_trim(skb, fr_len)) {
505 put_cpu();
506 kfree_skb(skb);
507 return;
508 }
509
510 fh = fc_frame_header_get(fp);
511
512 vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
513 if (vn_port) {
514 port = lport_priv(vn_port);
515 if (compare_ether_addr(port->data_src_addr, dest_mac)
516 != 0) {
517 BNX2FC_HBA_DBG(lport, "fpma mismatch\n");
518 put_cpu();
519 kfree_skb(skb);
520 return;
521 }
522 }
523 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
524 fh->fh_type == FC_TYPE_FCP) {
525 /* Drop FCP data. We don't handle it in the L2 path */
526 put_cpu();
527 kfree_skb(skb);
528 return;
529 }
530 if (fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
531 fh->fh_type == FC_TYPE_ELS) {
532 switch (fc_frame_payload_op(fp)) {
533 case ELS_LOGO:
534 if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
535 /* drop non-FIP LOGO */
536 put_cpu();
537 kfree_skb(skb);
538 return;
539 }
540 break;
541 }
542 }
543 if (le32_to_cpu(fr_crc(fp)) !=
544 ~crc32(~0, skb->data, fr_len)) {
545 if (stats->InvalidCRCCount < 5)
546 printk(KERN_WARNING PFX "dropping frame with "
547 "CRC error\n");
548 stats->InvalidCRCCount++;
549 put_cpu();
550 kfree_skb(skb);
551 return;
552 }
553 put_cpu();
554 fc_exch_recv(lport, fp);
555}
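
The CRC validation above mirrors the transmit path: bnx2fc_xmit() stores cpu_to_le32(~crc) in the fcoe_crc_eof trailer, so the receive side recomputes ~crc32(~0, skb->data, fr_len) over the trimmed frame and compares it against the little-endian trailer value. A mismatch bumps InvalidCRCCount and drops the frame, with the warning printed only for the first few occurrences.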
556
557/**
558 * bnx2fc_percpu_io_thread - per-CPU thread to process fast path IO completions
559 *
560 * @arg: ptr to bnx2fc_percpu_info structure
561 */
562int bnx2fc_percpu_io_thread(void *arg)
563{
564 struct bnx2fc_percpu_s *p = arg;
565 struct bnx2fc_work *work, *tmp;
566 LIST_HEAD(work_list);
567
568 set_user_nice(current, -20);
569 set_current_state(TASK_INTERRUPTIBLE);
570 while (!kthread_should_stop()) {
571 schedule();
572 set_current_state(TASK_RUNNING);
573 spin_lock_bh(&p->fp_work_lock);
574 while (!list_empty(&p->work_list)) {
575 list_splice_init(&p->work_list, &work_list);
576 spin_unlock_bh(&p->fp_work_lock);
577
578 list_for_each_entry_safe(work, tmp, &work_list, list) {
579 list_del_init(&work->list);
580 bnx2fc_process_cq_compl(work->tgt, work->wqe);
581 kfree(work);
582 }
583
584 spin_lock_bh(&p->fp_work_lock);
585 }
586 spin_unlock_bh(&p->fp_work_lock);
587 set_current_state(TASK_INTERRUPTIBLE);
588 }
589 set_current_state(TASK_RUNNING);
590
591 return 0;
592}
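
The drain loop above is the usual splice-and-process pattern: list_splice_init() moves the entire pending list onto the local work_list while fp_work_lock is held, and each CQ completion is then processed with the lock dropped, so the producer can keep queueing new work entries concurrently.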
593
594static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
595{
596 struct fc_host_statistics *bnx2fc_stats;
597 struct fc_lport *lport = shost_priv(shost);
598 struct fcoe_port *port = lport_priv(lport);
599 struct bnx2fc_hba *hba = port->priv;
600 struct fcoe_statistics_params *fw_stats;
601 int rc = 0;
602
603 fw_stats = (struct fcoe_statistics_params *)hba->stats_buffer;
604 if (!fw_stats)
605 return NULL;
606
607 bnx2fc_stats = fc_get_host_stats(shost);
608
609 init_completion(&hba->stat_req_done);
610 if (bnx2fc_send_stat_req(hba))
611 return bnx2fc_stats;
612 rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
613 if (!rc) {
614 BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
615 return bnx2fc_stats;
616 }
617 bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat1.fc_crc_cnt;
618 bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt;
619 bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4;
620 bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt;
621 bnx2fc_stats->rx_words += (fw_stats->rx_stat0.fcoe_rx_byte_cnt) / 4;
622
623 bnx2fc_stats->dumped_frames = 0;
624 bnx2fc_stats->lip_count = 0;
625 bnx2fc_stats->nos_count = 0;
626 bnx2fc_stats->loss_of_sync_count = 0;
627 bnx2fc_stats->loss_of_signal_count = 0;
628 bnx2fc_stats->prim_seq_protocol_err_count = 0;
629
630 return bnx2fc_stats;
631}
632
633static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
634{
635 struct fcoe_port *port = lport_priv(lport);
636 struct bnx2fc_hba *hba = port->priv;
637 struct Scsi_Host *shost = lport->host;
638 int rc = 0;
639
640 shost->max_cmd_len = BNX2FC_MAX_CMD_LEN;
641 shost->max_lun = BNX2FC_MAX_LUN;
642 shost->max_id = BNX2FC_MAX_FCP_TGT;
643 shost->max_channel = 0;
644 if (lport->vport)
645 shost->transportt = bnx2fc_vport_xport_template;
646 else
647 shost->transportt = bnx2fc_transport_template;
648
649 /* Add the new host to SCSI-ml */
650 rc = scsi_add_host(lport->host, dev);
651 if (rc) {
652 printk(KERN_ERR PFX "Error on scsi_add_host\n");
653 return rc;
654 }
655 if (!lport->vport)
656 fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
657 sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s",
658 BNX2FC_NAME, BNX2FC_VERSION,
659 hba->netdev->name);
660
661 return 0;
662}
663
664static int bnx2fc_mfs_update(struct fc_lport *lport)
665{
666 struct fcoe_port *port = lport_priv(lport);
667 struct bnx2fc_hba *hba = port->priv;
668 struct net_device *netdev = hba->netdev;
669 u32 mfs;
670 u32 max_mfs;
671
672 mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
673 sizeof(struct fcoe_crc_eof));
674 max_mfs = BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header);
675 BNX2FC_HBA_DBG(lport, "mfs = %d, max_mfs = %d\n", mfs, max_mfs);
676 if (mfs > max_mfs)
677 mfs = max_mfs;
678
679 /* Adjust mfs to be a multiple of 256 bytes */
680 mfs = (((mfs - sizeof(struct fc_frame_header)) / BNX2FC_MIN_PAYLOAD) *
681 BNX2FC_MIN_PAYLOAD);
682 mfs = mfs + sizeof(struct fc_frame_header);
683
684 BNX2FC_HBA_DBG(lport, "Set MFS = %d\n", mfs);
685 if (fc_set_mfs(lport, mfs))
686 return -EINVAL;
687 return 0;
688}
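
Worked example of the rounding above, assuming the usual sizes (14-byte FCoE header, 8-byte CRC/EOF trailer, 24-byte FC header) and a BNX2FC_MIN_PAYLOAD of 256: a 1500-byte MTU gives mfs = 1500 - 22 = 1478, and ((1478 - 24) / 256) * 256 + 24 = 5 * 256 + 24 = 1304, so the FCP payload is rounded down to a multiple of 256 bytes.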
689static void bnx2fc_link_speed_update(struct fc_lport *lport)
690{
691 struct fcoe_port *port = lport_priv(lport);
692 struct bnx2fc_hba *hba = port->priv;
693 struct net_device *netdev = hba->netdev;
694 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
695
696 if (!dev_ethtool_get_settings(netdev, &ecmd)) {
697 lport->link_supported_speeds &=
698 ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
699 if (ecmd.supported & (SUPPORTED_1000baseT_Half |
700 SUPPORTED_1000baseT_Full))
701 lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
702 if (ecmd.supported & SUPPORTED_10000baseT_Full)
703 lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
704
705 if (ecmd.speed == SPEED_1000)
706 lport->link_speed = FC_PORTSPEED_1GBIT;
707 if (ecmd.speed == SPEED_10000)
708 lport->link_speed = FC_PORTSPEED_10GBIT;
709 }
710 return;
711}
712static int bnx2fc_link_ok(struct fc_lport *lport)
713{
714 struct fcoe_port *port = lport_priv(lport);
715 struct bnx2fc_hba *hba = port->priv;
716 struct net_device *dev = hba->phys_dev;
717 int rc = 0;
718
719 if ((dev->flags & IFF_UP) && netif_carrier_ok(dev))
720 clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
721 else {
722 set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
723 rc = -1;
724 }
725 return rc;
726}
727
728/**
729 * bnx2fc_get_link_state - get network link state
730 *
731 * @hba: adapter instance pointer
732 *
733 * Updates the adapter link-state flag based on the netdev carrier state
734 */
735void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
736{
737 if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
738 set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
739 else
740 clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
741}
742
743static int bnx2fc_net_config(struct fc_lport *lport)
744{
745 struct bnx2fc_hba *hba;
746 struct fcoe_port *port;
747 u64 wwnn, wwpn;
748
749 port = lport_priv(lport);
750 hba = port->priv;
751
752 /* require support for get_pauseparam ethtool op. */
753 if (!hba->phys_dev->ethtool_ops ||
754 !hba->phys_dev->ethtool_ops->get_pauseparam)
755 return -EOPNOTSUPP;
756
757 if (bnx2fc_mfs_update(lport))
758 return -EINVAL;
759
760 skb_queue_head_init(&port->fcoe_pending_queue);
761 port->fcoe_pending_queue_active = 0;
762 setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport);
763
764 bnx2fc_link_speed_update(lport);
765
766 if (!lport->vport) {
767 wwnn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 1, 0);
768 BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
769 fc_set_wwnn(lport, wwnn);
770
771 wwpn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 2, 0);
772 BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
773 fc_set_wwpn(lport, wwpn);
774 }
775
776 return 0;
777}
778
779static void bnx2fc_destroy_timer(unsigned long data)
780{
781 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data;
782
783 BNX2FC_HBA_DBG(hba->ctlr.lp, "ERROR:bnx2fc_destroy_timer - "
784 "Destroy compl not received!!\n");
785 hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
786 wake_up_interruptible(&hba->destroy_wait);
787}
788
789/**
790 * bnx2fc_indicate_netevent - Generic netdev event handler
791 *
792 * @context: adapter structure pointer
793 * @event: event type
794 *
795 * Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN, NETDEV_CHANGE and
796 * NETDEV_CHANGEMTU events
797 */
798static void bnx2fc_indicate_netevent(void *context, unsigned long event)
799{
800 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
801 struct fc_lport *lport = hba->ctlr.lp;
802 struct fc_lport *vport;
803 u32 link_possible = 1;
804
805 if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
806 BNX2FC_MISC_DBG("driver not ready. event=%s %ld\n",
807 hba->netdev->name, event);
808 return;
809 }
810
811 /*
812 * ASSUMPTION:
813 * indicate_netevent cannot be called from cnic unless bnx2fc
814 * does register_device
815 */
816 BUG_ON(!lport);
817
818 BNX2FC_HBA_DBG(lport, "enter netevent handler - event=%s %ld\n",
819 hba->netdev->name, event);
820
821 switch (event) {
822 case NETDEV_UP:
823 BNX2FC_HBA_DBG(lport, "Port up, adapter_state = %ld\n",
824 hba->adapter_state);
825 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
826 printk(KERN_ERR "indicate_netevent: "\
827 "adapter is not UP!!\n");
828 /* fall thru to update mfs if MTU has changed */
829 case NETDEV_CHANGEMTU:
830 BNX2FC_HBA_DBG(lport, "NETDEV_CHANGEMTU event\n");
831 bnx2fc_mfs_update(lport);
832 mutex_lock(&lport->lp_mutex);
833 list_for_each_entry(vport, &lport->vports, list)
834 bnx2fc_mfs_update(vport);
835 mutex_unlock(&lport->lp_mutex);
836 break;
837
838 case NETDEV_DOWN:
839 BNX2FC_HBA_DBG(lport, "Port down\n");
840 clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
841 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
842 link_possible = 0;
843 break;
844
845 case NETDEV_GOING_DOWN:
846 BNX2FC_HBA_DBG(lport, "Port going down\n");
847 set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
848 link_possible = 0;
849 break;
850
851 case NETDEV_CHANGE:
852 BNX2FC_HBA_DBG(lport, "NETDEV_CHANGE\n");
853 break;
854
855 default:
856 printk(KERN_ERR PFX "Unknown netevent %ld\n", event);
857 return;
858 }
859
860 bnx2fc_link_speed_update(lport);
861
862 if (link_possible && !bnx2fc_link_ok(lport)) {
863 printk(KERN_ERR "indicate_netevent: call ctlr_link_up\n");
864 fcoe_ctlr_link_up(&hba->ctlr);
865 } else {
866 printk(KERN_ERR "indicate_netevent: call ctlr_link_down\n");
867 if (fcoe_ctlr_link_down(&hba->ctlr)) {
868 clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
869 mutex_lock(&lport->lp_mutex);
870 list_for_each_entry(vport, &lport->vports, list)
871 fc_host_port_type(vport->host) =
872 FC_PORTTYPE_UNKNOWN;
873 mutex_unlock(&lport->lp_mutex);
874 fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
875 per_cpu_ptr(lport->dev_stats,
876 get_cpu())->LinkFailureCount++;
877 put_cpu();
878 fcoe_clean_pending_queue(lport);
879
880 init_waitqueue_head(&hba->shutdown_wait);
881 BNX2FC_HBA_DBG(lport, "indicate_netevent "
882 "num_ofld_sess = %d\n",
883 hba->num_ofld_sess);
884 hba->wait_for_link_down = 1;
885 BNX2FC_HBA_DBG(lport, "waiting for uploads to "
886 "compl proc = %s\n",
887 current->comm);
888 wait_event_interruptible(hba->shutdown_wait,
889 (hba->num_ofld_sess == 0));
890 BNX2FC_HBA_DBG(lport, "wakeup - num_ofld_sess = %d\n",
891 hba->num_ofld_sess);
892 hba->wait_for_link_down = 0;
893
894 if (signal_pending(current))
895 flush_signals(current);
896 }
897 }
898}
899
900static int bnx2fc_libfc_config(struct fc_lport *lport)
901{
902
903 /* Set the function pointers set by bnx2fc driver */
904 memcpy(&lport->tt, &bnx2fc_libfc_fcn_templ,
905 sizeof(struct libfc_function_template));
906 fc_elsct_init(lport);
907 fc_exch_init(lport);
908 fc_rport_init(lport);
909 fc_disc_init(lport);
910 return 0;
911}
912
913static int bnx2fc_em_config(struct fc_lport *lport)
914{
915 struct fcoe_port *port = lport_priv(lport);
916 struct bnx2fc_hba *hba = port->priv;
917
918 if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID,
919 FCOE_MAX_XID, NULL)) {
920 printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
921 return -ENOMEM;
922 }
923
924 hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID,
925 BNX2FC_MAX_XID);
926
927 if (!hba->cmd_mgr) {
928 printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
929 fc_exch_mgr_free(lport);
930 return -ENOMEM;
931 }
932 return 0;
933}
934
935static int bnx2fc_lport_config(struct fc_lport *lport)
936{
937 lport->link_up = 0;
938 lport->qfull = 0;
939 lport->max_retry_count = 3;
940 lport->max_rport_retry_count = 3;
941 lport->e_d_tov = 2 * 1000;
942 lport->r_a_tov = 10 * 1000;
943
944 /* REVISIT: enable when supporting tape devices
945 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
946 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
947 */
948 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS);
949 lport->does_npiv = 1;
950
951 memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen));
952 lport->rnid_gen.rnid_atype = BNX2FC_RNID_HBA;
953
954 /* alloc stats structure */
955 if (fc_lport_init_stats(lport))
956 return -ENOMEM;
957
958 /* Finish fc_lport configuration */
959 fc_lport_config(lport);
960
961 return 0;
962}
963
964/**
965 * bnx2fc_fip_recv - handle a received FIP frame.
966 *
967 * @skb: the received skb
968 * @dev: associated &net_device
969 * @ptype: the &packet_type structure which was used to register this handler.
970 * @orig_dev: original receive &net_device, in case @dev is a bond.
971 *
972 * Returns: 0 for success
973 */
974static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
975 struct packet_type *ptype,
976 struct net_device *orig_dev)
977{
978 struct bnx2fc_hba *hba;
979 hba = container_of(ptype, struct bnx2fc_hba, fip_packet_type);
980 fcoe_ctlr_recv(&hba->ctlr, skb);
981 return 0;
982}
983
984/**
985 * bnx2fc_update_src_mac - Update the FCoE data source MAC address.
986 *
987 * @lport: libfc local port
988 * @addr: unicast MAC address granted for FCoE traffic
989 *
990 * Records @addr as the source address used for offloaded FCoE
991 * frames on this port; bnx2fc_xmit() uses it as the Ethernet
992 * source and bnx2fc_get_src_mac() returns it to libfcoe.
993 */
994static void bnx2fc_update_src_mac(struct fc_lport *lport, u8 *addr)
995{
996 struct fcoe_port *port = lport_priv(lport);
997
998 memcpy(port->data_src_addr, addr, ETH_ALEN);
999}
1000
1001/**
1002 * bnx2fc_get_src_mac - return the ethernet source address for an lport
1003 *
1004 * @lport: libfc port
1005 */
1006static u8 *bnx2fc_get_src_mac(struct fc_lport *lport)
1007{
1008 struct fcoe_port *port;
1009
1010 port = (struct fcoe_port *)lport_priv(lport);
1011 return port->data_src_addr;
1012}
1013
1014/**
1015 * bnx2fc_fip_send - send an Ethernet-encapsulated FIP frame.
1016 *
1017 * @fip: FCoE controller.
1018 * @skb: FIP Packet.
1019 */
1020static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
1021{
1022 skb->dev = bnx2fc_from_ctlr(fip)->netdev;
1023 dev_queue_xmit(skb);
1024}
1025
1026static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
1027{
1028 struct Scsi_Host *shost = vport_to_shost(vport);
1029 struct fc_lport *n_port = shost_priv(shost);
1030 struct fcoe_port *port = lport_priv(n_port);
1031 struct bnx2fc_hba *hba = port->priv;
1032 struct net_device *netdev = hba->netdev;
1033 struct fc_lport *vn_port;
1034
1035 if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
1036 printk(KERN_ERR PFX "vn ports cannot be created on "
1037 "this hba\n");
1038 return -EIO;
1039 }
1040 mutex_lock(&bnx2fc_dev_lock);
1041 vn_port = bnx2fc_if_create(hba, &vport->dev, 1);
1042 mutex_unlock(&bnx2fc_dev_lock);
1043
1044 if (IS_ERR(vn_port)) {
1045 printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n",
1046 netdev->name);
1047 return -EIO;
1048 }
1049
1050 if (disabled) {
1051 fc_vport_set_state(vport, FC_VPORT_DISABLED);
1052 } else {
1053 vn_port->boot_time = jiffies;
1054 fc_lport_init(vn_port);
1055 fc_fabric_login(vn_port);
1056 fc_vport_setlink(vn_port);
1057 }
1058 return 0;
1059}
1060
1061static int bnx2fc_vport_destroy(struct fc_vport *vport)
1062{
1063 struct Scsi_Host *shost = vport_to_shost(vport);
1064 struct fc_lport *n_port = shost_priv(shost);
1065 struct fc_lport *vn_port = vport->dd_data;
1066 struct fcoe_port *port = lport_priv(vn_port);
1067
1068 mutex_lock(&n_port->lp_mutex);
1069 list_del(&vn_port->list);
1070 mutex_unlock(&n_port->lp_mutex);
1071 queue_work(bnx2fc_wq, &port->destroy_work);
1072 return 0;
1073}
1074
1075static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable)
1076{
1077 struct fc_lport *lport = vport->dd_data;
1078
1079 if (disable) {
1080 fc_vport_set_state(vport, FC_VPORT_DISABLED);
1081 fc_fabric_logoff(lport);
1082 } else {
1083 lport->boot_time = jiffies;
1084 fc_fabric_login(lport);
1085 fc_vport_setlink(lport);
1086 }
1087 return 0;
1088}
1089
1090
1091static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba)
1092{
1093 struct net_device *netdev = hba->netdev;
1094 struct net_device *physdev = hba->phys_dev;
1095 struct netdev_hw_addr *ha;
1096 int sel_san_mac = 0;
1097
1098 /* Bonding devices are not supported */
1099 if ((netdev->priv_flags & IFF_MASTER_ALB) ||
1100 (netdev->priv_flags & IFF_SLAVE_INACTIVE) ||
1101 (netdev->priv_flags & IFF_MASTER_8023AD)) {
1102 return -EOPNOTSUPP;
1103 }
1104
1105 /* setup Source MAC Address */
1106 rcu_read_lock();
1107 for_each_dev_addr(physdev, ha) {
1108 BNX2FC_MISC_DBG("net_config: ha->type = %d, fip_mac = ",
1109 ha->type);
1110 printk(KERN_INFO "%02x:%02x:%02x:%02x:%02x:%02x\n", ha->addr[0],
1111 ha->addr[1], ha->addr[2], ha->addr[3],
1112 ha->addr[4], ha->addr[5]);
1113
1114 if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
1115 (is_valid_ether_addr(ha->addr))) {
1116 memcpy(hba->ctlr.ctl_src_addr, ha->addr, ETH_ALEN);
1117 sel_san_mac = 1;
1118 BNX2FC_MISC_DBG("Found SAN MAC\n");
1119 }
1120 }
1121 rcu_read_unlock();
1122
1123 if (!sel_san_mac)
1124 return -ENODEV;
1125
1126 hba->fip_packet_type.func = bnx2fc_fip_recv;
1127 hba->fip_packet_type.type = htons(ETH_P_FIP);
1128 hba->fip_packet_type.dev = netdev;
1129 dev_add_pack(&hba->fip_packet_type);
1130
1131 hba->fcoe_packet_type.func = bnx2fc_rcv;
1132 hba->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
1133 hba->fcoe_packet_type.dev = netdev;
1134 dev_add_pack(&hba->fcoe_packet_type);
1135
1136 return 0;
1137}
1138
1139static int bnx2fc_attach_transport(void)
1140{
1141 bnx2fc_transport_template =
1142 fc_attach_transport(&bnx2fc_transport_function);
1143
1144 if (bnx2fc_transport_template == NULL) {
1145 printk(KERN_ERR PFX "Failed to attach FC transport\n");
1146 return -ENODEV;
1147 }
1148
1149 bnx2fc_vport_xport_template =
1150 fc_attach_transport(&bnx2fc_vport_xport_function);
1151 if (bnx2fc_vport_xport_template == NULL) {
1152 printk(KERN_ERR PFX
1153 "Failed to attach FC transport for vport\n");
1154 fc_release_transport(bnx2fc_transport_template);
1155 bnx2fc_transport_template = NULL;
1156 return -ENODEV;
1157 }
1158 return 0;
1159}
1160static void bnx2fc_release_transport(void)
1161{
1162 fc_release_transport(bnx2fc_transport_template);
1163 fc_release_transport(bnx2fc_vport_xport_template);
1164 bnx2fc_transport_template = NULL;
1165 bnx2fc_vport_xport_template = NULL;
1166}
1167
1168static void bnx2fc_interface_release(struct kref *kref)
1169{
1170 struct bnx2fc_hba *hba;
1171 struct net_device *netdev;
1172 struct net_device *phys_dev;
1173
1174 hba = container_of(kref, struct bnx2fc_hba, kref);
1175 BNX2FC_HBA_DBG(hba->ctlr.lp, "Interface is being released\n");
1176
1177 netdev = hba->netdev;
1178 phys_dev = hba->phys_dev;
1179
1180 /* tear-down FIP controller */
1181 if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done))
1182 fcoe_ctlr_destroy(&hba->ctlr);
1183
1184 /* Free the command manager */
1185 if (hba->cmd_mgr) {
1186 bnx2fc_cmd_mgr_free(hba->cmd_mgr);
1187 hba->cmd_mgr = NULL;
1188 }
1189 dev_put(netdev);
1190 module_put(THIS_MODULE);
1191}
1192
1193static inline void bnx2fc_interface_get(struct bnx2fc_hba *hba)
1194{
1195 kref_get(&hba->kref);
1196}
1197
1198static inline void bnx2fc_interface_put(struct bnx2fc_hba *hba)
1199{
1200 kref_put(&hba->kref, bnx2fc_interface_release);
1201}
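
Reference lifetime as wired up in this file: bnx2fc_interface_setup() does the kref_init(), every lport created by bnx2fc_if_create() takes a reference through bnx2fc_interface_get(), and bnx2fc_if_destroy() drops it through bnx2fc_interface_put(). When the count reaches zero, bnx2fc_interface_release() tears down the FIP controller, frees the command manager and releases the netdev and module holds.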
1202static void bnx2fc_interface_destroy(struct bnx2fc_hba *hba)
1203{
1204 bnx2fc_unbind_pcidev(hba);
1205 kfree(hba);
1206}
1207
1208/**
1209 * bnx2fc_interface_create - create a new fcoe instance
1210 *
1211 * @cnic: pointer to cnic device
1212 *
1213 * Creates a new FCoE instance on the given device, which includes
1214 * allocating the hba, scsi_host and lport structures.
1215 */
1216static struct bnx2fc_hba *bnx2fc_interface_create(struct cnic_dev *cnic)
1217{
1218 struct bnx2fc_hba *hba;
1219 int rc;
1220
1221 hba = kzalloc(sizeof(*hba), GFP_KERNEL);
1222 if (!hba) {
1223 printk(KERN_ERR PFX "Unable to allocate hba structure\n");
1224 return NULL;
1225 }
1226 spin_lock_init(&hba->hba_lock);
1227 mutex_init(&hba->hba_mutex);
1228
1229 hba->cnic = cnic;
1230 rc = bnx2fc_bind_pcidev(hba);
1231 if (rc)
1232 goto bind_err;
1233 hba->phys_dev = cnic->netdev;
1234 /* will get overwritten after we do vlan discovery */
1235 hba->netdev = hba->phys_dev;
1236
1237 init_waitqueue_head(&hba->shutdown_wait);
1238 init_waitqueue_head(&hba->destroy_wait);
1239
1240 return hba;
1241bind_err:
1242 printk(KERN_ERR PFX "create_interface: bind error\n");
1243 kfree(hba);
1244 return NULL;
1245}
1246
1247static int bnx2fc_interface_setup(struct bnx2fc_hba *hba,
1248 enum fip_state fip_mode)
1249{
1250 int rc = 0;
1251 struct net_device *netdev = hba->netdev;
1252 struct fcoe_ctlr *fip = &hba->ctlr;
1253
1254 dev_hold(netdev);
1255 kref_init(&hba->kref);
1256
1257 hba->flags = 0;
1258
1259 /* Initialize FIP */
1260 memset(fip, 0, sizeof(*fip));
1261 fcoe_ctlr_init(fip, fip_mode);
1262 hba->ctlr.send = bnx2fc_fip_send;
1263 hba->ctlr.update_mac = bnx2fc_update_src_mac;
1264 hba->ctlr.get_src_addr = bnx2fc_get_src_mac;
1265 set_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done);
1266
1267 rc = bnx2fc_netdev_setup(hba);
1268 if (rc)
1269 goto setup_err;
1270
1271 hba->next_conn_id = 0;
1272
1273 memset(hba->tgt_ofld_list, 0, sizeof(hba->tgt_ofld_list));
1274 hba->num_ofld_sess = 0;
1275
1276 return 0;
1277
1278setup_err:
1279 fcoe_ctlr_destroy(&hba->ctlr);
1280 dev_put(netdev);
1281 bnx2fc_interface_put(hba);
1282 return rc;
1283}
1284
1285/**
1286 * bnx2fc_if_create - Create FCoE instance on a given interface
1287 *
1288 * @hba: FCoE interface to create a local port on
1289 * @parent: Device pointer to be the parent in sysfs for the SCSI host
1290 * @npiv: Indicates if the port is vport or not
1291 *
1292 * Creates an fc_lport instance and a Scsi_Host instance and configures them.
1293 *
1294 * Returns: Allocated fc_lport or an error pointer
1295 */
1296static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
1297 struct device *parent, int npiv)
1298{
1299 struct fc_lport *lport = NULL;
1300 struct fcoe_port *port;
1301 struct Scsi_Host *shost;
1302 struct fc_vport *vport = dev_to_vport(parent);
1303 int rc = 0;
1304
1305 /* Allocate Scsi_Host structure */
1306 if (!npiv) {
1307 lport = libfc_host_alloc(&bnx2fc_shost_template,
1308 sizeof(struct fcoe_port));
1309 } else {
1310 lport = libfc_vport_create(vport,
1311 sizeof(struct fcoe_port));
1312 }
1313
1314 if (!lport) {
1315 printk(KERN_ERR PFX "could not allocate scsi host structure\n");
1316 return NULL;
1317 }
1318 shost = lport->host;
1319 port = lport_priv(lport);
1320 port->lport = lport;
1321 port->priv = hba;
1322 INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
1323
1324 /* Configure fcoe_port */
1325 rc = bnx2fc_lport_config(lport);
1326 if (rc)
1327 goto lp_config_err;
1328
1329 if (npiv) {
1330 vport = dev_to_vport(parent);
1331 printk(KERN_ERR PFX "Setting vport names, 0x%llX 0x%llX\n",
1332 vport->node_name, vport->port_name);
1333 fc_set_wwnn(lport, vport->node_name);
1334 fc_set_wwpn(lport, vport->port_name);
1335 }
1336 /* Configure netdev and networking properties of the lport */
1337 rc = bnx2fc_net_config(lport);
1338 if (rc) {
1339 printk(KERN_ERR PFX "Error on bnx2fc_net_config\n");
1340 goto lp_config_err;
1341 }
1342
1343 rc = bnx2fc_shost_config(lport, parent);
1344 if (rc) {
1345 printk(KERN_ERR PFX "Couldn't configure shost for %s\n",
1346 hba->netdev->name);
1347 goto lp_config_err;
1348 }
1349
1350 /* Initialize the libfc library */
1351 rc = bnx2fc_libfc_config(lport);
1352 if (rc) {
1353 printk(KERN_ERR PFX "Couldn't configure libfc\n");
1354 goto shost_err;
1355 }
1356 fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
1357
1358 /* Allocate exchange manager */
1359 if (!npiv) {
1360 rc = bnx2fc_em_config(lport);
1361 if (rc) {
1362 printk(KERN_ERR PFX "Error on bnx2fc_em_config\n");
1363 goto shost_err;
1364 }
1365 }
1366
1367 bnx2fc_interface_get(hba);
1368 return lport;
1369
1370shost_err:
1371 scsi_remove_host(shost);
1372lp_config_err:
1373 scsi_host_put(lport->host);
1374 return NULL;
1375}
1376
1377static void bnx2fc_netdev_cleanup(struct bnx2fc_hba *hba)
1378{
1379 /* Don't listen for Ethernet packets anymore */
1380 __dev_remove_pack(&hba->fcoe_packet_type);
1381 __dev_remove_pack(&hba->fip_packet_type);
1382 synchronize_net();
1383}
1384
1385static void bnx2fc_if_destroy(struct fc_lport *lport)
1386{
1387 struct fcoe_port *port = lport_priv(lport);
1388 struct bnx2fc_hba *hba = port->priv;
1389
1390 BNX2FC_HBA_DBG(hba->ctlr.lp, "ENTERED bnx2fc_if_destroy\n");
1391 /* Stop the transmit retry timer */
1392 del_timer_sync(&port->timer);
1393
1394 /* Free existing transmit skbs */
1395 fcoe_clean_pending_queue(lport);
1396
1397 bnx2fc_interface_put(hba);
1398
1399 /* Free queued packets for the receive thread */
1400 bnx2fc_clean_rx_queue(lport);
1401
1402 /* Detach from scsi-ml */
1403 fc_remove_host(lport->host);
1404 scsi_remove_host(lport->host);
1405
1406 /*
1407 * Note that only the physical lport will have the exchange manager;
1408 * for vports, this call is a no-op.
1409 */
1410 fc_exch_mgr_free(lport);
1411
1412 /* Free memory used by statistical counters */
1413 fc_lport_free_stats(lport);
1414
1415 /* Release Scsi_Host */
1416 scsi_host_put(lport->host);
1417}
1418
1419/**
1420 * bnx2fc_destroy - Destroy a bnx2fc FCoE interface
1421 *
1422 * @netdev: The net_device of the Ethernet (VLAN) interface whose
1423 * FCoE instance is to be destroyed
1424 *
1425 * Called from sysfs.
1426 *
1427 * Returns: 0 for success
1428 */
1429static int bnx2fc_destroy(struct net_device *netdev)
1430{
1431 struct bnx2fc_hba *hba = NULL;
1432 struct net_device *phys_dev;
1433 int rc = 0;
1434
1435 if (!rtnl_trylock())
1436 return restart_syscall();
1437
1438 mutex_lock(&bnx2fc_dev_lock);
1439#ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE
1440 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1441 rc = -ENODEV;
1442 goto netdev_err;
1443 }
1444#endif
1445 /* obtain physical netdev */
1446 if (netdev->priv_flags & IFF_802_1Q_VLAN)
1447 phys_dev = vlan_dev_real_dev(netdev);
1448 else {
1449 printk(KERN_ERR PFX "Not a vlan device\n");
1450 rc = -ENODEV;
1451 goto netdev_err;
1452 }
1453
1454 hba = bnx2fc_hba_lookup(phys_dev);
1455 if (!hba || !hba->ctlr.lp) {
1456 rc = -ENODEV;
1457 printk(KERN_ERR PFX "bnx2fc_destroy: hba or lport not found\n");
1458 goto netdev_err;
1459 }
1460
1461 if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
1462 printk(KERN_ERR PFX "bnx2fc_destroy: Create not called\n");
1463 goto netdev_err;
1464 }
1465
1466 bnx2fc_netdev_cleanup(hba);
1467
1468 bnx2fc_stop(hba);
1469
1470 bnx2fc_if_destroy(hba->ctlr.lp);
1471
1472 destroy_workqueue(hba->timer_work_queue);
1473
1474 if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
1475 bnx2fc_fw_destroy(hba);
1476
1477 clear_bit(BNX2FC_CREATE_DONE, &hba->init_done);
1478netdev_err:
1479 mutex_unlock(&bnx2fc_dev_lock);
1480 rtnl_unlock();
1481 return rc;
1482}
1483
1484static void bnx2fc_destroy_work(struct work_struct *work)
1485{
1486 struct fcoe_port *port;
1487 struct fc_lport *lport;
1488
1489 port = container_of(work, struct fcoe_port, destroy_work);
1490 lport = port->lport;
1491
1492 BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
1493
1494 bnx2fc_port_shutdown(lport);
1495 rtnl_lock();
1496 mutex_lock(&bnx2fc_dev_lock);
1497 bnx2fc_if_destroy(lport);
1498 mutex_unlock(&bnx2fc_dev_lock);
1499 rtnl_unlock();
1500}
1501
1502static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba)
1503{
1504 bnx2fc_free_fw_resc(hba);
1505 bnx2fc_free_task_ctx(hba);
1506}
1507
1508/**
1509 * bnx2fc_bind_adapter_devices - binds bnx2fc adapter with the associated
1510 * pci structure
1511 *
1512 * @hba: Adapter instance
1513 */
1514static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba)
1515{
1516 if (bnx2fc_setup_task_ctx(hba))
1517 goto mem_err;
1518
1519 if (bnx2fc_setup_fw_resc(hba))
1520 goto mem_err;
1521
1522 return 0;
1523mem_err:
1524 bnx2fc_unbind_adapter_devices(hba);
1525 return -ENOMEM;
1526}
1527
1528static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba)
1529{
1530 struct cnic_dev *cnic;
1531
1532 if (!hba->cnic) {
1533 printk(KERN_ERR PFX "cnic is NULL\n");
1534 return -ENODEV;
1535 }
1536 cnic = hba->cnic;
1537 hba->pcidev = cnic->pcidev;
1538 if (hba->pcidev)
1539 pci_dev_get(hba->pcidev);
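 /* reference is dropped in bnx2fc_unbind_pcidev() */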
1540
1541 return 0;
1542}
1543
1544static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
1545{
1546 if (hba->pcidev)
1547 pci_dev_put(hba->pcidev);
1548 hba->pcidev = NULL;
1549}
1550
1551
1552
1553/**
1554 * bnx2fc_ulp_start - cnic callback to initialize & start adapter instance
1555 *
1556 * @handle: transport handle pointing to adapter structure
1557 *
1558 * This function maps adapter structure to pcidev structure and initiates
1559 * firmware handshake to enable/initialize on-chip FCoE components.
1560 * This bnx2fc - cnic interface API callback is invoked after the following
1561 * conditions are met:
1562 * a) the underlying network interface is up (marked by the NETDEV_UP
1563 * event from netdev), and
1564 * b) the bnx2fc adapter structure is registered.
1565 */
1566static void bnx2fc_ulp_start(void *handle)
1567{
1568 struct bnx2fc_hba *hba = handle;
1569 struct fc_lport *lport = hba->ctlr.lp;
1570
1571 BNX2FC_MISC_DBG("Entered %s\n", __func__);
1572 mutex_lock(&bnx2fc_dev_lock);
1573
1574 if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
1575 goto start_disc;
1576
1577 if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done))
1578 bnx2fc_fw_init(hba);
1579
1580start_disc:
1581 mutex_unlock(&bnx2fc_dev_lock);
1582
1583 BNX2FC_MISC_DBG("bnx2fc started.\n");
1584
1585 /* Kick off Fabric discovery*/
1586 if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
1587 printk(KERN_ERR PFX "ulp_init: start discovery\n");
1588 lport->tt.frame_send = bnx2fc_xmit;
1589 bnx2fc_start_disc(hba);
1590 }
1591}
1592
1593static void bnx2fc_port_shutdown(struct fc_lport *lport)
1594{
1595 BNX2FC_MISC_DBG("Entered %s\n", __func__);
1596 fc_fabric_logoff(lport);
1597 fc_lport_destroy(lport);
1598}
1599
1600static void bnx2fc_stop(struct bnx2fc_hba *hba)
1601{
1602 struct fc_lport *lport;
1603 struct fc_lport *vport;
1604
1605 BNX2FC_MISC_DBG("ENTERED %s - init_done = %ld\n", __func__,
1606 hba->init_done);
1607 if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done) &&
1608 test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
1609 lport = hba->ctlr.lp;
1610 bnx2fc_port_shutdown(lport);
1611 BNX2FC_HBA_DBG(lport, "bnx2fc_stop: waiting for %d "
1612 "offloaded sessions\n",
1613 hba->num_ofld_sess);
1614 wait_event_interruptible(hba->shutdown_wait,
1615 (hba->num_ofld_sess == 0));
1616 mutex_lock(&lport->lp_mutex);
1617 list_for_each_entry(vport, &lport->vports, list)
1618 fc_host_port_type(vport->host) = FC_PORTTYPE_UNKNOWN;
1619 mutex_unlock(&lport->lp_mutex);
1620 fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
1621 fcoe_ctlr_link_down(&hba->ctlr);
1622 fcoe_clean_pending_queue(lport);
1623
1624 mutex_lock(&hba->hba_mutex);
1625 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
1626 clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
1627
1628 clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
1629 mutex_unlock(&hba->hba_mutex);
1630 }
1631}
1632
1633static int bnx2fc_fw_init(struct bnx2fc_hba *hba)
1634{
1635#define BNX2FC_INIT_POLL_TIME (1000 / HZ)
1636 int rc = -1;
1637 int i = HZ;
1638
1639 rc = bnx2fc_bind_adapter_devices(hba);
1640 if (rc) {
1641 printk(KERN_ALERT PFX
1642 "bnx2fc_bind_adapter_devices failed - rc = %d\n", rc);
1643 goto err_out;
1644 }
1645
1646 rc = bnx2fc_send_fw_fcoe_init_msg(hba);
1647 if (rc) {
1648 printk(KERN_ALERT PFX
1649 "bnx2fc_send_fw_fcoe_init_msg failed - rc = %d\n", rc);
1650 goto err_unbind;
1651 }
1652
1653 /*
1654 * Wait until the adapter init message is complete, and adapter
1655 * state is UP.
1656 */
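 /* HZ polls of (1000 / HZ) ms each gives an overall budget of ~1 second */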
1657 while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
1658 msleep(BNX2FC_INIT_POLL_TIME);
1659
1660 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) {
1661 printk(KERN_ERR PFX "bnx2fc_start: %s failed to initialize. "
1662 "Ignoring...\n",
1663 hba->cnic->netdev->name);
1664 rc = -1;
1665 goto err_unbind;
1666 }
1667
1668
1669 /* Mark HBA to indicate that the FW INIT is done */
1670 set_bit(BNX2FC_FW_INIT_DONE, &hba->init_done);
1671 return 0;
1672
1673err_unbind:
1674 bnx2fc_unbind_adapter_devices(hba);
1675err_out:
1676 return rc;
1677}
1678
1679static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
1680{
1681 if (test_and_clear_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
1682 if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) {
1683 init_timer(&hba->destroy_timer);
1684 hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT +
1685 jiffies;
1686 hba->destroy_timer.function = bnx2fc_destroy_timer;
1687 hba->destroy_timer.data = (unsigned long)hba;
1688 add_timer(&hba->destroy_timer);
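/*
 * The destroy_timer presumably wakes destroy_wait on expiry, so this
 * wait stays bounded even if the destroy completion never arrives
 * from the firmware.
 */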
1689 wait_event_interruptible(hba->destroy_wait,
1690 (hba->flags &
1691 BNX2FC_FLAG_DESTROY_CMPL));
1692 /* This should never happen */
1693 if (signal_pending(current))
1694 flush_signals(current);
1695
1696 del_timer_sync(&hba->destroy_timer);
1697 }
1698 bnx2fc_unbind_adapter_devices(hba);
1699 }
1700}
1701
1702/**
1703 * bnx2fc_ulp_stop - cnic callback to shutdown adapter instance
1704 *
1705 * @handle: transport handle pointing to adapter structure
1706 *
1707 * Driver checks if adapter is already in shutdown mode, if not start
1708 * the shutdown process.
1709 */
1710static void bnx2fc_ulp_stop(void *handle)
1711{
1712 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)handle;
1713
1714 printk(KERN_ERR PFX "ULP_STOP\n");
1715
1716 mutex_lock(&bnx2fc_dev_lock);
1717 bnx2fc_stop(hba);
1718 bnx2fc_fw_destroy(hba);
1719 mutex_unlock(&bnx2fc_dev_lock);
1720}
1721
1722static void bnx2fc_start_disc(struct bnx2fc_hba *hba)
1723{
1724 struct fc_lport *lport;
1725 int wait_cnt = 0;
1726
1727 BNX2FC_MISC_DBG("Entered %s\n", __func__);
1728 /* Kick off FIP/FLOGI */
1729 if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
1730 printk(KERN_ERR PFX "Init not done yet\n");
1731 return;
1732 }
1733
1734 lport = hba->ctlr.lp;
1735 BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
1736
1737 if (!bnx2fc_link_ok(lport)) {
1738 BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
1739 fcoe_ctlr_link_up(&hba->ctlr);
1740 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
1741 set_bit(ADAPTER_STATE_READY, &hba->adapter_state);
1742 }
1743
1744 /* wait for the FCF to be selected before issuing FLOGI */
1745 while (!hba->ctlr.sel_fcf) {
1746 msleep(250);
1747 /* give up after 3 secs */
1748 if (++wait_cnt > 12)
1749 break;
1750 }
1751 fc_lport_init(lport);
1752 fc_fabric_login(lport);
1753}
1754
1755
1756/**
1757 * bnx2fc_ulp_init - Initialize an adapter instance
1758 *
1759 * @dev: cnic device handle
1760 * Called from cnic_register_driver() context to initialize all
1761 * enumerated cnic devices. This routine allocates adapter structure
1762 * and other device specific resources.
1763 */
1764static void bnx2fc_ulp_init(struct cnic_dev *dev)
1765{
1766 struct bnx2fc_hba *hba;
1767 int rc = 0;
1768
1769 BNX2FC_MISC_DBG("Entered %s\n", __func__);
1770 /* bnx2fc works only when bnx2x is loaded */
1771 if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
1772 printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s,"
1773 " flags: %lx\n",
1774 dev->netdev->name, dev->flags);
1775 return;
1776 }
1777
1778 /* Configure FCoE interface */
1779 hba = bnx2fc_interface_create(dev);
1780 if (!hba) {
1781 printk(KERN_ERR PFX "hba initialization failed\n");
1782 return;
1783 }
1784
1785 /* Add HBA to the adapter list */
1786 mutex_lock(&bnx2fc_dev_lock);
1787 list_add_tail(&hba->link, &adapter_list);
1788 adapter_count++;
1789 mutex_unlock(&bnx2fc_dev_lock);
1790
1791 clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
1792 rc = dev->register_device(dev, CNIC_ULP_FCOE,
1793 (void *) hba);
1794 if (rc)
1795 printk(KERN_ALERT PFX "register_device failed, rc = %d\n", rc);
1796 else
1797 set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
1798}
1799
1800
1801static int bnx2fc_disable(struct net_device *netdev)
1802{
1803 struct bnx2fc_hba *hba;
1804 struct net_device *phys_dev;
1805 struct ethtool_drvinfo drvinfo;
1806 int rc = 0;
1807
1808 if (!rtnl_trylock()) {
1809 printk(KERN_ERR PFX "retrying for rtnl_lock\n");
1810 return -EIO;
1811 }
1812
1813 mutex_lock(&bnx2fc_dev_lock);
1814
1815 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1816 rc = -ENODEV;
1817 goto nodev;
1818 }
1819
1820 /* obtain physical netdev */
1821 if (netdev->priv_flags & IFF_802_1Q_VLAN)
1822 phys_dev = vlan_dev_real_dev(netdev);
1823 else {
1824 printk(KERN_ERR PFX "Not a vlan device\n");
1825 rc = -ENODEV;
1826 goto nodev;
1827 }
1828
1829 /* verify if the physical device is a netxtreme2 device */
1830 if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
1831 memset(&drvinfo, 0, sizeof(drvinfo));
1832 phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
1833 if (strcmp(drvinfo.driver, "bnx2x")) {
1834 printk(KERN_ERR PFX "Not a netxtreme2 device\n");
1835 rc = -ENODEV;
1836 goto nodev;
1837 }
1838 } else {
1839 printk(KERN_ERR PFX "unable to obtain drv_info\n");
1840 rc = -ENODEV;
1841 goto nodev;
1842 }
1843
1844 printk(KERN_ERR PFX "phys_dev is netxtreme2 device\n");
1845
1846 /* obtain hba and initialize rest of the structure */
1847 hba = bnx2fc_hba_lookup(phys_dev);
1848 if (!hba || !hba->ctlr.lp) {
1849 rc = -ENODEV;
1850 printk(KERN_ERR PFX "bnx2fc_disable: hba or lport not found\n");
1851 } else {
1852 fcoe_ctlr_link_down(&hba->ctlr);
1853 fcoe_clean_pending_queue(hba->ctlr.lp);
1854 }
1855
1856nodev:
1857 mutex_unlock(&bnx2fc_dev_lock);
1858 rtnl_unlock();
1859 return rc;
1860}
1861
1862
1863static int bnx2fc_enable(struct net_device *netdev)
1864{
1865 struct bnx2fc_hba *hba;
1866 struct net_device *phys_dev;
1867 struct ethtool_drvinfo drvinfo;
1868 int rc = 0;
1869
1870 if (!rtnl_trylock()) {
1871 printk(KERN_ERR PFX "retrying for rtnl_lock\n");
1872 return -EIO;
1873 }
1874
1875 BNX2FC_MISC_DBG("Entered %s\n", __func__);
1876 mutex_lock(&bnx2fc_dev_lock);
1877
1878 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1879 rc = -ENODEV;
1880 goto nodev;
1881 }
1882
1883 /* obtain physical netdev */
1884 if (netdev->priv_flags & IFF_802_1Q_VLAN)
1885 phys_dev = vlan_dev_real_dev(netdev);
1886 else {
1887 printk(KERN_ERR PFX "Not a vlan device\n");
1888 rc = -ENODEV;
1889 goto nodev;
1890 }
1891 /* verify if the physical device is a netxtreme2 device */
1892 if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
1893 memset(&drvinfo, 0, sizeof(drvinfo));
1894 phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
1895 if (strcmp(drvinfo.driver, "bnx2x")) {
1896 printk(KERN_ERR PFX "Not a netxtreme2 device\n");
1897 rc = -ENODEV;
1898 goto nodev;
1899 }
1900 } else {
1901 printk(KERN_ERR PFX "unable to obtain drv_info\n");
1902 rc = -ENODEV;
1903 goto nodev;
1904 }
1905
1906 /* obtain hba and initialize rest of the structure */
1907 hba = bnx2fc_hba_lookup(phys_dev);
1908 if (!hba || !hba->ctlr.lp) {
1909 rc = -ENODEV;
1910 printk(KERN_ERR PFX "bnx2fc_enable: hba or lport not found\n");
1911 } else if (!bnx2fc_link_ok(hba->ctlr.lp))
1912 fcoe_ctlr_link_up(&hba->ctlr);
1913
1914nodev:
1915 mutex_unlock(&bnx2fc_dev_lock);
1916 rtnl_unlock();
1917 return rc;
1918}
1919
1920/**
1921 * bnx2fc_create - Create bnx2fc FCoE interface
1922 *
1923 * @netdev: The net_device of the VLAN interface to create the instance on
1924 * @fip_mode: FIP mode of operation; only FIP_MODE_FABRIC is supported
1925 *
1926 * Called from sysfs.
1927 *
1928 * Returns: 0 for success
1929 */
1930static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
1931{
1932 struct bnx2fc_hba *hba;
1933 struct net_device *phys_dev;
1934 struct fc_lport *lport;
1935 struct ethtool_drvinfo drvinfo;
1936 int rc = 0;
1937 int vlan_id;
1938
1939 BNX2FC_MISC_DBG("Entered bnx2fc_create\n");
1940 if (fip_mode != FIP_MODE_FABRIC) {
1941 printk(KERN_ERR PFX "fip mode not FABRIC\n");
1942 return -EIO;
1943 }
1944
1945 if (!rtnl_trylock()) {
1946 printk(KERN_ERR PFX "retrying for rtnl_lock\n");
1947 return -EIO;
1948 }
1949 mutex_lock(&bnx2fc_dev_lock);
1950
1951#ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE
1952 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1953 rc = -ENODEV;
1954 goto mod_err;
1955 }
1956#endif
1957
1958 if (!try_module_get(THIS_MODULE)) {
1959 rc = -EINVAL;
1960 goto mod_err;
1961 }
1962
1963 /* obtain physical netdev */
1964 if (netdev->priv_flags & IFF_802_1Q_VLAN) {
1965 phys_dev = vlan_dev_real_dev(netdev);
1966 vlan_id = vlan_dev_vlan_id(netdev);
1967 } else {
1968 printk(KERN_ERR PFX "Not a vlan device\n");
1969 rc = -EINVAL;
1970 goto netdev_err;
1971 }
1972 /* verify if the physical device is a netxtreme2 device */
1973 if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
1974 memset(&drvinfo, 0, sizeof(drvinfo));
1975 phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
1976 if (strcmp(drvinfo.driver, "bnx2x")) {
1977 printk(KERN_ERR PFX "Not a netxtreme2 device\n");
1978 rc = -EINVAL;
1979 goto netdev_err;
1980 }
1981 } else {
1982 printk(KERN_ERR PFX "unable to obtain drv_info\n");
1983 rc = -EINVAL;
1984 goto netdev_err;
1985 }
1986
1987 /* obtain hba and initialize rest of the structure */
1988 hba = bnx2fc_hba_lookup(phys_dev);
1989 if (!hba) {
1990 rc = -ENODEV;
1991 printk(KERN_ERR PFX "bnx2fc_create: hba not found\n");
1992 goto netdev_err;
1993 }
1994
1995 if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
1996 rc = bnx2fc_fw_init(hba);
1997 if (rc)
1998 goto netdev_err;
1999 }
2000
2001 if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
2002 rc = -EEXIST;
2003 goto netdev_err;
2004 }
2005
2006 /* update netdev with vlan netdev */
2007 hba->netdev = netdev;
2008 hba->vlan_id = vlan_id;
2009 hba->vlan_enabled = 1;
2010
2011 rc = bnx2fc_interface_setup(hba, fip_mode);
2012 if (rc) {
2013 printk(KERN_ERR PFX "bnx2fc_interface_setup failed\n");
2014 goto ifput_err;
2015 }
2016
2017 hba->timer_work_queue =
2018 create_singlethread_workqueue("bnx2fc_timer_wq");
2019 if (!hba->timer_work_queue) {
2020 printk(KERN_ERR PFX "ulp_init could not create timer_wq\n");
2021 rc = -EINVAL;
2022 goto ifput_err;
2023 }
2024
2025 lport = bnx2fc_if_create(hba, &hba->pcidev->dev, 0);
2026 if (!lport) {
2027 printk(KERN_ERR PFX "Failed to create interface (%s)\n",
2028 netdev->name);
2029 bnx2fc_netdev_cleanup(hba);
2030 rc = -EINVAL;
2031 goto if_create_err;
2032 }
2033
2034 lport->boot_time = jiffies;
2035
2036 /* Make this master N_port */
2037 hba->ctlr.lp = lport;
2038
2039 set_bit(BNX2FC_CREATE_DONE, &hba->init_done);
2040 printk(KERN_ERR PFX "create: START DISC\n");
2041 bnx2fc_start_disc(hba);
2042 /*
2043 * Release from kref_init in bnx2fc_interface_setup, on success
2044 * lport should be holding a reference taken in bnx2fc_if_create
2045 */
2046 bnx2fc_interface_put(hba);
2047 /* put netdev that was held while calling dev_get_by_name */
2048 mutex_unlock(&bnx2fc_dev_lock);
2049 rtnl_unlock();
2050 return 0;
2051
2052if_create_err:
2053 destroy_workqueue(hba->timer_work_queue);
2054ifput_err:
2055 bnx2fc_interface_put(hba);
2056netdev_err:
2057 module_put(THIS_MODULE);
2058mod_err:
2059 mutex_unlock(&bnx2fc_dev_lock);
2060 rtnl_unlock();
2061 return rc;
2062}
2063
2064/**
2065 * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc adapter instance
2066 *
2067 * @cnic: Pointer to cnic device instance
2068 *
2069 **/
2070static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic)
2071{
2072 struct list_head *list;
2073 struct list_head *temp;
2074 struct bnx2fc_hba *hba;
2075
2076 /* Called with bnx2fc_dev_lock held */
2077 list_for_each_safe(list, temp, &adapter_list) {
2078 hba = list_entry(list, struct bnx2fc_hba, link);
2079 if (hba->cnic == cnic)
2080 return hba;
2081 }
2082 return NULL;
2083}
2084
2085static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev)
2086{
2087 struct list_head *list;
2088 struct list_head *temp;
2089 struct bnx2fc_hba *hba;
2090
2091 /* Called with bnx2fc_dev_lock held */
2092 list_for_each_safe(list, temp, &adapter_list) {
2093 hba = list_entry(list, struct bnx2fc_hba, link);
2094 if (hba->phys_dev == phys_dev)
2095 return hba;
2096 }
2097 printk(KERN_ERR PFX "hba_lookup: hba NULL\n");
2098 return NULL;
2099}
2100
2101/**
2102 * bnx2fc_ulp_exit - shuts down adapter instance and frees all resources
2103 *
2104 * @dev: cnic device handle
2105 */
2106static void bnx2fc_ulp_exit(struct cnic_dev *dev)
2107{
2108 struct bnx2fc_hba *hba;
2109
2110 BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n");
2111
2112 if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
2113 printk(KERN_ERR PFX "bnx2fc port check: %s, flags: %lx\n",
2114 dev->netdev->name, dev->flags);
2115 return;
2116 }
2117
2118 mutex_lock(&bnx2fc_dev_lock);
2119 hba = bnx2fc_find_hba_for_cnic(dev);
2120 if (!hba) {
2121 printk(KERN_ERR PFX "bnx2fc_ulp_exit: hba not found, dev 0x%p\n",
2122 dev);
2123 mutex_unlock(&bnx2fc_dev_lock);
2124 return;
2125 }
2126
2127 list_del_init(&hba->link);
2128 adapter_count--;
2129
2130 if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
2131 /* destroy not called yet; tear down the interface now */
2132 bnx2fc_netdev_cleanup(hba);
2133 bnx2fc_if_destroy(hba->ctlr.lp);
2134 }
2135 mutex_unlock(&bnx2fc_dev_lock);
2136
2137 bnx2fc_ulp_stop(hba);
2138 /* unregister cnic device */
2139 if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
2140 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
2141 bnx2fc_interface_destroy(hba);
2142}
2143
2144/**
2145 * bnx2fc_fcoe_reset - Resets the fcoe
2146 *
2147 * @shost: shost the reset is from
2148 *
2149 * Returns: always 0
2150 */
2151static int bnx2fc_fcoe_reset(struct Scsi_Host *shost)
2152{
2153 struct fc_lport *lport = shost_priv(shost);
2154 fc_lport_reset(lport);
2155 return 0;
2156}
2157
2158
2159static bool bnx2fc_match(struct net_device *netdev)
2160{
2161 mutex_lock(&bnx2fc_dev_lock);
2162 if (netdev->priv_flags & IFF_802_1Q_VLAN) {
2163 struct net_device *phys_dev = vlan_dev_real_dev(netdev);
2164
2165 if (bnx2fc_hba_lookup(phys_dev)) {
2166 mutex_unlock(&bnx2fc_dev_lock);
2167 return true;
2168 }
2169 }
2170 mutex_unlock(&bnx2fc_dev_lock);
2171 return false;
2172}
2173
2174
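/*
 * fcoe_transport entry handed to libfcoe at module init; libfcoe calls
 * .match to decide whether a given netdev belongs to bnx2fc before
 * routing create/destroy/enable/disable requests here.
 */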
2175static struct fcoe_transport bnx2fc_transport = {
2176 .name = {"bnx2fc"},
2177 .attached = false,
2178 .list = LIST_HEAD_INIT(bnx2fc_transport.list),
2179 .match = bnx2fc_match,
2180 .create = bnx2fc_create,
2181 .destroy = bnx2fc_destroy,
2182 .enable = bnx2fc_enable,
2183 .disable = bnx2fc_disable,
2184};
2185
2186/**
2187 * bnx2fc_percpu_thread_create - Create a receive thread for an
2188 * online CPU
2189 *
2190 * @cpu: cpu index for the online cpu
2191 */
2192static void bnx2fc_percpu_thread_create(unsigned int cpu)
2193{
2194 struct bnx2fc_percpu_s *p;
2195 struct task_struct *thread;
2196
2197 p = &per_cpu(bnx2fc_percpu, cpu);
2198
2199 thread = kthread_create(bnx2fc_percpu_io_thread,
2200 (void *)p,
2201 "bnx2fc_thread/%d", cpu);
2202 /* bind thread to the cpu */
2203 if (likely(!IS_ERR(thread))) {
2204 kthread_bind(thread, cpu);
2205 p->iothread = thread;
2206 wake_up_process(thread);
2207 }
2208}
2209
2210static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
2211{
2212 struct bnx2fc_percpu_s *p;
2213 struct task_struct *thread;
2214 struct bnx2fc_work *work, *tmp;
2215 LIST_HEAD(work_list);
2216
2217 BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu);
2218
2219 /* Prevent any new work from being queued for this CPU */
2220 p = &per_cpu(bnx2fc_percpu, cpu);
2221 spin_lock_bh(&p->fp_work_lock);
2222 thread = p->iothread;
2223 p->iothread = NULL;
2224
2225 list_splice_init(&p->work_list, &work_list); /* grab pending work */
2226 /* Free all work in the list */
2227 list_for_each_entry_safe(work, tmp, &work_list, list) {
2228 list_del_init(&work->list);
2229 bnx2fc_process_cq_compl(work->tgt, work->wqe);
2230 kfree(work);
2231 }
2232
2233 spin_unlock_bh(&p->fp_work_lock);
2234
2235 if (thread)
2236 kthread_stop(thread);
2237}
2238
2239/**
2240 * bnx2fc_cpu_callback - Handler for CPU hotplug events
2241 *
2242 * @nfb: The callback data block
2243 * @action: The event triggering the callback
2244 * @hcpu: The index of the CPU that the event is for
2245 *
2246 * This creates or destroys per-CPU data for fcoe
2247 *
2248 * Returns NOTIFY_OK always.
2249 */
2250static int bnx2fc_cpu_callback(struct notifier_block *nfb,
2251 unsigned long action, void *hcpu)
2252{
2253 unsigned cpu = (unsigned long)hcpu;
2254
2255 switch (action) {
2256 case CPU_ONLINE:
2257 case CPU_ONLINE_FROZEN:
2258 printk(KERN_INFO PFX "CPU %x online: Create Rx thread\n", cpu);
2259 bnx2fc_percpu_thread_create(cpu);
2260 break;
2261 case CPU_DEAD:
2262 case CPU_DEAD_FROZEN:
2263 printk(KERN_INFO PFX "CPU %x offline: Remove Rx thread\n", cpu);
2264 bnx2fc_percpu_thread_destroy(cpu);
2265 break;
2266 default:
2267 break;
2268 }
2269 return NOTIFY_OK;
2270}
2271
2272/**
2273 * bnx2fc_mod_init - module init entry point
2274 *
2275 * Initialize driver wide global data structures, and register
2276 * with cnic module
2277 **/
2278static int __init bnx2fc_mod_init(void)
2279{
2280 struct fcoe_percpu_s *bg;
2281 struct task_struct *l2_thread;
2282 int rc = 0;
2283 unsigned int cpu = 0;
2284 struct bnx2fc_percpu_s *p;
2285
2286 printk(KERN_INFO PFX "%s", version);
2287
2288 /* register as a fcoe transport */
2289 rc = fcoe_transport_attach(&bnx2fc_transport);
2290 if (rc) {
2291 printk(KERN_ERR "failed to register an fcoe transport, check "
2292 "if libfcoe is loaded\n");
2293 goto out;
2294 }
2295
2296 INIT_LIST_HEAD(&adapter_list);
2297 mutex_init(&bnx2fc_dev_lock);
2298 adapter_count = 0;
2299
2300 /* Attach FC transport template */
2301 rc = bnx2fc_attach_transport();
2302 if (rc)
2303 goto detach_ft;
2304
2305 bnx2fc_wq = alloc_workqueue("bnx2fc", 0, 0);
2306 if (!bnx2fc_wq) {
2307 rc = -ENOMEM;
2308 goto release_bt;
2309 }
2310
2311 bg = &bnx2fc_global;
2312 skb_queue_head_init(&bg->fcoe_rx_list);
2313 l2_thread = kthread_create(bnx2fc_l2_rcv_thread,
2314 (void *)bg,
2315 "bnx2fc_l2_thread");
2316 if (IS_ERR(l2_thread)) {
2317 rc = PTR_ERR(l2_thread);
2318 goto free_wq;
2319 }
2320 wake_up_process(l2_thread);
2321 spin_lock_bh(&bg->fcoe_rx_list.lock);
2322 bg->thread = l2_thread;
2323 spin_unlock_bh(&bg->fcoe_rx_list.lock);
2324
2325 for_each_possible_cpu(cpu) {
2326 p = &per_cpu(bnx2fc_percpu, cpu);
2327 INIT_LIST_HEAD(&p->work_list);
2328 spin_lock_init(&p->fp_work_lock);
2329 }
2330
2331 for_each_online_cpu(cpu) {
2332 bnx2fc_percpu_thread_create(cpu);
2333 }
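/*
 * CPUs that are online now get their Rx threads here; the hotcpu
 * notifier registered below handles CPUs that come online or go
 * offline later.
 */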
2334
2335 /* Initialize per CPU interrupt thread */
2336 register_hotcpu_notifier(&bnx2fc_cpu_notifier);
2337
2338 cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
2339
2340 return 0;
2341
2342free_wq:
2343 destroy_workqueue(bnx2fc_wq);
2344release_bt:
2345 bnx2fc_release_transport();
2346detach_ft:
2347 fcoe_transport_detach(&bnx2fc_transport);
2348out:
2349 return rc;
2350}
2351
2352static void __exit bnx2fc_mod_exit(void)
2353{
2354 LIST_HEAD(to_be_deleted);
2355 struct bnx2fc_hba *hba, *next;
2356 struct fcoe_percpu_s *bg;
2357 struct task_struct *l2_thread;
2358 struct sk_buff *skb;
2359 unsigned int cpu = 0;
2360
2361 /*
2362 * NOTE: Since cnic calls register_driver routine rtnl_lock,
2363 * it will have higher precedence than bnx2fc_dev_lock.
2364 * unregister_device() cannot be called with bnx2fc_dev_lock
2365 * held.
2366 */
2367 mutex_lock(&bnx2fc_dev_lock);
2368 list_splice(&adapter_list, &to_be_deleted);
2369 INIT_LIST_HEAD(&adapter_list);
2370 adapter_count = 0;
2371 mutex_unlock(&bnx2fc_dev_lock);
2372
2373 /* Unregister with cnic */
2374 list_for_each_entry_safe(hba, next, &to_be_deleted, link) {
2375 list_del_init(&hba->link);
2376 printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p, kref = %d\n",
2377 hba, atomic_read(&hba->kref.refcount));
2378 bnx2fc_ulp_stop(hba);
2379 /* unregister cnic device */
2380 if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED,
2381 &hba->reg_with_cnic))
2382 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
2383 bnx2fc_interface_destroy(hba);
2384 }
2385 cnic_unregister_driver(CNIC_ULP_FCOE);
2386
2387 /* Destroy global thread */
2388 bg = &bnx2fc_global;
2389 spin_lock_bh(&bg->fcoe_rx_list.lock);
2390 l2_thread = bg->thread;
2391 bg->thread = NULL;
2392 while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL)
2393 kfree_skb(skb);
2394
2395 spin_unlock_bh(&bg->fcoe_rx_list.lock);
2396
2397 if (l2_thread)
2398 kthread_stop(l2_thread);
2399
2400 unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);
2401
2402 /* Destroy per cpu threads */
2403 for_each_online_cpu(cpu) {
2404 bnx2fc_percpu_thread_destroy(cpu);
2405 }
2406
2407 destroy_workqueue(bnx2fc_wq);
2408 /*
2409 * detach from scsi transport
2410 * must happen after all destroys are done
2411 */
2412 bnx2fc_release_transport();
2413
2414 /* detach from fcoe transport */
2415 fcoe_transport_detach(&bnx2fc_transport);
2416}
2417
2418module_init(bnx2fc_mod_init);
2419module_exit(bnx2fc_mod_exit);
2420
2421static struct fc_function_template bnx2fc_transport_function = {
2422 .show_host_node_name = 1,
2423 .show_host_port_name = 1,
2424 .show_host_supported_classes = 1,
2425 .show_host_supported_fc4s = 1,
2426 .show_host_active_fc4s = 1,
2427 .show_host_maxframe_size = 1,
2428
2429 .show_host_port_id = 1,
2430 .show_host_supported_speeds = 1,
2431 .get_host_speed = fc_get_host_speed,
2432 .show_host_speed = 1,
2433 .show_host_port_type = 1,
2434 .get_host_port_state = fc_get_host_port_state,
2435 .show_host_port_state = 1,
2436 .show_host_symbolic_name = 1,
2437
2438 .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
2439 sizeof(struct bnx2fc_rport)),
2440 .show_rport_maxframe_size = 1,
2441 .show_rport_supported_classes = 1,
2442
2443 .show_host_fabric_name = 1,
2444 .show_starget_node_name = 1,
2445 .show_starget_port_name = 1,
2446 .show_starget_port_id = 1,
2447 .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
2448 .show_rport_dev_loss_tmo = 1,
2449 .get_fc_host_stats = bnx2fc_get_host_stats,
2450
2451 .issue_fc_host_lip = bnx2fc_fcoe_reset,
2452
2453 .terminate_rport_io = fc_rport_terminate_io,
2454
2455 .vport_create = bnx2fc_vport_create,
2456 .vport_delete = bnx2fc_vport_destroy,
2457 .vport_disable = bnx2fc_vport_disable,
2458};
2459
2460static struct fc_function_template bnx2fc_vport_xport_function = {
2461 .show_host_node_name = 1,
2462 .show_host_port_name = 1,
2463 .show_host_supported_classes = 1,
2464 .show_host_supported_fc4s = 1,
2465 .show_host_active_fc4s = 1,
2466 .show_host_maxframe_size = 1,
2467
2468 .show_host_port_id = 1,
2469 .show_host_supported_speeds = 1,
2470 .get_host_speed = fc_get_host_speed,
2471 .show_host_speed = 1,
2472 .show_host_port_type = 1,
2473 .get_host_port_state = fc_get_host_port_state,
2474 .show_host_port_state = 1,
2475 .show_host_symbolic_name = 1,
2476
2477 .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
2478 sizeof(struct bnx2fc_rport)),
2479 .show_rport_maxframe_size = 1,
2480 .show_rport_supported_classes = 1,
2481
2482 .show_host_fabric_name = 1,
2483 .show_starget_node_name = 1,
2484 .show_starget_port_name = 1,
2485 .show_starget_port_id = 1,
2486 .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
2487 .show_rport_dev_loss_tmo = 1,
2488 .get_fc_host_stats = fc_get_host_stats,
2489 .issue_fc_host_lip = bnx2fc_fcoe_reset,
2490 .terminate_rport_io = fc_rport_terminate_io,
2491};
2492
2493/**
2494 * scsi_host_template structure used while registering with SCSI-ml
2495 */
2496static struct scsi_host_template bnx2fc_shost_template = {
2497 .module = THIS_MODULE,
2498 .name = "Broadcom Offload FCoE Initiator",
2499 .queuecommand = bnx2fc_queuecommand,
2500 .eh_abort_handler = bnx2fc_eh_abort, /* abts */
2501 .eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */
2502 .eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */
2503 .eh_host_reset_handler = fc_eh_host_reset,
2504 .slave_alloc = fc_slave_alloc,
2505 .change_queue_depth = fc_change_queue_depth,
2506 .change_queue_type = fc_change_queue_type,
2507 .this_id = -1,
2508 .cmd_per_lun = 3,
2509 .can_queue = (BNX2FC_MAX_OUTSTANDING_CMNDS/2),
2510 .use_clustering = ENABLE_CLUSTERING,
2511 .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
2512 .max_sectors = 512,
2513};
2514
2515static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
2516 .frame_send = bnx2fc_xmit,
2517 .elsct_send = bnx2fc_elsct_send,
2518 .fcp_abort_io = bnx2fc_abort_io,
2519 .fcp_cleanup = bnx2fc_cleanup,
2520 .rport_event_callback = bnx2fc_rport_event_handler,
2521};
2522
2523/**
2524 * bnx2fc_cnic_cb - global template of bnx2fc - cnic driver interface
2525 * structure carrying callback function pointers
2526 */
2527static struct cnic_ulp_ops bnx2fc_cnic_cb = {
2528 .owner = THIS_MODULE,
2529 .cnic_init = bnx2fc_ulp_init,
2530 .cnic_exit = bnx2fc_ulp_exit,
2531 .cnic_start = bnx2fc_ulp_start,
2532 .cnic_stop = bnx2fc_ulp_stop,
2533 .indicate_kcqes = bnx2fc_indicate_kcqe,
2534 .indicate_netevent = bnx2fc_indicate_netevent,
2535};
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
new file mode 100644
index 000000000000..4f4096836742
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -0,0 +1,1868 @@
1/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
2 * This file contains the low level functions that interact
3 * with the 57712 FCoE firmware.
4 *
5 * Copyright (c) 2008 - 2010 Broadcom Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
12 */
13
14#include "bnx2fc.h"
15
16DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
17
18static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
19 struct fcoe_kcqe *new_cqe_kcqe);
20static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
21 struct fcoe_kcqe *ofld_kcqe);
22static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
23 struct fcoe_kcqe *ofld_kcqe);
24static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
25static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
26 struct fcoe_kcqe *conn_destroy);
27
28int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
29{
30 struct fcoe_kwqe_stat stat_req;
31 struct kwqe *kwqe_arr[2];
32 int num_kwqes = 1;
33 int rc = 0;
34
35 memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
36 stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
37 stat_req.hdr.flags =
38 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
39
40 stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
41 stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);
42
43 kwqe_arr[0] = (struct kwqe *) &stat_req;
44
45 if (hba->cnic && hba->cnic->submit_kwqes)
46 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
47
48 return rc;
49}
50
51/**
52 * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
53 *
54 * @hba: adapter structure pointer
55 *
56 * Send down the FCoE firmware init KWQEs which initiate the initial
57 * handshake with the f/w.
58 *
59 */
60int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
61{
62 struct fcoe_kwqe_init1 fcoe_init1;
63 struct fcoe_kwqe_init2 fcoe_init2;
64 struct fcoe_kwqe_init3 fcoe_init3;
65 struct kwqe *kwqe_arr[3];
66 int num_kwqes = 3;
67 int rc = 0;
68
69 if (!hba->cnic) {
70 printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n");
71 return -ENODEV;
72 }
73
74 /* fill init1 KWQE */
75 memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
76 fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
77 fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
78 FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
79
80 fcoe_init1.num_tasks = BNX2FC_MAX_TASKS;
81 fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
82 fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
83 fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
84 fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
85 fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
86 fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
87 fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
88 fcoe_init1.task_list_pbl_addr_hi =
89 (u32) ((u64) hba->task_ctx_bd_dma >> 32);
90 fcoe_init1.mtu = hba->netdev->mtu;
91
92 fcoe_init1.flags = (PAGE_SHIFT <<
93 FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);
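 /* the firmware expects the log2 of the host page size, hence PAGE_SHIFT */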
94
95 fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;
96
97 /* fill init2 KWQE */
98 memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
99 fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
100 fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
101 FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
102
103 fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
104 fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
105 ((u64) hba->hash_tbl_pbl_dma >> 32);
106
107 fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
108 fcoe_init2.t2_hash_tbl_addr_hi = (u32)
109 ((u64) hba->t2_hash_tbl_dma >> 32);
110
111 fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
112 fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
113 ((u64) hba->t2_hash_tbl_ptr_dma >> 32);
114
115 fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;
116
117 /* fill init3 KWQE */
118 memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
119 fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
120 fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
121 FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
122 fcoe_init3.error_bit_map_lo = 0xffffffff;
123 fcoe_init3.error_bit_map_hi = 0xffffffff;
124
125
126 kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
127 kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
128 kwqe_arr[2] = (struct kwqe *) &fcoe_init3;
129
130 if (hba->cnic && hba->cnic->submit_kwqes)
131 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
132
133 return rc;
134}

135int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
136{
137 struct fcoe_kwqe_destroy fcoe_destroy;
138 struct kwqe *kwqe_arr[2];
139 int num_kwqes = 1;
140 int rc = -1;
141
142 /* fill destroy KWQE */
143 memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
144 fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
145 fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
146 FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
147 kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;
148
149 if (hba->cnic && hba->cnic->submit_kwqes)
150 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
151 return rc;
152}
153
154/**
155 * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
156 *
157 * @port: port structure pointer
158 * @tgt: bnx2fc_rport structure pointer
159 */
160int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
161 struct bnx2fc_rport *tgt)
162{
163 struct fc_lport *lport = port->lport;
164 struct bnx2fc_hba *hba = port->priv;
165 struct kwqe *kwqe_arr[4];
166 struct fcoe_kwqe_conn_offload1 ofld_req1;
167 struct fcoe_kwqe_conn_offload2 ofld_req2;
168 struct fcoe_kwqe_conn_offload3 ofld_req3;
169 struct fcoe_kwqe_conn_offload4 ofld_req4;
170 struct fc_rport_priv *rdata = tgt->rdata;
171 struct fc_rport *rport = tgt->rport;
172 int num_kwqes = 4;
173 u32 port_id;
174 int rc = 0;
175 u16 conn_id;
176
177 /* Initialize offload request 1 structure */
178 memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));
179
180 ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
181 ofld_req1.hdr.flags =
182 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
183
184
185 conn_id = (u16)tgt->fcoe_conn_id;
186 ofld_req1.fcoe_conn_id = conn_id;
187
188
189 ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
190 ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);
191
192 ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
193 ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);
194
195 ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
196 ofld_req1.rq_first_pbe_addr_hi =
197 (u32)((u64) tgt->rq_dma >> 32);
198
199 ofld_req1.rq_prod = 0x8000;
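 /*
  * Initial RQ producer value; the high bit is presumably the producer
  * toggle the firmware expects on a freshly offloaded queue.
  */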
200
201 /* Initialize offload request 2 structure */
202 memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));
203
204 ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
205 ofld_req2.hdr.flags =
206 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
207
208 ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;
209
210 ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
211 ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);
212
213 ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
214 ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);
215
216 ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
217 ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);
218
219 /* Initialize offload request 3 structure */
220 memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));
221
222 ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
223 ofld_req3.hdr.flags =
224 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
225
226 ofld_req3.vlan_tag = hba->vlan_id <<
227 FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
228 ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;
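 /* priority 3 is the 802.1p priority conventionally used for FCoE traffic */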
229
230 port_id = fc_host_port_id(lport->host);
231 if (port_id == 0) {
232 BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
233 return -EINVAL;
234 }
235
236 /*
237 * Store s_id of the initiator for further reference. This will
238 * be used during disable/destroy during linkdown processing as
239 * when the lport is reset, the port_id also is reset to 0
240 */
241 tgt->sid = port_id;
242 ofld_req3.s_id[0] = (port_id & 0x000000FF);
243 ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
244 ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;
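 /*
  * The 24-bit FC_ID is carried as three bytes, least significant byte
  * first: e.g. port_id 0x010203 yields s_id[] = {0x03, 0x02, 0x01}.
  * The d_id below is packed the same way.
  */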
245
246 port_id = rport->port_id;
247 ofld_req3.d_id[0] = (port_id & 0x000000FF);
248 ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
249 ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;
250
251 ofld_req3.tx_total_conc_seqs = rdata->max_seq;
252
253 ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
254 ofld_req3.rx_max_fc_pay_len = lport->mfs;
255
256 ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
257 ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
258 ofld_req3.rx_open_seqs_exch_c3 = 1;
259
260 ofld_req3.confq_first_pbe_addr_lo = (u32) tgt->confq_dma;
261 ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);
262
263 /* set mul_n_port_ids supported flag to 0, until it is supported */
264 ofld_req3.flags = 0;
265 /*
266 ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
267 FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
268 */
269 /* Info from PLOGI response */
270 ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
271 FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);
272
273 ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
274 FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);
275
276 /* vlan flag */
277 ofld_req3.flags |= (hba->vlan_enabled <<
278 FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
279
280 /* C2_VALID and ACK flags are not set as they are not supported */
281
282
283 /* Initialize offload request 4 structure */
284 memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
285 ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
286 ofld_req4.hdr.flags =
287 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
288
289 ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;
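 /* e_d_tov is in milliseconds; the division suggests a 20 ms firmware tick */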
290
291
292 ofld_req4.src_mac_addr_lo32[0] = port->data_src_addr[5];
293 /* local mac */
294 ofld_req4.src_mac_addr_lo32[1] = port->data_src_addr[4];
295 ofld_req4.src_mac_addr_lo32[2] = port->data_src_addr[3];
296 ofld_req4.src_mac_addr_lo32[3] = port->data_src_addr[2];
297 ofld_req4.src_mac_addr_hi16[0] = port->data_src_addr[1];
298 ofld_req4.src_mac_addr_hi16[1] = port->data_src_addr[0];
299 ofld_req4.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
300 ofld_req4.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
301 ofld_req4.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
302 ofld_req4.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
303 ofld_req4.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
304 ofld_req4.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
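 /*
  * The 6-byte MACs are packed into the lo32/hi16 fields in reverse byte
  * order: addr[5] lands in lo32[0] and addr[0] in hi16[1].
  */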
305
306 ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
307 ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
308
309 ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
310 ofld_req4.confq_pbl_base_addr_hi =
311 (u32)((u64) tgt->confq_pbl_dma >> 32);
312
313 kwqe_arr[0] = (struct kwqe *) &ofld_req1;
314 kwqe_arr[1] = (struct kwqe *) &ofld_req2;
315 kwqe_arr[2] = (struct kwqe *) &ofld_req3;
316 kwqe_arr[3] = (struct kwqe *) &ofld_req4;
317
318 if (hba->cnic && hba->cnic->submit_kwqes)
319 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
320
321 return rc;
322}
323
324/**
325 * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
326 *
327 * @port: port structure pointer
328 * @tgt: bnx2fc_rport structure pointer
329 */
330static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
331 struct bnx2fc_rport *tgt)
332{
333 struct kwqe *kwqe_arr[2];
334 struct bnx2fc_hba *hba = port->priv;
335 struct fcoe_kwqe_conn_enable_disable enbl_req;
336 struct fc_lport *lport = port->lport;
337 struct fc_rport *rport = tgt->rport;
338 int num_kwqes = 1;
339 int rc = 0;
340 u32 port_id;
341
342 memset(&enbl_req, 0x00,
343 sizeof(struct fcoe_kwqe_conn_enable_disable));
344 enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
345 enbl_req.hdr.flags =
346 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
347
348 enbl_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
349 /* local mac */
350 enbl_req.src_mac_addr_lo32[1] = port->data_src_addr[4];
351 enbl_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
352 enbl_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
353 enbl_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
354 enbl_req.src_mac_addr_hi16[1] = port->data_src_addr[0];
355
356 enbl_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
357 enbl_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
358 enbl_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
359 enbl_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
360 enbl_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
361 enbl_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
362
363 port_id = fc_host_port_id(lport->host);
364 if (port_id != tgt->sid) {
365 printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x,"
366 "sid = 0x%x\n", port_id, tgt->sid);
367 port_id = tgt->sid;
368 }
369 enbl_req.s_id[0] = (port_id & 0x000000FF);
370 enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
371 enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
372
373 port_id = rport->port_id;
374 enbl_req.d_id[0] = (port_id & 0x000000FF);
375 enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
376 enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
377 enbl_req.vlan_tag = hba->vlan_id <<
378 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
379 enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
380 enbl_req.vlan_flag = hba->vlan_enabled;
381 enbl_req.context_id = tgt->context_id;
382 enbl_req.conn_id = tgt->fcoe_conn_id;
383
384 kwqe_arr[0] = (struct kwqe *) &enbl_req;
385
386 if (hba->cnic && hba->cnic->submit_kwqes)
387 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
388 return rc;
389}
390
391/**
392 * bnx2fc_send_session_disable_req - initiates FCoE Session disable
393 *
394 * @port: port structure pointer
395 * @tgt: bnx2fc_rport structure pointer
396 */
397int bnx2fc_send_session_disable_req(struct fcoe_port *port,
398 struct bnx2fc_rport *tgt)
399{
400 struct bnx2fc_hba *hba = port->priv;
401 struct fcoe_kwqe_conn_enable_disable disable_req;
402 struct kwqe *kwqe_arr[2];
403 struct fc_rport *rport = tgt->rport;
404 int num_kwqes = 1;
405 int rc = 0;
406 u32 port_id;
407
408 memset(&disable_req, 0x00,
409 sizeof(struct fcoe_kwqe_conn_enable_disable));
410 disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
411 disable_req.hdr.flags =
412 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
413
414 disable_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
disable_req.src_mac_addr_lo32[1] = port->data_src_addr[4];
415 disable_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
416 disable_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
417 disable_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
418 disable_req.src_mac_addr_hi16[1] = port->data_src_addr[0];
419
420 disable_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
421 disable_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
422 disable_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
423 disable_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
424 disable_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
425 disable_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
426
427 port_id = tgt->sid;
428 disable_req.s_id[0] = (port_id & 0x000000FF);
429 disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
430 disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
431
432
433 port_id = rport->port_id;
434 disable_req.d_id[0] = (port_id & 0x000000FF);
435 disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
436 disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
437 disable_req.context_id = tgt->context_id;
438 disable_req.conn_id = tgt->fcoe_conn_id;
439 disable_req.vlan_tag = hba->vlan_id <<
440 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
441 disable_req.vlan_tag |=
442 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
443 disable_req.vlan_flag = hba->vlan_enabled;
444
445 kwqe_arr[0] = (struct kwqe *) &disable_req;
446
447 if (hba->cnic && hba->cnic->submit_kwqes)
448 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
449
450 return rc;
451}
452
453/**
454 * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
455 *
456 * @hba: adapter structure pointer
457 * @tgt: bnx2fc_rport structure pointer
458 */
459int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
460 struct bnx2fc_rport *tgt)
461{
462 struct fcoe_kwqe_conn_destroy destroy_req;
463 struct kwqe *kwqe_arr[2];
464 int num_kwqes = 1;
465 int rc = 0;
466
467 memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
468 destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
469 destroy_req.hdr.flags =
470 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
471
472 destroy_req.context_id = tgt->context_id;
473 destroy_req.conn_id = tgt->fcoe_conn_id;
474
475 kwqe_arr[0] = (struct kwqe *) &destroy_req;
476
477 if (hba->cnic && hba->cnic->submit_kwqes)
478 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
479
480 return rc;
481}
482
483static void bnx2fc_unsol_els_work(struct work_struct *work)
484{
485 struct bnx2fc_unsol_els *unsol_els;
486 struct fc_lport *lport;
487 struct fc_frame *fp;
488
489 unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
490 lport = unsol_els->lport;
491 fp = unsol_els->fp;
492 fc_exch_recv(lport, fp);
493 kfree(unsol_els);
494}
495
496void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
497 unsigned char *buf,
498 u32 frame_len, u16 l2_oxid)
499{
500 struct fcoe_port *port = tgt->port;
501 struct fc_lport *lport = port->lport;
502 struct bnx2fc_unsol_els *unsol_els;
503 struct fc_frame_header *fh;
504 struct fc_frame *fp;
505 struct sk_buff *skb;
506 u32 payload_len;
507 u32 crc;
508 u8 op;
509
510
511 unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
512 if (!unsol_els) {
513 BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
514 return;
515 }
516
517 BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
518 l2_oxid, frame_len);
519
520 payload_len = frame_len - sizeof(struct fc_frame_header);
521
522 fp = fc_frame_alloc(lport, payload_len);
523 if (!fp) {
524 printk(KERN_ERR PFX "fc_frame_alloc failure\n");
kfree(unsol_els);
525 return;
526 }
527
528 fh = (struct fc_frame_header *) fc_frame_header_get(fp);
529 /* Copy FC Frame header and payload into the frame */
530 memcpy(fh, buf, frame_len);
531
532 if (l2_oxid != FC_XID_UNKNOWN)
533 fh->fh_ox_id = htons(l2_oxid);
534
535 skb = fp_skb(fp);
536
537 if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
538 (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {
539
540 if (fh->fh_type == FC_TYPE_ELS) {
541 op = fc_frame_payload_op(fp);
542 if ((op == ELS_TEST) || (op == ELS_ESTC) ||
543 (op == ELS_FAN) || (op == ELS_CSU)) {
544 /*
545 * No need to reply for these
546 * ELS requests
547 */
548 printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
549 kfree_skb(skb);
kfree(unsol_els);
550 return;
551 }
552 }
553 crc = fcoe_fc_crc(fp);
554 fc_frame_init(fp);
555 fr_dev(fp) = lport;
556 fr_sof(fp) = FC_SOF_I3;
557 fr_eof(fp) = FC_EOF_T;
558 fr_crc(fp) = cpu_to_le32(~crc);
559 unsol_els->lport = lport;
560 unsol_els->fp = fp;
561 INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
562 queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
563 } else {
564 BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
565 kfree_skb(skb);
kfree(unsol_els);
566 }
567}
568
569static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
570{
571 u8 num_rq;
572 struct fcoe_err_report_entry *err_entry;
573 unsigned char *rq_data;
574 unsigned char *buf = NULL, *buf1;
575 int i;
576 u16 xid;
577 u32 frame_len, len;
578 struct bnx2fc_cmd *io_req = NULL;
579 struct fcoe_task_ctx_entry *task, *task_page;
580 struct bnx2fc_hba *hba = tgt->port->priv;
581 int task_idx, index;
582 int rc = 0;
583
584
585 BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
586 switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
587 case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
588 frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
589 FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;
590
591 num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;
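 /* ceil(frame_len / BNX2FC_RQ_BUF_SZ): number of RQ buffers the frame spans */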
592
593 rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
594 if (rq_data) {
595 buf = rq_data;
596 } else {
597 buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
598 GFP_ATOMIC);
599
600 if (!buf1) {
601 BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
602 break;
603 }
604
605 for (i = 0; i < num_rq; i++) {
606 rq_data = (unsigned char *)
607 bnx2fc_get_next_rqe(tgt, 1);
608 len = BNX2FC_RQ_BUF_SZ;
609 memcpy(buf1, rq_data, len);
610 buf1 += len;
611 }
612 }
613 bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
614 FC_XID_UNKNOWN);
615
616 if (buf != rq_data)
617 kfree(buf);
618 bnx2fc_return_rqe(tgt, num_rq);
619 break;
620
621 case FCOE_ERROR_DETECTION_CQE_TYPE:
622 /*
623 * In case of an error reporting CQE, a single RQ entry
624 * is consumed.
625 */
626 spin_lock_bh(&tgt->tgt_lock);
627 num_rq = 1;
628 err_entry = (struct fcoe_err_report_entry *)
629 bnx2fc_get_next_rqe(tgt, 1);
630 xid = err_entry->fc_hdr.ox_id;
631 BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
632 BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
633 err_entry->err_warn_bitmap_hi,
634 err_entry->err_warn_bitmap_lo);
635 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
636 err_entry->tx_buf_off, err_entry->rx_buf_off);
637
638 bnx2fc_return_rqe(tgt, 1);
639
640 if (xid > BNX2FC_MAX_XID) {
641 BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
642 xid);
643 spin_unlock_bh(&tgt->tgt_lock);
644 break;
645 }
646
647 task_idx = xid / BNX2FC_TASKS_PER_PAGE;
648 index = xid % BNX2FC_TASKS_PER_PAGE;
649 task_page = (struct fcoe_task_ctx_entry *)
650 hba->task_ctx[task_idx];
651 task = &(task_page[index]);
652
653 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
654 if (!io_req) {
655 spin_unlock_bh(&tgt->tgt_lock);
656 break;
657 }
658
659 if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
660 printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
661 spin_unlock_bh(&tgt->tgt_lock);
662 break;
663 }
664
665 if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
666 &io_req->req_flags)) {
667 BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
668 "progress.. ignore unsol err\n");
669 spin_unlock_bh(&tgt->tgt_lock);
670 break;
671 }
672
673 /*
674 * If ABTS is already in progress, and FW error is
675 * received after that, do not cancel the timeout_work
676 * and let the error recovery continue by explicitly
677 * logging out the target, when the ABTS eventually
678 * times out.
679 */
680 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
681 &io_req->req_flags)) {
682 /*
683 * Cancel the timeout_work, as we received IO
684 * completion with FW error.
685 */
686 if (cancel_delayed_work(&io_req->timeout_work))
687 kref_put(&io_req->refcount,
688 bnx2fc_cmd_release); /* timer hold */
689
690 rc = bnx2fc_initiate_abts(io_req);
691 if (rc != SUCCESS) {
692 BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts "
693 "failed. issue cleanup\n");
694 rc = bnx2fc_initiate_cleanup(io_req);
695 BUG_ON(rc);
696 }
697 } else
698 printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
699 "in ABTS processing\n", xid);
700 spin_unlock_bh(&tgt->tgt_lock);
701 break;
702
703 case FCOE_WARNING_DETECTION_CQE_TYPE:
704		/*
705		 * In case of a warning reporting CQE, a single RQ entry
706		 * is consumed.
707		 */
708 num_rq = 1;
709 err_entry = (struct fcoe_err_report_entry *)
710 bnx2fc_get_next_rqe(tgt, 1);
711		xid = be16_to_cpu(err_entry->fc_hdr.ox_id);
712 BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
713 BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
714 err_entry->err_warn_bitmap_hi,
715 err_entry->err_warn_bitmap_lo);
716 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
717 err_entry->tx_buf_off, err_entry->rx_buf_off);
718
719 bnx2fc_return_rqe(tgt, 1);
720 break;
721
722 default:
723 printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
724 break;
725 }
726}
727
728void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
729{
730 struct fcoe_task_ctx_entry *task;
731 struct fcoe_task_ctx_entry *task_page;
732 struct fcoe_port *port = tgt->port;
733 struct bnx2fc_hba *hba = port->priv;
734 struct bnx2fc_cmd *io_req;
735 int task_idx, index;
736 u16 xid;
737 u8 cmd_type;
738 u8 rx_state = 0;
739 u8 num_rq;
740
741 spin_lock_bh(&tgt->tgt_lock);
742 xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
743 if (xid >= BNX2FC_MAX_TASKS) {
744 printk(KERN_ALERT PFX "ERROR:xid out of range\n");
745 spin_unlock_bh(&tgt->tgt_lock);
746 return;
747 }
748 task_idx = xid / BNX2FC_TASKS_PER_PAGE;
749 index = xid % BNX2FC_TASKS_PER_PAGE;
750 task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
751 task = &(task_page[index]);
752
753 num_rq = ((task->rx_wr_tx_rd.rx_flags &
754 FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE) >>
755 FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT);
756
757 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
758
759 if (io_req == NULL) {
760 printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
761 spin_unlock_bh(&tgt->tgt_lock);
762 return;
763 }
764
765 /* Timestamp IO completion time */
766 cmd_type = io_req->cmd_type;
767
768 /* optimized completion path */
769 if (cmd_type == BNX2FC_SCSI_CMD) {
770 rx_state = ((task->rx_wr_tx_rd.rx_flags &
771 FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE) >>
772 FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT);
773
774 if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
775 bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
776 spin_unlock_bh(&tgt->tgt_lock);
777 return;
778 }
779 }
780
781 /* Process other IO completion types */
782 switch (cmd_type) {
783 case BNX2FC_SCSI_CMD:
784 if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
785 bnx2fc_process_abts_compl(io_req, task, num_rq);
786 else if (rx_state ==
787 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
788 bnx2fc_process_cleanup_compl(io_req, task, num_rq);
789 else
790 printk(KERN_ERR PFX "Invalid rx state - %d\n",
791 rx_state);
792 break;
793
794 case BNX2FC_TASK_MGMT_CMD:
795 BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
796 bnx2fc_process_tm_compl(io_req, task, num_rq);
797 break;
798
799 case BNX2FC_ABTS:
800 /*
801 * ABTS request received by firmware. ABTS response
802 * will be delivered to the task belonging to the IO
803 * that was aborted
804 */
805 BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
806 kref_put(&io_req->refcount, bnx2fc_cmd_release);
807 break;
808
809 case BNX2FC_ELS:
810 BNX2FC_IO_DBG(io_req, "cq_compl - call process_els_compl\n");
811 bnx2fc_process_els_compl(io_req, task, num_rq);
812 break;
813
814 case BNX2FC_CLEANUP:
815 BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
816 kref_put(&io_req->refcount, bnx2fc_cmd_release);
817 break;
818
819 default:
820 printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
821 break;
822 }
823 spin_unlock_bh(&tgt->tgt_lock);
824}
825
826struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
827{
828 struct bnx2fc_work *work;
829 work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
830 if (!work)
831 return NULL;
832
833 INIT_LIST_HEAD(&work->list);
834 work->tgt = tgt;
835 work->wqe = wqe;
836 return work;
837}
838
839int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
840{
841 struct fcoe_cqe *cq;
842 u32 cq_cons;
843 struct fcoe_cqe *cqe;
844 u16 wqe;
845 bool more_cqes_found = false;
846
847 /*
848 * cq_lock is a low contention lock used to protect
849 * the CQ data structure from being freed up during
850 * the upload operation
851 */
852 spin_lock_bh(&tgt->cq_lock);
853
854 if (!tgt->cq) {
855 printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
856 spin_unlock_bh(&tgt->cq_lock);
857 return 0;
858 }
859 cq = tgt->cq;
860 cq_cons = tgt->cq_cons_idx;
861 cqe = &cq[cq_cons];
862
863 do {
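		/*
		 * The drain loop below runs exactly twice: the first pass
		 * empties the CQ and re-arms it, and the second pass (when
		 * more_cqes_found flips back to false) catches any entries
		 * that raced with the re-arm before the loop exits.
		 */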
864 more_cqes_found ^= true;
865
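		/*
		 * A CQE belongs to the driver only while its toggle bit
		 * matches tgt->cq_curr_toggle_bit; the bit is flipped each
		 * time the consumer index wraps, so entries left over from
		 * the previous pass around the ring are never re-processed.
		 */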
866 while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
867 (tgt->cq_curr_toggle_bit <<
868 FCOE_CQE_TOGGLE_BIT_SHIFT)) {
869
870 /* new entry on the cq */
871 if (wqe & FCOE_CQE_CQE_TYPE) {
872 /* Unsolicited event notification */
873 bnx2fc_process_unsol_compl(tgt, wqe);
874 } else {
875 struct bnx2fc_work *work = NULL;
876 struct bnx2fc_percpu_s *fps = NULL;
877 unsigned int cpu = wqe % num_possible_cpus();
878
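				/*
				 * Steer this completion to the per-CPU I/O
				 * thread selected by the task id; if that
				 * thread is unavailable (e.g. its CPU went
				 * offline), fall back to processing the
				 * completion inline below.
				 */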
879 fps = &per_cpu(bnx2fc_percpu, cpu);
880 spin_lock_bh(&fps->fp_work_lock);
881 if (unlikely(!fps->iothread))
882 goto unlock;
883
884 work = bnx2fc_alloc_work(tgt, wqe);
885 if (work)
886 list_add_tail(&work->list,
887 &fps->work_list);
888unlock:
889 spin_unlock_bh(&fps->fp_work_lock);
890
891 /* Pending work request completion */
892 if (fps->iothread && work)
893 wake_up_process(fps->iothread);
894 else
895 bnx2fc_process_cq_compl(tgt, wqe);
896 }
897 cqe++;
898 tgt->cq_cons_idx++;
899
900 if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
901 tgt->cq_cons_idx = 0;
902 cqe = cq;
903 tgt->cq_curr_toggle_bit =
904 1 - tgt->cq_curr_toggle_bit;
905 }
906 }
907 /* Re-arm CQ */
908 if (more_cqes_found) {
909 tgt->conn_db->cq_arm.lo = -1;
910 wmb();
911 }
912 } while (more_cqes_found);
913
914 /*
915 * Commit tgt->cq_cons_idx change to the memory
916 * spin_lock implies full memory barrier, no need to smp_wmb
917 */
918
919 spin_unlock_bh(&tgt->cq_lock);
920 return 0;
921}
922
923/**
924 * bnx2fc_fastpath_notification - process global event queue (KCQ)
925 *
926 * @hba: adapter structure pointer
927 * @new_cqe_kcqe: pointer to newly DMA'd KCQ entry
928 *
929 * Fast path event notification handler
930 */
931static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
932 struct fcoe_kcqe *new_cqe_kcqe)
933{
934 u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
935 struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
936
937 if (!tgt) {
938 printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id);
939 return;
940 }
941
942 bnx2fc_process_new_cqes(tgt);
943}
944
945/**
946 * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
947 *
948 * @hba: adapter structure pointer
949 * @ofld_kcqe: connection offload kcqe pointer
950 *
951 * handle session offload completion, enable the session if offload is
952 * successful.
953 */
954static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
955 struct fcoe_kcqe *ofld_kcqe)
956{
957 struct bnx2fc_rport *tgt;
958 struct fcoe_port *port;
959 u32 conn_id;
960 u32 context_id;
961 int rc;
962
963 conn_id = ofld_kcqe->fcoe_conn_id;
964 context_id = ofld_kcqe->fcoe_conn_context_id;
965 tgt = hba->tgt_ofld_list[conn_id];
966 if (!tgt) {
967 printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
968 return;
969 }
970 BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
971 ofld_kcqe->fcoe_conn_context_id);
972 port = tgt->port;
973 if (hba != tgt->port->priv) {
974 printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n");
975 goto ofld_cmpl_err;
976 }
977 /*
978 * cnic has allocated a context_id for this session; use this
979 * while enabling the session.
980 */
981 tgt->context_id = context_id;
982 if (ofld_kcqe->completion_status) {
983 if (ofld_kcqe->completion_status ==
984 FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
985 printk(KERN_ERR PFX "unable to allocate FCoE context "
986 "resources\n");
987 set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
988 }
989 goto ofld_cmpl_err;
990 } else {
991
992 /* now enable the session */
993 rc = bnx2fc_send_session_enable_req(port, tgt);
994 if (rc) {
995 printk(KERN_ALERT PFX "enable session failed\n");
996 goto ofld_cmpl_err;
997 }
998 }
999 return;
1000ofld_cmpl_err:
1001 set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1002 wake_up_interruptible(&tgt->ofld_wait);
1003}
1004
1005/**
1006 * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
1007 *
1008 * @hba: adapter structure pointer
1009 * @ofld_kcqe: connection enable kcqe pointer
1010 *
1011 * handle session enable completion, mark the rport as ready
1012 */
1013
1014static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
1015 struct fcoe_kcqe *ofld_kcqe)
1016{
1017 struct bnx2fc_rport *tgt;
1018 u32 conn_id;
1019 u32 context_id;
1020
1021 context_id = ofld_kcqe->fcoe_conn_context_id;
1022 conn_id = ofld_kcqe->fcoe_conn_id;
1023 tgt = hba->tgt_ofld_list[conn_id];
1024 if (!tgt) {
1025 printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n");
1026 return;
1027 }
1028
1029 BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
1030 ofld_kcqe->fcoe_conn_context_id);
1031
1032 /*
1033 * context_id should be the same for this target during offload
1034 * and enable
1035 */
1036 if (tgt->context_id != context_id) {
1037 printk(KERN_ALERT PFX "context id mis-match\n");
1038 return;
1039 }
1040 if (hba != tgt->port->priv) {
1041 printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
1042 goto enbl_cmpl_err;
1043 }
1044 if (ofld_kcqe->completion_status) {
1045 goto enbl_cmpl_err;
1046 } else {
1047 /* enable successful - rport ready for issuing IOs */
1048 set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1049 set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1050 wake_up_interruptible(&tgt->ofld_wait);
1051 }
1052 return;
1053
1054enbl_cmpl_err:
1055 set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1056 wake_up_interruptible(&tgt->ofld_wait);
1057}
1058
1059static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
1060 struct fcoe_kcqe *disable_kcqe)
1061{
1062
1063 struct bnx2fc_rport *tgt;
1064 u32 conn_id;
1065
1066 conn_id = disable_kcqe->fcoe_conn_id;
1067 tgt = hba->tgt_ofld_list[conn_id];
1068 if (!tgt) {
1069 printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n");
1070 return;
1071 }
1072
1073	BNX2FC_TGT_DBG(tgt, "disable_cmpl: conn_id %d\n", conn_id);
1074
1075 if (disable_kcqe->completion_status) {
1076 printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n",
1077 disable_kcqe->completion_status);
1078 return;
1079 } else {
1080 /* disable successful */
1081 BNX2FC_TGT_DBG(tgt, "disable successful\n");
1082 clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1083 set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
1084 set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1085 wake_up_interruptible(&tgt->upld_wait);
1086 }
1087}
1088
1089static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
1090 struct fcoe_kcqe *destroy_kcqe)
1091{
1092 struct bnx2fc_rport *tgt;
1093 u32 conn_id;
1094
1095 conn_id = destroy_kcqe->fcoe_conn_id;
1096 tgt = hba->tgt_ofld_list[conn_id];
1097 if (!tgt) {
1098 printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n");
1099 return;
1100 }
1101
1102 BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
1103
1104 if (destroy_kcqe->completion_status) {
1105 printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n",
1106 destroy_kcqe->completion_status);
1107 return;
1108 } else {
1109 /* destroy successful */
1110 BNX2FC_TGT_DBG(tgt, "upload successful\n");
1111 clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
1112 set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
1113 set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1114 wake_up_interruptible(&tgt->upld_wait);
1115 }
1116}
1117
1118static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
1119{
1120 switch (err_code) {
1121 case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
1122 printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
1123 break;
1124
1125 case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
1126 printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
1127 break;
1128
1129 case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
1130 printk(KERN_ERR PFX "init_failure due to NIC error\n");
1131 break;
1132
1133 default:
1134 printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
1135 }
1136}
1137
1138/**
1139 * bnx2fc_indicate_kcqe - process KCQE
1140 *
1141 * @context: adapter structure pointer
1142 * @kcq: pointer to the array of kcqe pointers
1143 * @num_cqe: Number of completion queue elements
1144 *
1145 * Generic KCQ event handler
1146 */
1147void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
1148 u32 num_cqe)
1149{
1150 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
1151 int i = 0;
1152 struct fcoe_kcqe *kcqe = NULL;
1153
1154 while (i < num_cqe) {
1155 kcqe = (struct fcoe_kcqe *) kcq[i++];
1156
1157 switch (kcqe->op_code) {
1158 case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
1159 bnx2fc_fastpath_notification(hba, kcqe);
1160 break;
1161
1162 case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
1163 bnx2fc_process_ofld_cmpl(hba, kcqe);
1164 break;
1165
1166 case FCOE_KCQE_OPCODE_ENABLE_CONN:
1167 bnx2fc_process_enable_conn_cmpl(hba, kcqe);
1168 break;
1169
1170 case FCOE_KCQE_OPCODE_INIT_FUNC:
1171 if (kcqe->completion_status !=
1172 FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
1173 bnx2fc_init_failure(hba,
1174 kcqe->completion_status);
1175 } else {
1176 set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
1177 bnx2fc_get_link_state(hba);
1178 printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
1179 (u8)hba->pcidev->bus->number);
1180 }
1181 break;
1182
1183 case FCOE_KCQE_OPCODE_DESTROY_FUNC:
1184 if (kcqe->completion_status !=
1185 FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
1186
1187 printk(KERN_ERR PFX "DESTROY failed\n");
1188 } else {
1189 printk(KERN_ERR PFX "DESTROY success\n");
1190 }
1191 hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
1192 wake_up_interruptible(&hba->destroy_wait);
1193 break;
1194
1195 case FCOE_KCQE_OPCODE_DISABLE_CONN:
1196 bnx2fc_process_conn_disable_cmpl(hba, kcqe);
1197 break;
1198
1199 case FCOE_KCQE_OPCODE_DESTROY_CONN:
1200 bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
1201 break;
1202
1203 case FCOE_KCQE_OPCODE_STAT_FUNC:
1204 if (kcqe->completion_status !=
1205 FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
1206 printk(KERN_ERR PFX "STAT failed\n");
1207 complete(&hba->stat_req_done);
1208 break;
1209
1210 case FCOE_KCQE_OPCODE_FCOE_ERROR:
1211 /* fall thru */
1212 default:
1213 printk(KERN_ALERT PFX "unknown opcode 0x%x\n",
1214 kcqe->op_code);
1215 }
1216 }
1217}
1218
1219void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
1220{
1221 struct fcoe_sqe *sqe;
1222
1223 sqe = &tgt->sq[tgt->sq_prod_idx];
1224
1225 /* Fill SQ WQE */
1226 sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
1227 sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
1228
1229 /* Advance SQ Prod Idx */
1230 if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
1231 tgt->sq_prod_idx = 0;
1232 tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
1233 }
1234}
1235
1236void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
1237{
1238 struct b577xx_doorbell_set_prod ev_doorbell;
1239 u32 msg;
1240
1241 wmb();
1242
1243 memset(&ev_doorbell, 0, sizeof(struct b577xx_doorbell_set_prod));
1244 ev_doorbell.header.header = B577XX_DOORBELL_HDR_DB_TYPE;
1245
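	/*
	 * The doorbell carries the new SQ producer index in bits 0-14
	 * and the current toggle bit in bit 15, tagged as an FCoE
	 * connection in the header.
	 */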
1246 ev_doorbell.prod = tgt->sq_prod_idx |
1247 (tgt->sq_curr_toggle_bit << 15);
1248 ev_doorbell.header.header |= B577XX_FCOE_CONNECTION_TYPE <<
1249 B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
1250 msg = *((u32 *)&ev_doorbell);
1251 writel(cpu_to_le32(msg), tgt->ctx_base);
1252
1253 mmiowb();
1254
1255}
1256
1257int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
1258{
1259 u32 context_id = tgt->context_id;
1260 struct fcoe_port *port = tgt->port;
1261 u32 reg_off;
1262 resource_size_t reg_base;
1263 struct bnx2fc_hba *hba = port->priv;
1264
1265 reg_base = pci_resource_start(hba->pcidev,
1266 BNX2X_DOORBELL_PCI_BAR);
1267 reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
1268 (context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
1269 tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
1270 if (!tgt->ctx_base)
1271 return -ENOMEM;
1272 return 0;
1273}
1274
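/*
 * Return a pointer to num_items contiguous RQ buffers, or NULL if the
 * request would run past the physical end of the ring (a span handed
 * out here never wraps; callers fall back to fetching one RQE at a
 * time and copying).
 */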
1275char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1276{
1277 char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);
1278
1279 if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
1280 return NULL;
1281
1282 tgt->rq_cons_idx += num_items;
1283
1284 if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
1285 tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;
1286
1287 return buf;
1288}
1289
1290void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1291{
1292 /* return the rq buffer */
1293 u32 next_prod_idx = tgt->rq_prod_idx + num_items;
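	/*
	 * The producer index is kept modulo 32K (0x8000) while the ring
	 * itself only holds BNX2FC_RQ_WQES_MAX entries, so skip over the
	 * unused index space whenever the ring wraps.
	 */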
1294 if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
1295 /* Wrap around RQ */
1296 next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
1297 }
1298 tgt->rq_prod_idx = next_prod_idx;
1299 tgt->conn_db->rq_prod = tgt->rq_prod_idx;
1300}
1301
1302void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
1303 struct fcoe_task_ctx_entry *task,
1304 u16 orig_xid)
1305{
1306 u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
1307 struct bnx2fc_rport *tgt = io_req->tgt;
1308 u32 context_id = tgt->context_id;
1309
1310 memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1311
1312 /* Tx Write Rx Read */
1313 task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
1314 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
1315 task->tx_wr_rx_rd.init_flags = task_type <<
1316 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
1317 task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1318 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
1319 /* Common */
1320 task->cmn.common_flags = context_id <<
1321 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
1322 task->cmn.general.cleanup_info.task_id = orig_xid;
1323
1324
1325}
1326
1327void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
1328 struct fcoe_task_ctx_entry *task)
1329{
1330 struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
1331 struct bnx2fc_rport *tgt = io_req->tgt;
1332 struct fc_frame_header *fc_hdr;
1333 u8 task_type = 0;
1334 u64 *hdr;
1335 u64 temp_hdr[3];
1336 u32 context_id;
1337
1338
1339 /* Obtain task_type */
1340 if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
1341 (io_req->cmd_type == BNX2FC_ELS)) {
1342 task_type = FCOE_TASK_TYPE_MIDPATH;
1343 } else if (io_req->cmd_type == BNX2FC_ABTS) {
1344 task_type = FCOE_TASK_TYPE_ABTS;
1345 }
1346
1347 memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1348
1349 /* Setup the task from io_req for easy reference */
1350 io_req->task = task;
1351
1352 BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
1353 io_req->cmd_type, task_type);
1354
1355 /* Tx only */
1356 if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
1357 (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
1358 task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
1359 (u32)mp_req->mp_req_bd_dma;
1360 task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
1361 (u32)((u64)mp_req->mp_req_bd_dma >> 32);
1362 task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
1363 BNX2FC_IO_DBG(io_req, "init_mp_task - bd_dma = 0x%llx\n",
1364 (unsigned long long)mp_req->mp_req_bd_dma);
1365 }
1366
1367 /* Tx Write Rx Read */
1368 task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_INIT <<
1369 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
1370 task->tx_wr_rx_rd.init_flags = task_type <<
1371 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
1372 task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
1373 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
1374 task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1375 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
1376
1377 /* Common */
1378 task->cmn.data_2_trns = io_req->data_xfer_len;
1379 context_id = tgt->context_id;
1380 task->cmn.common_flags = context_id <<
1381 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
1382 task->cmn.common_flags |= 1 <<
1383 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
1384 task->cmn.common_flags |= 1 <<
1385 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
1386
1387 /* Rx Write Tx Read */
1388 fc_hdr = &(mp_req->req_fc_hdr);
1389 if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1390 fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
1391 fc_hdr->fh_rx_id = htons(0xffff);
1392 task->rx_wr_tx_rd.rx_id = 0xffff;
1393 } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
1394 fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
1395 }
1396
1397 /* Fill FC Header into middle path buffer */
1398 hdr = (u64 *) &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
1399 memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
1400 hdr[0] = cpu_to_be64(temp_hdr[0]);
1401 hdr[1] = cpu_to_be64(temp_hdr[1]);
1402 hdr[2] = cpu_to_be64(temp_hdr[2]);
1403
1404 /* Rx Only */
1405 if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1406
1407 task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
1408 (u32)mp_req->mp_resp_bd_dma;
1409 task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
1410 (u32)((u64)mp_req->mp_resp_bd_dma >> 32);
1411 task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
1412 }
1413}
1414
1415void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1416 struct fcoe_task_ctx_entry *task)
1417{
1418 u8 task_type;
1419 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1420 struct io_bdt *bd_tbl = io_req->bd_tbl;
1421 struct bnx2fc_rport *tgt = io_req->tgt;
1422 u64 *fcp_cmnd;
1423 u64 tmp_fcp_cmnd[4];
1424 u32 context_id;
1425 int cnt, i;
1426 int bd_count;
1427
1428 memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1429
1430 /* Setup the task from io_req for easy reference */
1431 io_req->task = task;
1432
1433 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1434 task_type = FCOE_TASK_TYPE_WRITE;
1435 else
1436 task_type = FCOE_TASK_TYPE_READ;
1437
1438 /* Tx only */
1439 if (task_type == FCOE_TASK_TYPE_WRITE) {
1440 task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
1441 (u32)bd_tbl->bd_tbl_dma;
1442 task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
1443 (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1444 task->tx_wr_only.sgl_ctx.mul_sges.sgl_size =
1445 bd_tbl->bd_valid;
1446 }
1447
1448 /*Tx Write Rx Read */
1449 /* Init state to NORMAL */
1450 task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
1451 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
1452 task->tx_wr_rx_rd.init_flags = task_type <<
1453 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
1454 task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
1455 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
1456 task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1457 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
1458
1459 /* Common */
1460 task->cmn.data_2_trns = io_req->data_xfer_len;
1461 context_id = tgt->context_id;
1462 task->cmn.common_flags = context_id <<
1463 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
1464 task->cmn.common_flags |= 1 <<
1465 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
1466 task->cmn.common_flags |= 1 <<
1467 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
1468
1469 /* Set initiative ownership */
1470 task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT;
1471
1472 /* Set initial seq counter */
1473 task->cmn.tx_low_seq_cnt = 1;
1474
1475 /* Set state to "waiting for the first packet" */
1476 task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME;
1477
1478 /* Fill FCP_CMND IU */
1479 fcp_cmnd = (u64 *)
1480 task->cmn.general.cmd_info.fcp_cmd_payload.opaque;
1481 bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
1482
1483 /* swap fcp_cmnd */
1484 cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
1485
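	/*
	 * The firmware expects the FCP_CMND payload in big-endian (wire)
	 * order, so swap it into place 64 bits at a time.
	 */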
1486 for (i = 0; i < cnt; i++) {
1487 *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
1488 fcp_cmnd++;
1489 }
1490
1491 /* Rx Write Tx Read */
1492 task->rx_wr_tx_rd.rx_id = 0xffff;
1493
1494 /* Rx Only */
1495 if (task_type == FCOE_TASK_TYPE_READ) {
1496
1497 bd_count = bd_tbl->bd_valid;
1498 if (bd_count == 1) {
1499
1500 struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1501
1502 task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.lo =
1503 fcoe_bd_tbl->buf_addr_lo;
1504 task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.hi =
1505 fcoe_bd_tbl->buf_addr_hi;
1506 task->rx_wr_only.sgl_ctx.single_sge.cur_buf_rem =
1507 fcoe_bd_tbl->buf_len;
1508 task->tx_wr_rx_rd.init_flags |= 1 <<
1509 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT;
1510 } else {
1511
1512 task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
1513 (u32)bd_tbl->bd_tbl_dma;
1514 task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
1515 (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1516 task->rx_wr_only.sgl_ctx.mul_sges.sgl_size =
1517 bd_tbl->bd_valid;
1518 }
1519 }
1520}
1521
1522/**
1523 * bnx2fc_setup_task_ctx - allocate and map task context
1524 *
1525 * @hba: pointer to adapter structure
1526 *
1527 * allocate memory for task context, and associated BD table to be used
1528 * by firmware
1529 *
1530 */
1531int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1532{
1533 int rc = 0;
1534 struct regpair *task_ctx_bdt;
1535 dma_addr_t addr;
1536 int i;
1537
1538	/*
1539	 * Allocate the task context BD table. One page of BD entries
1540	 * can map 256 pages, and each mapped page holds 32 task
1541	 * context entries, so a single BD table page covers up to
1542	 * 8192 task contexts.
1543	 */
1544 hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
1545 PAGE_SIZE,
1546 &hba->task_ctx_bd_dma,
1547 GFP_KERNEL);
1548 if (!hba->task_ctx_bd_tbl) {
1549 printk(KERN_ERR PFX "unable to allocate task context BDT\n");
1550 rc = -1;
1551 goto out;
1552 }
1553 memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
1554
1555 /*
1556 * Allocate task_ctx which is an array of pointers pointing to
1557 * a page containing 32 task contexts
1558 */
1559 hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
1560 GFP_KERNEL);
1561 if (!hba->task_ctx) {
1562 printk(KERN_ERR PFX "unable to allocate task context array\n");
1563 rc = -1;
1564 goto out1;
1565 }
1566
1567 /*
1568 * Allocate task_ctx_dma which is an array of dma addresses
1569 */
1570 hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
1571 sizeof(dma_addr_t)), GFP_KERNEL);
1572 if (!hba->task_ctx_dma) {
1573 printk(KERN_ERR PFX "unable to alloc context mapping array\n");
1574 rc = -1;
1575 goto out2;
1576 }
1577
1578 task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
1579 for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
1580
1581 hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
1582 PAGE_SIZE,
1583 &hba->task_ctx_dma[i],
1584 GFP_KERNEL);
1585 if (!hba->task_ctx[i]) {
1586 printk(KERN_ERR PFX "unable to alloc task context\n");
1587 rc = -1;
1588 goto out3;
1589 }
1590 memset(hba->task_ctx[i], 0, PAGE_SIZE);
1591 addr = (u64)hba->task_ctx_dma[i];
1592 task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
1593 task_ctx_bdt->lo = cpu_to_le32((u32)addr);
1594 task_ctx_bdt++;
1595 }
1596 return 0;
1597
1598out3:
1599 for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
1600 if (hba->task_ctx[i]) {
1601
1602 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1603 hba->task_ctx[i], hba->task_ctx_dma[i]);
1604 hba->task_ctx[i] = NULL;
1605 }
1606 }
1607
1608 kfree(hba->task_ctx_dma);
1609 hba->task_ctx_dma = NULL;
1610out2:
1611 kfree(hba->task_ctx);
1612 hba->task_ctx = NULL;
1613out1:
1614 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1615 hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
1616 hba->task_ctx_bd_tbl = NULL;
1617out:
1618 return rc;
1619}
1620
1621void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
1622{
1623 int i;
1624
1625 if (hba->task_ctx_bd_tbl) {
1626 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1627 hba->task_ctx_bd_tbl,
1628 hba->task_ctx_bd_dma);
1629 hba->task_ctx_bd_tbl = NULL;
1630 }
1631
1632 if (hba->task_ctx) {
1633 for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
1634 if (hba->task_ctx[i]) {
1635 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1636 hba->task_ctx[i],
1637 hba->task_ctx_dma[i]);
1638 hba->task_ctx[i] = NULL;
1639 }
1640 }
1641 kfree(hba->task_ctx);
1642 hba->task_ctx = NULL;
1643 }
1644
1645 kfree(hba->task_ctx_dma);
1646 hba->task_ctx_dma = NULL;
1647}
1648
1649static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
1650{
1651 int i;
1652 int segment_count;
1653 int hash_table_size;
1654 u32 *pbl;
1655
1656 segment_count = hba->hash_tbl_segment_count;
1657 hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
1658 sizeof(struct fcoe_hash_table_entry);
1659
1660 pbl = hba->hash_tbl_pbl;
1661 for (i = 0; i < segment_count; ++i) {
1662 dma_addr_t dma_address;
1663
1664 dma_address = le32_to_cpu(*pbl);
1665 ++pbl;
1666 dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
1667 ++pbl;
1668 dma_free_coherent(&hba->pcidev->dev,
1669 BNX2FC_HASH_TBL_CHUNK_SIZE,
1670 hba->hash_tbl_segments[i],
1671 dma_address);
1672
1673 }
1674
1675 if (hba->hash_tbl_pbl) {
1676 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1677 hba->hash_tbl_pbl,
1678 hba->hash_tbl_pbl_dma);
1679 hba->hash_tbl_pbl = NULL;
1680 }
1681}
1682
1683static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
1684{
1685 int i;
1686 int hash_table_size;
1687 int segment_count;
1688 int segment_array_size;
1689 int dma_segment_array_size;
1690 dma_addr_t *dma_segment_array;
1691 u32 *pbl;
1692
1693 hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
1694 sizeof(struct fcoe_hash_table_entry);
1695
1696 segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
1697 segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
1698 hba->hash_tbl_segment_count = segment_count;
1699
1700 segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
1701 hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
1702 if (!hba->hash_tbl_segments) {
1703 printk(KERN_ERR PFX "hash table pointers alloc failed\n");
1704 return -ENOMEM;
1705 }
1706 dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
1707 dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
1708	if (!dma_segment_array) {
1709		printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
		kfree(hba->hash_tbl_segments);
		hba->hash_tbl_segments = NULL;
1710		return -ENOMEM;
1711	}
1712
1713 for (i = 0; i < segment_count; ++i) {
1714 hba->hash_tbl_segments[i] =
1715 dma_alloc_coherent(&hba->pcidev->dev,
1716 BNX2FC_HASH_TBL_CHUNK_SIZE,
1717 &dma_segment_array[i],
1718 GFP_KERNEL);
1719 if (!hba->hash_tbl_segments[i]) {
1720 printk(KERN_ERR PFX "hash segment alloc failed\n");
1721 while (--i >= 0) {
1722 dma_free_coherent(&hba->pcidev->dev,
1723 BNX2FC_HASH_TBL_CHUNK_SIZE,
1724 hba->hash_tbl_segments[i],
1725 dma_segment_array[i]);
1726 hba->hash_tbl_segments[i] = NULL;
1727 }
1728 kfree(dma_segment_array);
1729 return -ENOMEM;
1730 }
1731 memset(hba->hash_tbl_segments[i], 0,
1732 BNX2FC_HASH_TBL_CHUNK_SIZE);
1733 }
1734
1735 hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
1736 PAGE_SIZE,
1737 &hba->hash_tbl_pbl_dma,
1738 GFP_KERNEL);
1739 if (!hba->hash_tbl_pbl) {
1740 printk(KERN_ERR PFX "hash table pbl alloc failed\n");
1741 kfree(dma_segment_array);
1742 return -ENOMEM;
1743 }
1744 memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
1745
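	/*
	 * Populate the page base list: each hash segment's DMA address
	 * is written as a lo/hi pair of little-endian 32-bit words.
	 */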
1746 pbl = hba->hash_tbl_pbl;
1747 for (i = 0; i < segment_count; ++i) {
1748 u64 paddr = dma_segment_array[i];
1749 *pbl = cpu_to_le32((u32) paddr);
1750 ++pbl;
1751 *pbl = cpu_to_le32((u32) (paddr >> 32));
1752 ++pbl;
1753 }
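	/*
	 * Sanity walk of the just-built PBL; the values read back are
	 * not used further (likely a debug leftover).
	 */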
1754 pbl = hba->hash_tbl_pbl;
1755 i = 0;
1756 while (*pbl && *(pbl + 1)) {
1757 u32 lo;
1758 u32 hi;
1759 lo = *pbl;
1760 ++pbl;
1761 hi = *pbl;
1762 ++pbl;
1763 ++i;
1764 }
1765 kfree(dma_segment_array);
1766 return 0;
1767}
1768
1769/**
1770 * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer
1771 *
1772 * @hba: Pointer to adapter structure
1773 *
1774 */
1775int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
1776{
1777 u64 addr;
1778 u32 mem_size;
1779 int i;
1780
1781 if (bnx2fc_allocate_hash_table(hba))
1782 return -ENOMEM;
1783
1784 mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
1785 hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
1786 &hba->t2_hash_tbl_ptr_dma,
1787 GFP_KERNEL);
1788 if (!hba->t2_hash_tbl_ptr) {
1789 printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
1790 bnx2fc_free_fw_resc(hba);
1791 return -ENOMEM;
1792 }
1793 memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
1794
1795 mem_size = BNX2FC_NUM_MAX_SESS *
1796 sizeof(struct fcoe_t2_hash_table_entry);
1797 hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
1798 &hba->t2_hash_tbl_dma,
1799 GFP_KERNEL);
1800 if (!hba->t2_hash_tbl) {
1801 printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
1802 bnx2fc_free_fw_resc(hba);
1803 return -ENOMEM;
1804 }
1805 memset(hba->t2_hash_tbl, 0x00, mem_size);
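	/*
	 * Link each T2 hash entry to the one that follows it so the
	 * firmware can walk the collision chain.
	 */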
1806 for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
1807		addr = (u64) hba->t2_hash_tbl_dma +
1808 ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
1809 hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
1810 hba->t2_hash_tbl[i].next.hi = addr >> 32;
1811 }
1812
1813 hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
1814 PAGE_SIZE, &hba->dummy_buf_dma,
1815 GFP_KERNEL);
1816 if (!hba->dummy_buffer) {
1817 printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
1818 bnx2fc_free_fw_resc(hba);
1819 return -ENOMEM;
1820 }
1821
1822 hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
1823 PAGE_SIZE,
1824 &hba->stats_buf_dma,
1825 GFP_KERNEL);
1826 if (!hba->stats_buffer) {
1827 printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
1828 bnx2fc_free_fw_resc(hba);
1829 return -ENOMEM;
1830 }
1831 memset(hba->stats_buffer, 0x00, PAGE_SIZE);
1832
1833 return 0;
1834}
1835
1836void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
1837{
1838 u32 mem_size;
1839
1840 if (hba->stats_buffer) {
1841 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1842 hba->stats_buffer, hba->stats_buf_dma);
1843 hba->stats_buffer = NULL;
1844 }
1845
1846 if (hba->dummy_buffer) {
1847 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1848 hba->dummy_buffer, hba->dummy_buf_dma);
1849 hba->dummy_buffer = NULL;
1850 }
1851
1852 if (hba->t2_hash_tbl_ptr) {
1853 mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
1854 dma_free_coherent(&hba->pcidev->dev, mem_size,
1855 hba->t2_hash_tbl_ptr,
1856 hba->t2_hash_tbl_ptr_dma);
1857 hba->t2_hash_tbl_ptr = NULL;
1858 }
1859
1860 if (hba->t2_hash_tbl) {
1861 mem_size = BNX2FC_NUM_MAX_SESS *
1862 sizeof(struct fcoe_t2_hash_table_entry);
1863 dma_free_coherent(&hba->pcidev->dev, mem_size,
1864 hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
1865 hba->t2_hash_tbl = NULL;
1866 }
1867 bnx2fc_free_hash_table(hba);
1868}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
new file mode 100644
index 000000000000..0f1dd23730db
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -0,0 +1,1833 @@
1/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
2 * IO manager and SCSI IO processing.
3 *
4 * Copyright (c) 2008 - 2010 Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
11 */
12
13#include "bnx2fc.h"
14static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
15 int bd_index);
16static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
17static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
18static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
19 struct bnx2fc_cmd *io_req);
20static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
21static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
22static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
23 struct fcoe_fcp_rsp_payload *fcp_rsp,
24 u8 num_rq);
25
26void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
27 unsigned int timer_msec)
28{
29 struct bnx2fc_hba *hba = io_req->port->priv;
30
31 if (queue_delayed_work(hba->timer_work_queue, &io_req->timeout_work,
32 msecs_to_jiffies(timer_msec)))
33 kref_get(&io_req->refcount);
34}
35
36static void bnx2fc_cmd_timeout(struct work_struct *work)
37{
38 struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
39 timeout_work.work);
40 struct fc_lport *lport;
41 struct fc_rport_priv *rdata;
42 u8 cmd_type = io_req->cmd_type;
43 struct bnx2fc_rport *tgt = io_req->tgt;
44 int logo_issued;
45 int rc;
46
47	BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d, "
48		      "req_flags = %lx\n", cmd_type, io_req->req_flags);
49
50 spin_lock_bh(&tgt->tgt_lock);
51 if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
52 clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
53 /*
54		 * Ideally we should hold the io_req until the RRQ completes,
55		 * and release it from the timeout hold.
56 */
57 spin_unlock_bh(&tgt->tgt_lock);
58 bnx2fc_send_rrq(io_req);
59 return;
60 }
61 if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
62 BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
63 goto done;
64 }
65
66 switch (cmd_type) {
67 case BNX2FC_SCSI_CMD:
68 if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
69 &io_req->req_flags)) {
70 /* Handle eh_abort timeout */
71 BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
72 complete(&io_req->tm_done);
73 } else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
74 &io_req->req_flags)) {
75 /* Handle internally generated ABTS timeout */
76 BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
77 io_req->refcount.refcount.counter);
78 if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
79 &io_req->req_flags))) {
80
81 lport = io_req->port->lport;
82 rdata = io_req->tgt->rdata;
83 logo_issued = test_and_set_bit(
84 BNX2FC_FLAG_EXPL_LOGO,
85 &tgt->flags);
86 kref_put(&io_req->refcount, bnx2fc_cmd_release);
87 spin_unlock_bh(&tgt->tgt_lock);
88
89 /* Explicitly logo the target */
90 if (!logo_issued) {
91 BNX2FC_IO_DBG(io_req, "Explicit "
92 "logo - tgt flags = 0x%lx\n",
93 tgt->flags);
94
95 mutex_lock(&lport->disc.disc_mutex);
96 lport->tt.rport_logoff(rdata);
97 mutex_unlock(&lport->disc.disc_mutex);
98 }
99 return;
100 }
101 } else {
102			/* Handle IO timeout */
103 BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
104 if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
105 &io_req->req_flags)) {
106 BNX2FC_IO_DBG(io_req, "IO completed before "
107 " timer expiry\n");
108 goto done;
109 }
110
111 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
112 &io_req->req_flags)) {
113 rc = bnx2fc_initiate_abts(io_req);
114 if (rc == SUCCESS)
115 goto done;
116 /*
117 * Explicitly logo the target if
118 * abts initiation fails
119 */
120 lport = io_req->port->lport;
121 rdata = io_req->tgt->rdata;
122 logo_issued = test_and_set_bit(
123 BNX2FC_FLAG_EXPL_LOGO,
124 &tgt->flags);
125 kref_put(&io_req->refcount, bnx2fc_cmd_release);
126 spin_unlock_bh(&tgt->tgt_lock);
127
128 if (!logo_issued) {
129 BNX2FC_IO_DBG(io_req, "Explicit "
130 "logo - tgt flags = 0x%lx\n",
131 tgt->flags);
132
133
134 mutex_lock(&lport->disc.disc_mutex);
135 lport->tt.rport_logoff(rdata);
136 mutex_unlock(&lport->disc.disc_mutex);
137 }
138 return;
139 } else {
140 BNX2FC_IO_DBG(io_req, "IO already in "
141 "ABTS processing\n");
142 }
143 }
144 break;
145 case BNX2FC_ELS:
146
147 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
148 BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");
149
150 if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
151 &io_req->req_flags)) {
152 lport = io_req->port->lport;
153 rdata = io_req->tgt->rdata;
154 logo_issued = test_and_set_bit(
155 BNX2FC_FLAG_EXPL_LOGO,
156 &tgt->flags);
157 kref_put(&io_req->refcount, bnx2fc_cmd_release);
158 spin_unlock_bh(&tgt->tgt_lock);
159
160 /* Explicitly logo the target */
161 if (!logo_issued) {
162 BNX2FC_IO_DBG(io_req, "Explicitly logo"
163 "(els)\n");
164 mutex_lock(&lport->disc.disc_mutex);
165 lport->tt.rport_logoff(rdata);
166 mutex_unlock(&lport->disc.disc_mutex);
167 }
168 return;
169 }
170 } else {
171 /*
172 * Handle ELS timeout.
173 * tgt_lock is used to sync compl path and timeout
174 * path. If els compl path is processing this IO, we
175 * have nothing to do here, just release the timer hold
176 */
177 BNX2FC_IO_DBG(io_req, "ELS timed out\n");
178 if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
179 &io_req->req_flags))
180 goto done;
181
182 /* Indicate the cb_func that this ELS is timed out */
183 set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);
184
185 if ((io_req->cb_func) && (io_req->cb_arg)) {
186 io_req->cb_func(io_req->cb_arg);
187 io_req->cb_arg = NULL;
188 }
189 }
190 break;
191 default:
192 printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
193 cmd_type);
194 break;
195 }
196
197done:
198 /* release the cmd that was held when timer was set */
199 kref_put(&io_req->refcount, bnx2fc_cmd_release);
200 spin_unlock_bh(&tgt->tgt_lock);
201}
202
203static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
204{
205 /* Called with host lock held */
206 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
207
208 /*
209 * active_cmd_queue may have other command types as well,
210 * and during flush operation, we want to error back only
211 * scsi commands.
212 */
213 if (io_req->cmd_type != BNX2FC_SCSI_CMD)
214 return;
215
216 BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
217 bnx2fc_unmap_sg_list(io_req);
218 io_req->sc_cmd = NULL;
219 if (!sc_cmd) {
220 printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
221 "IO(0x%x) already cleaned up\n",
222 io_req->xid);
223 return;
224 }
225 sc_cmd->result = err_code << 16;
226
227 BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
228 sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
229 sc_cmd->allowed);
230 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
231 sc_cmd->SCp.ptr = NULL;
232 sc_cmd->scsi_done(sc_cmd);
233}
234
235struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
236 u16 min_xid, u16 max_xid)
237{
238 struct bnx2fc_cmd_mgr *cmgr;
239 struct io_bdt *bdt_info;
240 struct bnx2fc_cmd *io_req;
241 size_t len;
242 u32 mem_size;
243 u16 xid;
244 int i;
245 int num_ios;
246 size_t bd_tbl_sz;
247
248 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
249		printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x "
250		       "and max_xid 0x%x\n", min_xid, max_xid);
251 return NULL;
252 }
253 BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid);
254
255 num_ios = max_xid - min_xid + 1;
256 len = (num_ios * (sizeof(struct bnx2fc_cmd *)));
257 len += sizeof(struct bnx2fc_cmd_mgr);
258
259 cmgr = kzalloc(len, GFP_KERNEL);
260 if (!cmgr) {
261 printk(KERN_ERR PFX "failed to alloc cmgr\n");
262 return NULL;
263 }
264
265 cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) *
266 num_possible_cpus(), GFP_KERNEL);
267 if (!cmgr->free_list) {
268 printk(KERN_ERR PFX "failed to alloc free_list\n");
269 goto mem_err;
270 }
271
272 cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) *
273 num_possible_cpus(), GFP_KERNEL);
274 if (!cmgr->free_list_lock) {
275 printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
276 goto mem_err;
277 }
278
279 cmgr->hba = hba;
280 cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
281
282 for (i = 0; i < num_possible_cpus(); i++) {
283 INIT_LIST_HEAD(&cmgr->free_list[i]);
284 spin_lock_init(&cmgr->free_list_lock[i]);
285 }
286
287 /* Pre-allocated pool of bnx2fc_cmds */
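	/*
	 * Spread the commands round-robin (by xid) across the per-CPU
	 * free lists so allocations on different CPUs contend only on
	 * their own list lock.
	 */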
288 xid = BNX2FC_MIN_XID;
289 for (i = 0; i < num_ios; i++) {
290 io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);
291
292 if (!io_req) {
293 printk(KERN_ERR PFX "failed to alloc io_req\n");
294 goto mem_err;
295 }
296
297 INIT_LIST_HEAD(&io_req->link);
298 INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);
299
300 io_req->xid = xid++;
301 if (io_req->xid >= BNX2FC_MAX_OUTSTANDING_CMNDS)
302 printk(KERN_ERR PFX "ERROR allocating xids - 0x%x\n",
303 io_req->xid);
304 list_add_tail(&io_req->link,
305 &cmgr->free_list[io_req->xid % num_possible_cpus()]);
306 io_req++;
307 }
308
309 /* Allocate pool of io_bdts - one for each bnx2fc_cmd */
310 mem_size = num_ios * sizeof(struct io_bdt *);
311 cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
312 if (!cmgr->io_bdt_pool) {
313 printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
314 goto mem_err;
315 }
316
317 mem_size = sizeof(struct io_bdt);
318 for (i = 0; i < num_ios; i++) {
319 cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
320 if (!cmgr->io_bdt_pool[i]) {
321 printk(KERN_ERR PFX "failed to alloc "
322 "io_bdt_pool[%d]\n", i);
323 goto mem_err;
324 }
325 }
326
327	/* Allocate and map fcoe_bd_ctx structures */
328 bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
329 for (i = 0; i < num_ios; i++) {
330 bdt_info = cmgr->io_bdt_pool[i];
331 bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
332 bd_tbl_sz,
333 &bdt_info->bd_tbl_dma,
334 GFP_KERNEL);
335 if (!bdt_info->bd_tbl) {
336 printk(KERN_ERR PFX "failed to alloc "
337 "bdt_tbl[%d]\n", i);
338 goto mem_err;
339 }
340 }
341
342 return cmgr;
343
344mem_err:
345 bnx2fc_cmd_mgr_free(cmgr);
346 return NULL;
347}
348
349void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
350{
351 struct io_bdt *bdt_info;
352 struct bnx2fc_hba *hba = cmgr->hba;
353 size_t bd_tbl_sz;
354 u16 min_xid = BNX2FC_MIN_XID;
355 u16 max_xid = BNX2FC_MAX_XID;
356 int num_ios;
357 int i;
358
359 num_ios = max_xid - min_xid + 1;
360
361 /* Free fcoe_bdt_ctx structures */
362 if (!cmgr->io_bdt_pool)
363 goto free_cmd_pool;
364
365 bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
366 for (i = 0; i < num_ios; i++) {
367 bdt_info = cmgr->io_bdt_pool[i];
368 if (bdt_info->bd_tbl) {
369 dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
370 bdt_info->bd_tbl,
371 bdt_info->bd_tbl_dma);
372 bdt_info->bd_tbl = NULL;
373 }
374 }
375
376 /* Destroy io_bdt pool */
377 for (i = 0; i < num_ios; i++) {
378 kfree(cmgr->io_bdt_pool[i]);
379 cmgr->io_bdt_pool[i] = NULL;
380 }
381
382 kfree(cmgr->io_bdt_pool);
383 cmgr->io_bdt_pool = NULL;
384
385free_cmd_pool:
386 kfree(cmgr->free_list_lock);
387
388 /* Destroy cmd pool */
389 if (!cmgr->free_list)
390 goto free_cmgr;
391
392 for (i = 0; i < num_possible_cpus(); i++) {
393 struct list_head *list;
394 struct list_head *tmp;
395
396 list_for_each_safe(list, tmp, &cmgr->free_list[i]) {
397 struct bnx2fc_cmd *io_req = (struct bnx2fc_cmd *)list;
398 list_del(&io_req->link);
399 kfree(io_req);
400 }
401 }
402 kfree(cmgr->free_list);
403free_cmgr:
404 /* Free command manager itself */
405 kfree(cmgr);
406}
407
408struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
409{
410 struct fcoe_port *port = tgt->port;
411 struct bnx2fc_hba *hba = port->priv;
412 struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr;
413 struct bnx2fc_cmd *io_req;
414 struct list_head *listp;
415 struct io_bdt *bd_tbl;
416 u32 max_sqes;
417 u16 xid;
418
419 max_sqes = tgt->max_sqes;
420 switch (type) {
421 case BNX2FC_TASK_MGMT_CMD:
422 max_sqes = BNX2FC_TM_MAX_SQES;
423 break;
424 case BNX2FC_ELS:
425 max_sqes = BNX2FC_ELS_MAX_SQES;
426 break;
427 default:
428 break;
429 }
430
431 /*
432 * NOTE: Free list insertions and deletions are protected with
433 * cmgr lock
434 */
435 spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
436 if ((list_empty(&(cmd_mgr->free_list[smp_processor_id()]))) ||
437 (tgt->num_active_ios.counter >= max_sqes)) {
438 BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
439 "ios(%d):sqes(%d)\n",
440 tgt->num_active_ios.counter, tgt->max_sqes);
441 if (list_empty(&(cmd_mgr->free_list[smp_processor_id()])))
442 printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
443 spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
444 return NULL;
445 }
446
447 listp = (struct list_head *)
448 cmd_mgr->free_list[smp_processor_id()].next;
449 list_del_init(listp);
450 io_req = (struct bnx2fc_cmd *) listp;
451 xid = io_req->xid;
452 cmd_mgr->cmds[xid] = io_req;
453 atomic_inc(&tgt->num_active_ios);
454 spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
455
456 INIT_LIST_HEAD(&io_req->link);
457
458 io_req->port = port;
459 io_req->cmd_mgr = cmd_mgr;
460 io_req->req_flags = 0;
461 io_req->cmd_type = type;
462
463 /* Bind io_bdt for this io_req */
464 /* Have a static link between io_req and io_bdt_pool */
465 bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
466 bd_tbl->io_req = io_req;
467
468 /* Hold the io_req against deletion */
469 kref_init(&io_req->refcount);
470 return io_req;
471}
472static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
473{
474 struct fcoe_port *port = tgt->port;
475 struct bnx2fc_hba *hba = port->priv;
476 struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr;
477 struct bnx2fc_cmd *io_req;
478 struct list_head *listp;
479 struct io_bdt *bd_tbl;
480 u32 max_sqes;
481 u16 xid;
482
483 max_sqes = BNX2FC_SCSI_MAX_SQES;
484 /*
485 * NOTE: Free list insertions and deletions are protected with
486 * cmgr lock
487 */
488 spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
489 if ((list_empty(&cmd_mgr->free_list[smp_processor_id()])) ||
490 (tgt->num_active_ios.counter >= max_sqes)) {
491 spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
492 return NULL;
493 }
494
495 listp = (struct list_head *)
496 cmd_mgr->free_list[smp_processor_id()].next;
497 list_del_init(listp);
498 io_req = (struct bnx2fc_cmd *) listp;
499 xid = io_req->xid;
500 cmd_mgr->cmds[xid] = io_req;
501 atomic_inc(&tgt->num_active_ios);
502 spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
503
504 INIT_LIST_HEAD(&io_req->link);
505
506 io_req->port = port;
507 io_req->cmd_mgr = cmd_mgr;
508 io_req->req_flags = 0;
509
510 /* Bind io_bdt for this io_req */
511 /* Have a static link between io_req and io_bdt_pool */
512 bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
513 bd_tbl->io_req = io_req;
514
515 /* Hold the io_req against deletion */
516 kref_init(&io_req->refcount);
517 return io_req;
518}
519
520void bnx2fc_cmd_release(struct kref *ref)
521{
522 struct bnx2fc_cmd *io_req = container_of(ref,
523 struct bnx2fc_cmd, refcount);
524 struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
525
526 spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
527 if (io_req->cmd_type != BNX2FC_SCSI_CMD)
528 bnx2fc_free_mp_resc(io_req);
529 cmd_mgr->cmds[io_req->xid] = NULL;
530 /* Delete IO from retire queue */
531 list_del_init(&io_req->link);
532 /* Add it to the free list */
533 list_add(&io_req->link,
534 &cmd_mgr->free_list[smp_processor_id()]);
535 atomic_dec(&io_req->tgt->num_active_ios);
536 spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
537}
538
539static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
540{
541 struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
542 struct bnx2fc_hba *hba = io_req->port->priv;
543 size_t sz = sizeof(struct fcoe_bd_ctx);
544
545 /* clear tm flags */
546 mp_req->tm_flags = 0;
547 if (mp_req->mp_req_bd) {
548 dma_free_coherent(&hba->pcidev->dev, sz,
549 mp_req->mp_req_bd,
550 mp_req->mp_req_bd_dma);
551 mp_req->mp_req_bd = NULL;
552 }
553 if (mp_req->mp_resp_bd) {
554 dma_free_coherent(&hba->pcidev->dev, sz,
555 mp_req->mp_resp_bd,
556 mp_req->mp_resp_bd_dma);
557 mp_req->mp_resp_bd = NULL;
558 }
559 if (mp_req->req_buf) {
560 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
561 mp_req->req_buf,
562 mp_req->req_buf_dma);
563 mp_req->req_buf = NULL;
564 }
565 if (mp_req->resp_buf) {
566 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
567 mp_req->resp_buf,
568 mp_req->resp_buf_dma);
569 mp_req->resp_buf = NULL;
570 }
571}
572
573int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
574{
575 struct bnx2fc_mp_req *mp_req;
576 struct fcoe_bd_ctx *mp_req_bd;
577 struct fcoe_bd_ctx *mp_resp_bd;
578 struct bnx2fc_hba *hba = io_req->port->priv;
579 dma_addr_t addr;
580 size_t sz;
581
582 mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
583 memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));
584
585 mp_req->req_len = sizeof(struct fcp_cmnd);
586 io_req->data_xfer_len = mp_req->req_len;
587 mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
588 &mp_req->req_buf_dma,
589 GFP_ATOMIC);
590 if (!mp_req->req_buf) {
591 printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
592 bnx2fc_free_mp_resc(io_req);
593 return FAILED;
594 }
595
596 mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
597 &mp_req->resp_buf_dma,
598 GFP_ATOMIC);
599 if (!mp_req->resp_buf) {
600 printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
601 bnx2fc_free_mp_resc(io_req);
602 return FAILED;
603 }
604 memset(mp_req->req_buf, 0, PAGE_SIZE);
605 memset(mp_req->resp_buf, 0, PAGE_SIZE);
606
607 /* Allocate and map mp_req_bd and mp_resp_bd */
608 sz = sizeof(struct fcoe_bd_ctx);
609 mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
610 &mp_req->mp_req_bd_dma,
611 GFP_ATOMIC);
612 if (!mp_req->mp_req_bd) {
613 printk(KERN_ERR PFX "unable to alloc MP req bd\n");
614 bnx2fc_free_mp_resc(io_req);
615 return FAILED;
616 }
617 mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
618 &mp_req->mp_resp_bd_dma,
619 GFP_ATOMIC);
620	if (!mp_req->mp_resp_bd) {
621 printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
622 bnx2fc_free_mp_resc(io_req);
623 return FAILED;
624 }
625 /* Fill bd table */
626 addr = mp_req->req_buf_dma;
627 mp_req_bd = mp_req->mp_req_bd;
628 mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
629 mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
630 mp_req_bd->buf_len = PAGE_SIZE;
631 mp_req_bd->flags = 0;
632
633 /*
634 * MP buffer is either a task mgmt command or an ELS.
635 * So the assumption is that it consumes a single bd
636 * entry in the bd table
637 */
638 mp_resp_bd = mp_req->mp_resp_bd;
639 addr = mp_req->resp_buf_dma;
640 mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
641 mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
642 mp_resp_bd->buf_len = PAGE_SIZE;
643 mp_resp_bd->flags = 0;
644
645 return SUCCESS;
646}
647
648static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
649{
650 struct fc_lport *lport;
651 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
652 struct fc_rport_libfc_priv *rp = rport->dd_data;
653 struct fcoe_port *port;
654 struct bnx2fc_hba *hba;
655 struct bnx2fc_rport *tgt;
656 struct bnx2fc_cmd *io_req;
657 struct bnx2fc_mp_req *tm_req;
658 struct fcoe_task_ctx_entry *task;
659 struct fcoe_task_ctx_entry *task_page;
660 struct Scsi_Host *host = sc_cmd->device->host;
661 struct fc_frame_header *fc_hdr;
662 struct fcp_cmnd *fcp_cmnd;
663 int task_idx, index;
664 int rc = SUCCESS;
665 u16 xid;
666 u32 sid, did;
667 unsigned long start = jiffies;
668
669 lport = shost_priv(host);
670 port = lport_priv(lport);
671 hba = port->priv;
672
673 if (rport == NULL) {
674 printk(KERN_ALERT PFX "device_reset: rport is NULL\n");
675 rc = FAILED;
676 goto tmf_err;
677 }
678
679 rc = fc_block_scsi_eh(sc_cmd);
680 if (rc)
681 return rc;
682
683 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
684 printk(KERN_ERR PFX "device_reset: link is not ready\n");
685 rc = FAILED;
686 goto tmf_err;
687 }
688 /* rport and tgt are allocated together, so tgt should be non-NULL */
689 tgt = (struct bnx2fc_rport *)&rp[1];
690
691 if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
692 printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
693 rc = FAILED;
694 goto tmf_err;
695 }
696retry_tmf:
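	/* Retry command allocation for up to one second before failing */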
697 io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
698 if (!io_req) {
699 if (time_after(jiffies, start + HZ)) {
700 printk(KERN_ERR PFX "tmf: Failed TMF");
701 rc = FAILED;
702 goto tmf_err;
703 }
704 msleep(20);
705 goto retry_tmf;
706 }
707 /* Initialize rest of io_req fields */
708 io_req->sc_cmd = sc_cmd;
709 io_req->port = port;
710 io_req->tgt = tgt;
711
712 tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
713
714 rc = bnx2fc_init_mp_req(io_req);
715 if (rc == FAILED) {
716 printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
717 kref_put(&io_req->refcount, bnx2fc_cmd_release);
718 goto tmf_err;
719 }
720
721 /* Set TM flags */
722 io_req->io_req_flags = 0;
723 tm_req->tm_flags = tm_flags;
724
725 /* Fill FCP_CMND */
726 bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
727 fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
728 memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len);
729 fcp_cmnd->fc_dl = 0;
730
731 /* Fill FC header */
732 fc_hdr = &(tm_req->req_fc_hdr);
733 sid = tgt->sid;
734 did = rport->port_id;
735 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
736 FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
737 FC_FC_SEQ_INIT, 0);
738 /* Obtain exchange id */
739 xid = io_req->xid;
740
741 BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
742 task_idx = xid/BNX2FC_TASKS_PER_PAGE;
743 index = xid % BNX2FC_TASKS_PER_PAGE;
744
745 /* Initialize task context for this IO request */
746 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
747 task = &(task_page[index]);
748 bnx2fc_init_mp_task(io_req, task);
749
750 sc_cmd->SCp.ptr = (char *)io_req;
751
752 /* Obtain free SQ entry */
753 spin_lock_bh(&tgt->tgt_lock);
754 bnx2fc_add_2_sq(tgt, xid);
755
756 /* Enqueue the io_req to active_tm_queue */
757 io_req->on_tmf_queue = 1;
758 list_add_tail(&io_req->link, &tgt->active_tm_queue);
759
760 init_completion(&io_req->tm_done);
761 io_req->wait_for_comp = 1;
762
763 /* Ring doorbell */
764 bnx2fc_ring_doorbell(tgt);
765 spin_unlock_bh(&tgt->tgt_lock);
766
767 rc = wait_for_completion_timeout(&io_req->tm_done,
768 BNX2FC_TM_TIMEOUT * HZ);
769 spin_lock_bh(&tgt->tgt_lock);
770
771 io_req->wait_for_comp = 0;
772 if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags)))
773 set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
774
775 spin_unlock_bh(&tgt->tgt_lock);
776
777 if (!rc) {
778 printk(KERN_ERR PFX "task mgmt command failed...\n");
779 rc = FAILED;
780 } else {
781 printk(KERN_ERR PFX "task mgmt command success...\n");
782 rc = SUCCESS;
783 }
784tmf_err:
785 return rc;
786}
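
bnx2fc_initiate_tmf retries command allocation for roughly one second before declaring the TMF failed. A userspace sketch of that bounded-retry idiom, with clock_gettime() standing in for jiffies and a stub allocator (both hypothetical):

    #include <stddef.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static int attempts;

    /* Stub allocator: fails twice, then succeeds (for demonstration). */
    static void *try_alloc(void)
    {
        return ++attempts < 3 ? NULL : &attempts;
    }

    /* Retry for up to ~1 second, sleeping 20 ms between attempts, like
     * the retry_tmf loop does with jiffies and msleep(20). */
    static void *alloc_with_deadline(void)
    {
        struct timespec start, now;
        void *p;

        clock_gettime(CLOCK_MONOTONIC, &start);
        while ((p = try_alloc()) == NULL) {
            clock_gettime(CLOCK_MONOTONIC, &now);
            if (now.tv_sec - start.tv_sec >= 1)
                return NULL;          /* deadline hit: give up */
            usleep(20 * 1000);
        }
        return p;
    }

    int main(void)
    {
        printf("alloc %s after %d attempts\n",
               alloc_with_deadline() ? "succeeded" : "failed", attempts);
        return 0;
    }
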
787
788int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
789{
790 struct fc_lport *lport;
791 struct bnx2fc_rport *tgt = io_req->tgt;
792 struct fc_rport *rport = tgt->rport;
793 struct fc_rport_priv *rdata = tgt->rdata;
794 struct bnx2fc_hba *hba;
795 struct fcoe_port *port;
796 struct bnx2fc_cmd *abts_io_req;
797 struct fcoe_task_ctx_entry *task;
798 struct fcoe_task_ctx_entry *task_page;
799 struct fc_frame_header *fc_hdr;
800 struct bnx2fc_mp_req *abts_req;
801 int task_idx, index;
802 u32 sid, did;
803 u16 xid;
804 int rc = SUCCESS;
805 u32 r_a_tov = rdata->r_a_tov;
806
807 /* called with tgt_lock held */
808 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");
809
810 port = io_req->port;
811 hba = port->priv;
812 lport = port->lport;
813
814 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
815 printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
816 rc = FAILED;
817 goto abts_err;
818 }
819
820 if (rport == NULL) {
821 printk(KERN_ALERT PFX "initiate_abts: rport is NULL\n");
822 rc = FAILED;
823 goto abts_err;
824 }
825
826 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
827 printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
828 rc = FAILED;
829 goto abts_err;
830 }
831
832 abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
833 if (!abts_io_req) {
834 printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
835 rc = FAILED;
836 goto abts_err;
837 }
838
839 /* Initialize rest of io_req fields */
840 abts_io_req->sc_cmd = NULL;
841 abts_io_req->port = port;
842 abts_io_req->tgt = tgt;
843 abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */
844
845 abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
846 memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));
847
848 /* Fill FC header */
849 fc_hdr = &(abts_req->req_fc_hdr);
850
851 /* Obtain oxid and rxid for the original exchange to be aborted */
852 fc_hdr->fh_ox_id = htons(io_req->xid);
853 fc_hdr->fh_rx_id = htons(io_req->task->rx_wr_tx_rd.rx_id);
854
855 sid = tgt->sid;
856 did = rport->port_id;
857
858 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
859 FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
860 FC_FC_SEQ_INIT, 0);
861
862 xid = abts_io_req->xid;
863 BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
864 task_idx = xid/BNX2FC_TASKS_PER_PAGE;
865 index = xid % BNX2FC_TASKS_PER_PAGE;
866
867 /* Initialize task context for this IO request */
868 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
869 task = &(task_page[index]);
870 bnx2fc_init_mp_task(abts_io_req, task);
871
872 /*
873 * ABTS task is a temporary task that will be cleaned up
874 * irrespective of ABTS response. We need to start the timer
875 * for the original exchange, as the CQE is posted for the original
876 * IO request.
877 *
878 * Timer for ABTS is started only when it is originated by a
879 * TM request. For the ABTS issued as part of ULP timeout,
880 * scsi-ml maintains the timers.
881 */
882
883 /* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
884 bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);
885
886 /* Obtain free SQ entry */
887 bnx2fc_add_2_sq(tgt, xid);
888
889 /* Ring doorbell */
890 bnx2fc_ring_doorbell(tgt);
891
892abts_err:
893 return rc;
894}
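
The key detail above is that the BLS frame carries the OX_ID/RX_ID of the exchange being aborted, while the ABTS itself occupies a freshly allocated xid. A small sketch of that header fill, using an illustrative struct rather than the libfc definition:

    #include <arpa/inet.h>   /* htons/ntohs */
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified view of the FC frame header fields used for ABTS;
     * names are illustrative, not the libfc definitions. */
    struct abts_hdr {
        uint16_t ox_id;      /* exchange originator ID, big endian */
        uint16_t rx_id;      /* exchange responder ID, big endian */
    };

    int main(void)
    {
        uint16_t orig_xid = 0x0042;  /* exchange being aborted */
        uint16_t rx_id = 0xffff;     /* as reported by the firmware task */
        struct abts_hdr hdr;

        /* The BLS ABTS carries the IDs of the *original* exchange, even
         * though the ABTS itself is queued on a freshly allocated xid. */
        hdr.ox_id = htons(orig_xid);
        hdr.rx_id = htons(rx_id);
        printf("ABTS for ox_id=0x%04x rx_id=0x%04x\n",
               ntohs(hdr.ox_id), ntohs(hdr.rx_id));
        return 0;
    }
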
895
896int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
897{
898 struct fc_lport *lport;
899 struct bnx2fc_rport *tgt = io_req->tgt;
900 struct bnx2fc_hba *hba;
901 struct fcoe_port *port;
902 struct bnx2fc_cmd *cleanup_io_req;
903 struct fcoe_task_ctx_entry *task;
904 struct fcoe_task_ctx_entry *task_page;
905 int task_idx, index;
906 u16 xid, orig_xid;
907 int rc = 0;
908
909 /* ASSUMPTION: called with tgt_lock held */
910 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");
911
912 port = io_req->port;
913 hba = port->priv;
914 lport = port->lport;
915
916 cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
917 if (!cleanup_io_req) {
918 printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
919 rc = -1;
920 goto cleanup_err;
921 }
922
923 /* Initialize rest of io_req fields */
924 cleanup_io_req->sc_cmd = NULL;
925 cleanup_io_req->port = port;
926 cleanup_io_req->tgt = tgt;
927 cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */
928
929 xid = cleanup_io_req->xid;
930
931 task_idx = xid/BNX2FC_TASKS_PER_PAGE;
932 index = xid % BNX2FC_TASKS_PER_PAGE;
933
934 /* Initialize task context for this IO request */
935 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
936 task = &(task_page[index]);
937 orig_xid = io_req->xid;
938
939 BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);
940
941 bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);
942
943 /* Obtain free SQ entry */
944 bnx2fc_add_2_sq(tgt, xid);
945
946 /* Ring doorbell */
947 bnx2fc_ring_doorbell(tgt);
948
949cleanup_err:
950 return rc;
951}
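
TMF, ABTS, and cleanup all locate their firmware task context the same way: the xid is split into a page number and an offset within the page. The same arithmetic in isolation, with a hypothetical tasks-per-page value (the real constant comes from bnx2fc.h):

    #include <stdint.h>
    #include <stdio.h>

    #define TASKS_PER_PAGE 256   /* hypothetical value for illustration */

    /* Map an exchange ID to (page, offset) in the task context table,
     * exactly as task_idx/index are computed in the driver. */
    static void xid_to_task_slot(uint16_t xid, int *page, int *offset)
    {
        *page = xid / TASKS_PER_PAGE;
        *offset = xid % TASKS_PER_PAGE;
    }

    int main(void)
    {
        int page, offset;

        xid_to_task_slot(0x1234, &page, &offset);
        printf("xid 0x1234 -> page %d, offset %d\n", page, offset);
        return 0;
    }
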
952
953/**
954 * bnx2fc_eh_target_reset: Reset a target
955 *
956 * @sc_cmd: SCSI command
957 *
958 * Set from SCSI host template to send task mgmt command to the target
959 * and wait for the response
960 */
961int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
962{
963 return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
964}
965
966/**
967 * bnx2fc_eh_device_reset - Reset a single LUN
968 *
969 * @sc_cmd: SCSI command
970 *
971 * Set from SCSI host template to send task mgmt command to the target
972 * and wait for the response
973 */
974int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
975{
976 return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
977}
978
979/**
980 * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
981 * SCSI command
982 *
983 * @sc_cmd: SCSI_ML command pointer
984 *
985 * SCSI abort request handler
986 */
987int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
988{
989 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
990 struct fc_rport_libfc_priv *rp = rport->dd_data;
991 struct bnx2fc_cmd *io_req;
992 struct fc_lport *lport;
993 struct bnx2fc_rport *tgt;
994 int rc = FAILED;
995
996
997 rc = fc_block_scsi_eh(sc_cmd);
998 if (rc)
999 return rc;
1000
1001 lport = shost_priv(sc_cmd->device->host);
1002 if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
1003 printk(KERN_ALERT PFX "eh_abort: link not ready\n");
1004 return rc;
1005 }
1006
1007 tgt = (struct bnx2fc_rport *)&rp[1];
1008
1009 BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");
1010
1011 spin_lock_bh(&tgt->tgt_lock);
1012 io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
1013 if (!io_req) {
1014 /* Command might have just completed */
1015 printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
1016 spin_unlock_bh(&tgt->tgt_lock);
1017 return SUCCESS;
1018 }
1019 BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
1020 io_req->refcount.refcount.counter);
1021
1022 /* Hold IO request across abort processing */
1023 kref_get(&io_req->refcount);
1024
1025 BUG_ON(tgt != io_req->tgt);
1026
1027 /* Remove the io_req from the active_q. */
1028 /*
1029 * Task Mgmt functions (LUN RESET & TGT RESET) will not
1030 * issue an ABTS on this particular IO req, as the
1031 * io_req is no longer in the active_q.
1032 */
1033 if (tgt->flush_in_prog) {
1034 printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
1035 "flush in progress\n", io_req->xid);
1036 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1037 spin_unlock_bh(&tgt->tgt_lock);
1038 return SUCCESS;
1039 }
1040
1041 if (io_req->on_active_queue == 0) {
1042 printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
1043 "not on active_q\n", io_req->xid);
1044 /*
1045 * This condition can happen only due to the FW bug,
1046 * where we do not receive cleanup response from
1047 * the FW. Handle this case gracefully by erroring
1048 * back the IO request to SCSI-ml
1049 */
1050 bnx2fc_scsi_done(io_req, DID_ABORT);
1051
1052 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1053 spin_unlock_bh(&tgt->tgt_lock);
1054 return SUCCESS;
1055 }
1056
1057 /*
1058 * Only eh_abort processing will remove the IO from
1059 * active_cmd_q before processing the request. this is
1060 * done to avoid race conditions between IOs aborted
1061 * as part of task management completion and eh_abort
1062 * processing
1063 */
1064 list_del_init(&io_req->link);
1065 io_req->on_active_queue = 0;
1066 /* Move IO req to retire queue */
1067 list_add_tail(&io_req->link, &tgt->io_retire_queue);
1068
1069 init_completion(&io_req->tm_done);
1070 io_req->wait_for_comp = 1;
1071
1072 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
1073 /* Cancel the current timer running on this io_req */
1074 if (cancel_delayed_work(&io_req->timeout_work))
1075 kref_put(&io_req->refcount,
1076 bnx2fc_cmd_release); /* drop timer hold */
1077 set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
1078 rc = bnx2fc_initiate_abts(io_req);
1079 } else {
1080 printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
1081 "already in abts processing\n", io_req->xid);
1082 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1083 spin_unlock_bh(&tgt->tgt_lock);
1084 return SUCCESS;
1085 }
1086 if (rc == FAILED) {
1087 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1088 spin_unlock_bh(&tgt->tgt_lock);
1089 return rc;
1090 }
1091 spin_unlock_bh(&tgt->tgt_lock);
1092
1093 wait_for_completion(&io_req->tm_done);
1094
1095 spin_lock_bh(&tgt->tgt_lock);
1096 io_req->wait_for_comp = 0;
1097 if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
1098 &io_req->req_flags))) {
1099 /* Let the scsi-ml try to recover this command */
1100 printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
1101 io_req->xid);
1102 rc = FAILED;
1103 } else {
1104 /*
1105 * We come here even when there was a race condition
1106 * between timeout and abts completion, and abts
1107 * completion happens just in time.
1108 */
1109 BNX2FC_IO_DBG(io_req, "abort succeeded\n");
1110 rc = SUCCESS;
1111 bnx2fc_scsi_done(io_req, DID_ABORT);
1112 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1113 }
1114
1115 /* release the reference taken in eh_abort */
1116 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1117 spin_unlock_bh(&tgt->tgt_lock);
1118 return rc;
1119}
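
Note the reference discipline above: eh_abort takes an extra kref before releasing tgt_lock and sleeping, so a racing completion cannot free the io_req underneath it. A toy userspace version of that hold/drop pattern, with C11 atomics standing in for kref:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Toy refcounted request; req_get/req_put stand in for kref_get/kref_put. */
    struct req {
        atomic_int refs;
    };

    static void req_get(struct req *r) { atomic_fetch_add(&r->refs, 1); }

    static void req_put(struct req *r)
    {
        if (atomic_fetch_sub(&r->refs, 1) == 1) {
            printf("last ref dropped: freeing\n");
            free(r);
        }
    }

    int main(void)
    {
        struct req *r = malloc(sizeof(*r));

        atomic_init(&r->refs, 1);  /* command's own reference */
        req_get(r);   /* hold across the wait, like eh_abort does */
        /* ... sleep for completion; other contexts may run meanwhile ... */
        req_put(r);   /* completion path's drop */
        req_put(r);   /* eh_abort's drop: object freed exactly once */
        return 0;
    }
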
1120
1121void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
1122 struct fcoe_task_ctx_entry *task,
1123 u8 num_rq)
1124{
1125 BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
1126 "refcnt = %d, cmd_type = %d\n",
1127 io_req->refcount.refcount.counter, io_req->cmd_type);
1128 bnx2fc_scsi_done(io_req, DID_ERROR);
1129 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1130}
1131
1132void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
1133 struct fcoe_task_ctx_entry *task,
1134 u8 num_rq)
1135{
1136 u32 r_ctl;
1137 u32 r_a_tov = FC_DEF_R_A_TOV;
1138 u8 issue_rrq = 0;
1139 struct bnx2fc_rport *tgt = io_req->tgt;
1140
1141 BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x"
1142 "refcnt = %d, cmd_type = %d\n",
1143 io_req->xid,
1144 io_req->refcount.refcount.counter, io_req->cmd_type);
1145
1146 if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
1147 &io_req->req_flags)) {
1148 BNX2FC_IO_DBG(io_req, "Timer context finished processing"
1149 " this io\n");
1150 return;
1151 }
1152
1153 /* Do not issue RRQ as this IO is already cleaned up */
1154 if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
1155 &io_req->req_flags))
1156 goto io_compl;
1157
1158 /*
1159 * For an ABTS issued due to the SCSI eh_abort_handler, timeout
1160 * values are maintained by scsi-ml itself. Cancel the timeout
1161 * in case the ABTS was issued as part of a task management
1162 * function or due to an FW error.
1163 */
1164 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
1165 if (cancel_delayed_work(&io_req->timeout_work))
1166 kref_put(&io_req->refcount,
1167 bnx2fc_cmd_release); /* drop timer hold */
1168
1169 r_ctl = task->cmn.general.rsp_info.abts_rsp.r_ctl;
1170
1171 switch (r_ctl) {
1172 case FC_RCTL_BA_ACC:
1173 /*
1174 * Don't release this cmd yet. It will be released
1175 * after we get the RRQ response.
1176 */
1177 BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
1178 issue_rrq = 1;
1179 break;
1180
1181 case FC_RCTL_BA_RJT:
1182 BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
1183 break;
1184 default:
1185 printk(KERN_ERR PFX "Unknown ABTS response\n");
1186 break;
1187 }
1188
1189 if (issue_rrq) {
1190 BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
1191 set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
1192 }
1193 set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
1194 bnx2fc_cmd_timer_set(io_req, r_a_tov);
1195
1196io_compl:
1197 if (io_req->wait_for_comp) {
1198 if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
1199 &io_req->req_flags))
1200 complete(&io_req->tm_done);
1201 } else {
1202 /*
1203 * We end up here when the ABTS is issued in an
1204 * asynchronous context, i.e., as part of task
1205 * management completion, when an FW error is
1206 * received, or when the ABTS is issued because
1207 * the IO timed out. In all these cases the IO
1208 * is completed back to scsi-ml with an error.
1209 */
1210
1211 if (io_req->on_active_queue) {
1212 list_del_init(&io_req->link);
1213 io_req->on_active_queue = 0;
1214 /* Move IO req to retire queue */
1215 list_add_tail(&io_req->link, &tgt->io_retire_queue);
1216 }
1217 bnx2fc_scsi_done(io_req, DID_ERROR);
1218 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1219 }
1220}
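
The timer context and the CQE context race to finish ABTS processing; test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, ...) lets exactly one of them win. The same first-claimer idiom with C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* "Done" flag shared by the timer and completion contexts. */
    static atomic_uint req_flags;
    #define FLAG_ABTS_DONE (1u << 0)

    /* Returns true only for the first caller, like test_and_set_bit(). */
    static bool claim_abts_done(void)
    {
        return !(atomic_fetch_or(&req_flags, FLAG_ABTS_DONE) & FLAG_ABTS_DONE);
    }

    int main(void)
    {
        printf("completion context wins: %d\n", claim_abts_done()); /* 1 */
        printf("timer context loses:     %d\n", claim_abts_done()); /* 0 */
        return 0;
    }
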
1221
1222static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
1223{
1224 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1225 struct bnx2fc_rport *tgt = io_req->tgt;
1226 struct list_head *list;
1227 struct list_head *tmp;
1228 struct bnx2fc_cmd *cmd;
1229 int tm_lun = sc_cmd->device->lun;
1230 int rc = 0;
1231 int lun;
1232
1233 /* called with tgt_lock held */
1234 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
1235 /*
1236 * Walk through the active_cmd_queue and abort each IO
1237 * that matches the LUN that was reset
1238 */
1239 list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
1240 BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
1241 cmd = (struct bnx2fc_cmd *)list;
1242 lun = cmd->sc_cmd->device->lun;
1243 if (lun == tm_lun) {
1244 /* Initiate ABTS on this cmd */
1245 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
1246 &cmd->req_flags)) {
1247 /* cancel the IO timeout */
1248 if (cancel_delayed_work(&cmd->timeout_work))
1249 kref_put(&cmd->refcount,
1250 bnx2fc_cmd_release);
1251 /* timer hold */
1252 rc = bnx2fc_initiate_abts(cmd);
1253 /* abts shouldn't fail in this context */
1254 WARN_ON(rc != SUCCESS);
1255 } else
1256 printk(KERN_ERR PFX "lun_rst: abts already in"
1257 " progress for this IO 0x%x\n",
1258 cmd->xid);
1259 }
1260 }
1261}
1262
1263static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
1264{
1265 struct bnx2fc_rport *tgt = io_req->tgt;
1266 struct list_head *list;
1267 struct list_head *tmp;
1268 struct bnx2fc_cmd *cmd;
1269 int rc = 0;
1270
1271 /* called with tgt_lock held */
1272 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
1273 /*
1274 * Walk through the active_cmd_queue and abort every IO
1275 * on the target that was reset
1276 */
1277 list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
1278 BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
1279 cmd = (struct bnx2fc_cmd *)list;
1280 /* Initiate ABTS */
1281 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
1282 &cmd->req_flags)) {
1283 /* cancel the IO timeout */
1284 if (cancel_delayed_work(&cmd->timeout_work))
1285 kref_put(&cmd->refcount,
1286 bnx2fc_cmd_release); /* timer hold */
1287 rc = bnx2fc_initiate_abts(cmd);
1288 /* abts shouldn't fail in this context */
1289 WARN_ON(rc != SUCCESS);
1290
1291 } else
1292 printk(KERN_ERR PFX "tgt_rst: abts already in progress"
1293 " for this IO 0x%x\n", cmd->xid);
1294 }
1295}
1296
1297void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
1298 struct fcoe_task_ctx_entry *task, u8 num_rq)
1299{
1300 struct bnx2fc_mp_req *tm_req;
1301 struct fc_frame_header *fc_hdr;
1302 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1303 u64 *hdr;
1304 u64 *temp_hdr;
1305 void *rsp_buf;
1306
1307 /* Called with tgt_lock held */
1308 BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");
1309
1310 if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)))
1311 set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
1312 else {
1313 /* TM has already timed out and we got
1314 * delayed completion. Ignore completion
1315 * processing.
1316 */
1317 return;
1318 }
1319
1320 tm_req = &(io_req->mp_req);
1321 fc_hdr = &(tm_req->resp_fc_hdr);
1322 hdr = (u64 *)fc_hdr;
1323 temp_hdr = (u64 *)
1324 &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
1325 hdr[0] = cpu_to_be64(temp_hdr[0]);
1326 hdr[1] = cpu_to_be64(temp_hdr[1]);
1327 hdr[2] = cpu_to_be64(temp_hdr[2]);
1328
1329 tm_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off;
1330
1331 rsp_buf = tm_req->resp_buf;
1332
1333 if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
1334 bnx2fc_parse_fcp_rsp(io_req,
1335 (struct fcoe_fcp_rsp_payload *)
1336 rsp_buf, num_rq);
1337 if (io_req->fcp_rsp_code == 0) {
1338 /* TM successful */
1339 if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
1340 bnx2fc_lun_reset_cmpl(io_req);
1341 else if (tm_req->tm_flags & FCP_TMF_TGT_RESET)
1342 bnx2fc_tgt_reset_cmpl(io_req);
1343 }
1344 } else {
1345 printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
1346 fc_hdr->fh_r_ctl);
1347 }
1348 if (!sc_cmd->SCp.ptr) {
1349 printk(KERN_ALERT PFX "tm_compl: SCp.ptr is NULL\n");
1350 return;
1351 }
1352 switch (io_req->fcp_status) {
1353 case FC_GOOD:
1354 if (io_req->cdb_status == 0) {
1355 /* Good IO completion */
1356 sc_cmd->result = DID_OK << 16;
1357 } else {
1358 /* Transport status is good, SCSI status not good */
1359 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1360 }
1361 if (io_req->fcp_resid)
1362 scsi_set_resid(sc_cmd, io_req->fcp_resid);
1363 break;
1364
1365 default:
1366 BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
1367 io_req->fcp_status);
1368 break;
1369 }
1370
1371 sc_cmd = io_req->sc_cmd;
1372 io_req->sc_cmd = NULL;
1373
1374 /* check if the io_req exists in tgt's tmf_q */
1375 if (io_req->on_tmf_queue) {
1376
1377 list_del_init(&io_req->link);
1378 io_req->on_tmf_queue = 0;
1379 } else {
1380
1381 printk(KERN_ALERT PFX "Command not on active_tm_queue!\n");
1382 return;
1383 }
1384
1385 sc_cmd->SCp.ptr = NULL;
1386 sc_cmd->scsi_done(sc_cmd);
1387
1388 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1389 if (io_req->wait_for_comp) {
1390 BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
1391 complete(&io_req->tm_done);
1392 }
1393}
1394
1395static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
1396 int bd_index)
1397{
1398 struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
1399 int frag_size, sg_frags;
1400
1401 sg_frags = 0;
1402 while (sg_len) {
1403 if (sg_len >= BNX2FC_BD_SPLIT_SZ)
1404 frag_size = BNX2FC_BD_SPLIT_SZ;
1405 else
1406 frag_size = sg_len;
1407 bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff;
1408 bd[bd_index + sg_frags].buf_addr_hi = addr >> 32;
1409 bd[bd_index + sg_frags].buf_len = (u16)frag_size;
1410 bd[bd_index + sg_frags].flags = 0;
1411
1412 addr += (u64) frag_size;
1413 sg_frags++;
1414 sg_len -= frag_size;
1415 }
1416 return sg_frags;
1417
1418}
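
bnx2fc_split_bd above chops an oversized SG element into BNX2FC_BD_SPLIT_SZ-sized fragments. A standalone sketch of the same loop, using an illustrative 32 KiB split size (the real constant lives in bnx2fc.h):

    #include <stdint.h>
    #include <stdio.h>

    #define BD_SPLIT_SZ 0x8000   /* illustrative 32 KiB split size */

    /* Emit (addr, len) fragments for one oversized SG element, mirroring
     * the bnx2fc_split_bd loop; returns the number of fragments. */
    static int split_bd(uint64_t addr, int sg_len)
    {
        int frags = 0;

        while (sg_len) {
            int frag = sg_len >= BD_SPLIT_SZ ? BD_SPLIT_SZ : sg_len;

            printf("bd[%d]: addr=0x%llx len=0x%x\n", frags,
                   (unsigned long long)addr, frag);
            addr += frag;
            sg_len -= frag;
            frags++;
        }
        return frags;
    }

    int main(void)
    {
        /* 80 KiB element -> two full 32 KiB BDs plus one 16 KiB BD */
        printf("frags = %d\n", split_bd(0x100000, 80 * 1024));
        return 0;
    }
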
1419
1420static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
1421{
1422 struct scsi_cmnd *sc = io_req->sc_cmd;
1423 struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
1424 struct scatterlist *sg;
1425 int byte_count = 0;
1426 int sg_count = 0;
1427 int bd_count = 0;
1428 int sg_frags;
1429 unsigned int sg_len;
1430 u64 addr;
1431 int i;
1432
1433 sg_count = scsi_dma_map(sc);
1434 scsi_for_each_sg(sc, sg, sg_count, i) {
1435 sg_len = sg_dma_len(sg);
1436 addr = sg_dma_address(sg);
1437 if (sg_len > BNX2FC_MAX_BD_LEN) {
1438 sg_frags = bnx2fc_split_bd(io_req, addr, sg_len,
1439 bd_count);
1440 } else {
1441
1442 sg_frags = 1;
1443 bd[bd_count].buf_addr_lo = addr & 0xffffffff;
1444 bd[bd_count].buf_addr_hi = addr >> 32;
1445 bd[bd_count].buf_len = (u16)sg_len;
1446 bd[bd_count].flags = 0;
1447 }
1448 bd_count += sg_frags;
1449 byte_count += sg_len;
1450 }
1451 if (byte_count != scsi_bufflen(sc))
1452 printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, "
1453 "task_id = 0x%x\n", byte_count, scsi_bufflen(sc),
1454 io_req->xid);
1455 return bd_count;
1456}
1457
1458static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
1459{
1460 struct scsi_cmnd *sc = io_req->sc_cmd;
1461 struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
1462 int bd_count;
1463
1464 if (scsi_sg_count(sc))
1465 bd_count = bnx2fc_map_sg(io_req);
1466 else {
1467 bd_count = 0;
1468 bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
1469 bd[0].buf_len = bd[0].flags = 0;
1470 }
1471 io_req->bd_tbl->bd_valid = bd_count;
1472}
1473
1474static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
1475{
1476 struct scsi_cmnd *sc = io_req->sc_cmd;
1477
1478 if (io_req->bd_tbl->bd_valid && sc) {
1479 scsi_dma_unmap(sc);
1480 io_req->bd_tbl->bd_valid = 0;
1481 }
1482}
1483
1484void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
1485 struct fcp_cmnd *fcp_cmnd)
1486{
1487 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1488 char tag[2];
1489
1490 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
1491
1492 int_to_scsilun(sc_cmd->device->lun,
1493 (struct scsi_lun *) fcp_cmnd->fc_lun);
1494
1495
1496 fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
1497 memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
1498
1499 fcp_cmnd->fc_cmdref = 0;
1500 fcp_cmnd->fc_pri_ta = 0;
1501 fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
1502 fcp_cmnd->fc_flags = io_req->io_req_flags;
1503
1504 if (scsi_populate_tag_msg(sc_cmd, tag)) {
1505 switch (tag[0]) {
1506 case HEAD_OF_QUEUE_TAG:
1507 fcp_cmnd->fc_pri_ta = FCP_PTA_HEADQ;
1508 break;
1509 case ORDERED_QUEUE_TAG:
1510 fcp_cmnd->fc_pri_ta = FCP_PTA_ORDERED;
1511 break;
1512 default:
1513 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
1514 break;
1515 }
1516 } else {
1517 fcp_cmnd->fc_pri_ta = 0;
1518 }
1519}
1520
1521static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
1522 struct fcoe_fcp_rsp_payload *fcp_rsp,
1523 u8 num_rq)
1524{
1525 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1526 struct bnx2fc_rport *tgt = io_req->tgt;
1527 u8 rsp_flags = fcp_rsp->fcp_flags.flags;
1528 u32 rq_buff_len = 0;
1529 int i;
1530 unsigned char *rq_data;
1531 unsigned char *dummy;
1532 int fcp_sns_len = 0;
1533 int fcp_rsp_len = 0;
1534
1535 io_req->fcp_status = FC_GOOD;
1536 io_req->fcp_resid = fcp_rsp->fcp_resid;
1537
1538 io_req->scsi_comp_flags = rsp_flags;
1539 CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
1540 fcp_rsp->scsi_status_code;
1541
1542 /* Fetch fcp_rsp_info and fcp_sns_info if available */
1543 if (num_rq) {
1544
1545 /*
1546 * We do not anticipate num_rq > 1, as the Linux-defined
1547 * SCSI_SENSE_BUFFERSIZE is 96 bytes plus 8 bytes of
1548 * FCP_RSP_INFO; a single 256-byte RQ buffer holds this.
1549 */
1550
1551 if (rsp_flags &
1552 FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
1553 fcp_rsp_len = rq_buff_len
1554 = fcp_rsp->fcp_rsp_len;
1555 }
1556
1557 if (rsp_flags &
1558 FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
1559 fcp_sns_len = fcp_rsp->fcp_sns_len;
1560 rq_buff_len += fcp_rsp->fcp_sns_len;
1561 }
1562
1563 io_req->fcp_rsp_len = fcp_rsp_len;
1564 io_req->fcp_sns_len = fcp_sns_len;
1565
1566 if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
1567 /* Invalid sense length. */
1568 printk(KERN_ALERT PFX "invalid sns length %d\n",
1569 rq_buff_len);
1570 /* reset rq_buff_len */
1571 rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
1572 }
1573
1574 rq_data = bnx2fc_get_next_rqe(tgt, 1);
1575
1576 if (num_rq > 1) {
1577 /* We do not need extra sense data */
1578 for (i = 1; i < num_rq; i++)
1579 dummy = bnx2fc_get_next_rqe(tgt, 1);
1580 }
1581
1582 /* fetch fcp_rsp_code */
1583 if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
1584 /* Only for task management function */
1585 io_req->fcp_rsp_code = rq_data[3];
1586 printk(KERN_ERR PFX "fcp_rsp_code = %d\n",
1587 io_req->fcp_rsp_code);
1588 }
1589
1590 /* fetch sense data */
1591 rq_data += fcp_rsp_len;
1592
1593 if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
1594 printk(KERN_ERR PFX "Truncating sense buffer\n");
1595 fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
1596 }
1597
1598 memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1599 if (fcp_sns_len)
1600 memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
1601
1602 /* return RQ entries */
1603 for (i = 0; i < num_rq; i++)
1604 bnx2fc_return_rqe(tgt, 1);
1605 }
1606}
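
The RQ buffer parsed above is laid out as FCP_RSP_INFO (when present) followed by sense data: the rsp_code sits at byte 3 of FCP_RSP_INFO, and the sense bytes begin at offset fcp_rsp_len. A minimal sketch of that offset arithmetic (buffer contents are made up):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Parse one RQ buffer laid out as [FCP_RSP_INFO][sense data], as the
     * driver does; lengths come from the fcp_rsp flags/len fields. */
    static void parse_rq(const uint8_t *rq, int rsp_len, int sns_len,
                         uint8_t *rsp_code, uint8_t *sense, int sense_max)
    {
        if (rsp_len == 4 || rsp_len == 8)
            *rsp_code = rq[3];        /* FCP_RSP_INFO byte 3 */
        if (sns_len > sense_max)
            sns_len = sense_max;      /* truncate, like the driver */
        memcpy(sense, rq + rsp_len, sns_len);  /* sense follows rsp info */
    }

    int main(void)
    {
        uint8_t rq[16] = { 0, 0, 0, 0 /* rsp_code */, 0x70, 0x00, 0x05 };
        uint8_t code = 0xff, sense[96] = { 0 };

        parse_rq(rq, 4, 3, &code, sense, sizeof(sense));
        printf("rsp_code=%u sense[0]=0x%02x\n", code, sense[0]);
        return 0;
    }
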
1607
1608/**
1609 * bnx2fc_queuecommand - Queuecommand function of the scsi template
1610 *
1611 * @host: The Scsi_Host the command was issued to
1612 * @sc_cmd: struct scsi_cmnd to be executed
1613 *
1614 * This is the IO strategy routine, called by SCSI-ML
1615 **/
1616int bnx2fc_queuecommand(struct Scsi_Host *host,
1617 struct scsi_cmnd *sc_cmd)
1618{
1619 struct fc_lport *lport = shost_priv(host);
1620 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1621 struct fc_rport_libfc_priv *rp = rport->dd_data;
1622 struct bnx2fc_rport *tgt;
1623 struct bnx2fc_cmd *io_req;
1624 int rc = 0;
1625 int rval;
1626
1627 rval = fc_remote_port_chkready(rport);
1628 if (rval) {
1629 sc_cmd->result = rval;
1630 sc_cmd->scsi_done(sc_cmd);
1631 return 0;
1632 }
1633
1634 if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
1635 rc = SCSI_MLQUEUE_HOST_BUSY;
1636 goto exit_qcmd;
1637 }
1638
1639 /* rport and tgt are allocated together, so tgt should be non-NULL */
1640 tgt = (struct bnx2fc_rport *)&rp[1];
1641
1642 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
1643 /*
1644 * Session is not offloaded yet. Let SCSI-ml retry
1645 * the command.
1646 */
1647 rc = SCSI_MLQUEUE_TARGET_BUSY;
1648 goto exit_qcmd;
1649 }
1650
1651 io_req = bnx2fc_cmd_alloc(tgt);
1652 if (!io_req) {
1653 rc = SCSI_MLQUEUE_HOST_BUSY;
1654 goto exit_qcmd;
1655 }
1656 io_req->sc_cmd = sc_cmd;
1657
1658 if (bnx2fc_post_io_req(tgt, io_req)) {
1659 printk(KERN_ERR PFX "Unable to post io_req\n");
1660 rc = SCSI_MLQUEUE_HOST_BUSY;
1661 goto exit_qcmd;
1662 }
1663exit_qcmd:
1664 return rc;
1665}
1666
1667void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
1668 struct fcoe_task_ctx_entry *task,
1669 u8 num_rq)
1670{
1671 struct fcoe_fcp_rsp_payload *fcp_rsp;
1672 struct bnx2fc_rport *tgt = io_req->tgt;
1673 struct scsi_cmnd *sc_cmd;
1674 struct Scsi_Host *host;
1675
1676
1677 /* scsi_cmd_cmpl is called with tgt lock held */
1678
1679 if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
1680 /* we will not receive ABTS response for this IO */
1681 BNX2FC_IO_DBG(io_req, "Timer context finished processing "
1682 "this scsi cmd\n");
1683 }
1684
1685 /* Cancel the timeout_work, as we received IO completion */
1686 if (cancel_delayed_work(&io_req->timeout_work))
1687 kref_put(&io_req->refcount,
1688 bnx2fc_cmd_release); /* drop timer hold */
1689
1690 sc_cmd = io_req->sc_cmd;
1691 if (sc_cmd == NULL) {
1692 printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n");
1693 return;
1694 }
1695
1696 /* Fetch fcp_rsp from task context and perform cmd completion */
1697 fcp_rsp = (struct fcoe_fcp_rsp_payload *)
1698 &(task->cmn.general.rsp_info.fcp_rsp.payload);
1699
1700 /* parse fcp_rsp and obtain sense data from RQ if available */
1701 bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
1702
1703 host = sc_cmd->device->host;
1704 if (!sc_cmd->SCp.ptr) {
1705 printk(KERN_ERR PFX "SCp.ptr is NULL\n");
1706 return;
1707 }
1708 io_req->sc_cmd = NULL;
1709
1710 if (io_req->on_active_queue) {
1711 list_del_init(&io_req->link);
1712 io_req->on_active_queue = 0;
1713 /* Move IO req to retire queue */
1714 list_add_tail(&io_req->link, &tgt->io_retire_queue);
1715 } else {
1716 /* This should not happen: the IO could have been pulled
1717 * off the queue by bnx2fc_flush_active_ios(), or during a
1718 * race between command abort and (late) completion.
1719 */
1720 BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
1721 if (io_req->wait_for_comp)
1722 if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
1723 &io_req->req_flags))
1724 complete(&io_req->tm_done);
1725 }
1726
1727 bnx2fc_unmap_sg_list(io_req);
1728
1729 switch (io_req->fcp_status) {
1730 case FC_GOOD:
1731 if (io_req->cdb_status == 0) {
1732 /* Good IO completion */
1733 sc_cmd->result = DID_OK << 16;
1734 } else {
1735 /* Transport status is good, SCSI status not good */
1736 BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d"
1737 " fcp_resid = 0x%x\n",
1738 io_req->cdb_status, io_req->fcp_resid);
1739 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1740 }
1741 if (io_req->fcp_resid)
1742 scsi_set_resid(sc_cmd, io_req->fcp_resid);
1743 break;
1744 default:
1745 printk(KERN_ALERT PFX "scsi_cmd_compl: fcp_status = %d\n",
1746 io_req->fcp_status);
1747 break;
1748 }
1749 sc_cmd->SCp.ptr = NULL;
1750 sc_cmd->scsi_done(sc_cmd);
1751 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1752}
1753
1754static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
1755 struct bnx2fc_cmd *io_req)
1756{
1757 struct fcoe_task_ctx_entry *task;
1758 struct fcoe_task_ctx_entry *task_page;
1759 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1760 struct fcoe_port *port = tgt->port;
1761 struct bnx2fc_hba *hba = port->priv;
1762 struct fc_lport *lport = port->lport;
1763 struct fcoe_dev_stats *stats;
1764 int task_idx, index;
1765 u16 xid;
1766
1767 /* Initialize rest of io_req fields */
1768 io_req->cmd_type = BNX2FC_SCSI_CMD;
1769 io_req->port = port;
1770 io_req->tgt = tgt;
1771 io_req->data_xfer_len = scsi_bufflen(sc_cmd);
1772 sc_cmd->SCp.ptr = (char *)io_req;
1773
1774 stats = per_cpu_ptr(lport->dev_stats, get_cpu());
1775 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
1776 io_req->io_req_flags = BNX2FC_READ;
1777 stats->InputRequests++;
1778 stats->InputBytes += io_req->data_xfer_len;
1779 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
1780 io_req->io_req_flags = BNX2FC_WRITE;
1781 stats->OutputRequests++;
1782 stats->OutputBytes += io_req->data_xfer_len;
1783 } else {
1784 io_req->io_req_flags = 0;
1785 stats->ControlRequests++;
1786 }
1787 put_cpu();
1788
1789 xid = io_req->xid;
1790
1791 /* Build buffer descriptor list for firmware from sg list */
1792 bnx2fc_build_bd_list_from_sg(io_req);
1793
1794 task_idx = xid / BNX2FC_TASKS_PER_PAGE;
1795 index = xid % BNX2FC_TASKS_PER_PAGE;
1796
1797 /* Initialize task context for this IO request */
1798 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
1799 task = &(task_page[index]);
1800 bnx2fc_init_task(io_req, task);
1801
1802 spin_lock_bh(&tgt->tgt_lock);
1803
1804 if (tgt->flush_in_prog) {
1805 printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
1806 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1807 spin_unlock_bh(&tgt->tgt_lock);
1808 return -EAGAIN;
1809 }
1810
1811 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
1812 printk(KERN_ERR PFX "Session not ready...post_io\n");
1813 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1814 spin_unlock_bh(&tgt->tgt_lock);
1815 return -EAGAIN;
1816 }
1817
1818 /* Time IO req */
1819 bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
1820 /* Obtain free SQ entry */
1821 bnx2fc_add_2_sq(tgt, xid);
1822
1823 /* Enqueue the io_req to active_cmd_queue */
1824
1825 io_req->on_active_queue = 1;
1826 /* move io_req from pending_queue to active_queue */
1827 list_add_tail(&io_req->link, &tgt->active_cmd_queue);
1828
1829 /* Ring doorbell */
1830 bnx2fc_ring_doorbell(tgt);
1831 spin_unlock_bh(&tgt->tgt_lock);
1832 return 0;
1833}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
new file mode 100644
index 000000000000..7ea93af60260
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -0,0 +1,844 @@
1/* bnx2fc_tgt.c: Broadcom NetXtreme II Linux FCoE offload driver.
2 * Handles operations such as session offload/upload etc, and manages
3 * session resources such as connection id and qp resources.
4 *
5 * Copyright (c) 2008 - 2010 Broadcom Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
12 */
13
14#include "bnx2fc.h"
15static void bnx2fc_upld_timer(unsigned long data);
16static void bnx2fc_ofld_timer(unsigned long data);
17static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
18 struct fcoe_port *port,
19 struct fc_rport_priv *rdata);
20static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
21 struct bnx2fc_rport *tgt);
22static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
23 struct bnx2fc_rport *tgt);
24static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
25 struct bnx2fc_rport *tgt);
26static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id);
27
28static void bnx2fc_upld_timer(unsigned long data)
29{
30
31 struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
32
33 BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
34 /* fake upload completion */
35 clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
36 set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
37 wake_up_interruptible(&tgt->upld_wait);
38}
39
40static void bnx2fc_ofld_timer(unsigned long data)
41{
42
43 struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
44
45 BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n");
46 /* NOTE: This function should normally never run, as
47 * offload should not time out
48 */
49 /*
50 * If the timer has expired, this session is dead
51 * Clear offloaded flag and logout of this device.
52 * Since OFFLOADED flag is cleared, this case
53 * will be considered as offload error and the
54 * port will be logged off, and conn_id, session
55 * resources are freed up in bnx2fc_offload_session
56 */
57 clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
58 set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
59 wake_up_interruptible(&tgt->ofld_wait);
60}
61
62static void bnx2fc_offload_session(struct fcoe_port *port,
63 struct bnx2fc_rport *tgt,
64 struct fc_rport_priv *rdata)
65{
66 struct fc_lport *lport = rdata->local_port;
67 struct fc_rport *rport = rdata->rport;
68 struct bnx2fc_hba *hba = port->priv;
69 int rval;
70 int i = 0;
71
72 /* Initialize bnx2fc_rport */
73 /* NOTE: tgt is already bzero'd */
74 rval = bnx2fc_init_tgt(tgt, port, rdata);
75 if (rval) {
76 printk(KERN_ERR PFX "Failed to allocate conn id for "
77 "port_id (%6x)\n", rport->port_id);
78 goto ofld_err;
79 }
80
81 /* Allocate session resources */
82 rval = bnx2fc_alloc_session_resc(hba, tgt);
83 if (rval) {
84 printk(KERN_ERR PFX "Failed to allocate resources\n");
85 goto ofld_err;
86 }
87
88 /*
89 * Initialize FCoE session offload process.
90 * Upon completion of the offload process, add the
91 * rport to the list of rports
92 */
93retry_ofld:
94 clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
95 rval = bnx2fc_send_session_ofld_req(port, tgt);
96 if (rval) {
97 printk(KERN_ERR PFX "ofld_req failed\n");
98 goto ofld_err;
99 }
100
101 /*
102 * Wait for the session to be offloaded and enabled. 3 seconds
103 * should be ample time for this process to complete.
104 */
105 setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
106 mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
107
108 wait_event_interruptible(tgt->ofld_wait,
109 (test_bit(
110 BNX2FC_FLAG_OFLD_REQ_CMPL,
111 &tgt->flags)));
112 if (signal_pending(current))
113 flush_signals(current);
114
115 del_timer_sync(&tgt->ofld_timer);
116
117 if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
118 if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE,
119 &tgt->flags)) {
120 BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, "
121 "retry ofld..%d\n", i++);
122 msleep_interruptible(1000);
123 if (i > 3) {
124 i = 0;
125 goto ofld_err;
126 }
127 goto retry_ofld;
128 }
129 goto ofld_err;
130 }
131 if (bnx2fc_map_doorbell(tgt)) {
132 printk(KERN_ERR PFX "map doorbell failed - no mem\n");
133 /* upload will take care of cleaning up sess resc */
134 lport->tt.rport_logoff(rdata);
135 }
136 return;
137
138ofld_err:
139 /* couldn't offload the session. log off from this rport */
140 BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n");
141 lport->tt.rport_logoff(rdata);
142 /* Free session resources */
143 bnx2fc_free_session_resc(hba, tgt);
144 if (tgt->fcoe_conn_id != -1)
145 bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
146}
147
148void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
149{
150 struct bnx2fc_cmd *io_req;
151 struct list_head *list;
152 struct list_head *tmp;
153 int rc;
154 int i = 0;
155 BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n",
156 tgt->num_active_ios.counter);
157
158 spin_lock_bh(&tgt->tgt_lock);
159 tgt->flush_in_prog = 1;
160
161 list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
162 i++;
163 io_req = (struct bnx2fc_cmd *)list;
164 list_del_init(&io_req->link);
165 io_req->on_active_queue = 0;
166 BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n");
167
168 if (cancel_delayed_work(&io_req->timeout_work)) {
169 if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
170 &io_req->req_flags)) {
171 /* Handle eh_abort timeout */
172 BNX2FC_IO_DBG(io_req, "eh_abort for IO "
173 "cleaned up\n");
174 complete(&io_req->tm_done);
175 }
176 kref_put(&io_req->refcount,
177 bnx2fc_cmd_release); /* drop timer hold */
178 }
179
180 set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags);
181 set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
182 rc = bnx2fc_initiate_cleanup(io_req);
183 BUG_ON(rc);
184 }
185
186 list_for_each_safe(list, tmp, &tgt->els_queue) {
187 i++;
188 io_req = (struct bnx2fc_cmd *)list;
189 list_del_init(&io_req->link);
190 io_req->on_active_queue = 0;
191
192 BNX2FC_IO_DBG(io_req, "els_queue cleanup\n");
193
194 if (cancel_delayed_work(&io_req->timeout_work))
195 kref_put(&io_req->refcount,
196 bnx2fc_cmd_release); /* drop timer hold */
197
198 if ((io_req->cb_func) && (io_req->cb_arg)) {
199 io_req->cb_func(io_req->cb_arg);
200 io_req->cb_arg = NULL;
201 }
202
203 rc = bnx2fc_initiate_cleanup(io_req);
204 BUG_ON(rc);
205 }
206
207 list_for_each_safe(list, tmp, &tgt->io_retire_queue) {
208 i++;
209 io_req = (struct bnx2fc_cmd *)list;
210 list_del_init(&io_req->link);
211
212 BNX2FC_IO_DBG(io_req, "retire_queue flush\n");
213
214 if (cancel_delayed_work(&io_req->timeout_work))
215 kref_put(&io_req->refcount, bnx2fc_cmd_release);
216
217 clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
218 }
219
220 BNX2FC_TGT_DBG(tgt, "IOs flushed = %d\n", i);
221 i = 0;
222 spin_unlock_bh(&tgt->tgt_lock);
223 /* wait for active_ios to go to 0 */
224 while ((tgt->num_active_ios.counter != 0) && (i++ < BNX2FC_WAIT_CNT))
225 msleep(25);
226 if (tgt->num_active_ios.counter != 0)
227 printk(KERN_ERR PFX "CLEANUP on port 0x%x:"
228 " active_ios = %d\n",
229 tgt->rdata->ids.port_id, tgt->num_active_ios.counter);
230 spin_lock_bh(&tgt->tgt_lock);
231 tgt->flush_in_prog = 0;
232 spin_unlock_bh(&tgt->tgt_lock);
233}
234
235static void bnx2fc_upload_session(struct fcoe_port *port,
236 struct bnx2fc_rport *tgt)
237{
238 struct bnx2fc_hba *hba = port->priv;
239
240 BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n",
241 tgt->num_active_ios.counter);
242
243 /*
244 * Called with hba->hba_mutex held.
245 * This is a blocking call
246 */
247 clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
248 bnx2fc_send_session_disable_req(port, tgt);
249
250 /*
251 * Wait for the upload to complete. 3 seconds should be
252 * sufficient time for this process to complete.
253 */
254 setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
255 mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
256
257 BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n");
258 wait_event_interruptible(tgt->upld_wait,
259 (test_bit(
260 BNX2FC_FLAG_UPLD_REQ_COMPL,
261 &tgt->flags)));
262
263 if (signal_pending(current))
264 flush_signals(current);
265
266 del_timer_sync(&tgt->upld_timer);
267
268 /*
269 * Traverse through the active_q and tmf_q and clean up
270 * the IOs in these lists
271 */
272 BNX2FC_TGT_DBG(tgt, "flush/upload - disable wait flags = 0x%lx\n",
273 tgt->flags);
274 bnx2fc_flush_active_ios(tgt);
275
276 /* Issue destroy KWQE */
277 if (test_bit(BNX2FC_FLAG_DISABLED, &tgt->flags)) {
278 BNX2FC_TGT_DBG(tgt, "send destroy req\n");
279 clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
280 bnx2fc_send_session_destroy_req(hba, tgt);
281
282 /* wait for destroy to complete */
283 setup_timer(&tgt->upld_timer,
284 bnx2fc_upld_timer, (unsigned long)tgt);
285 mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
286
287 wait_event_interruptible(tgt->upld_wait,
288 (test_bit(
289 BNX2FC_FLAG_UPLD_REQ_COMPL,
290 &tgt->flags)));
291
292 if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags)))
293 printk(KERN_ERR PFX "ERROR!! destroy timed out\n");
294
295 BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n",
296 tgt->flags);
297 if (signal_pending(current))
298 flush_signals(current);
299
300 del_timer_sync(&tgt->upld_timer);
301
302 } else
303 printk(KERN_ERR PFX "ERROR!! DISABLE req timed out, destroy"
304 " not sent to FW\n");
305
306 /* Free session resources */
307 spin_lock_bh(&tgt->cq_lock);
308 bnx2fc_free_session_resc(hba, tgt);
309 bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
310 spin_unlock_bh(&tgt->cq_lock);
311}
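
Both offload and upload block with a safety net: arm a timer that fakes the completion flag, sleep on a wait queue until the flag shows up, then del_timer_sync(). A userspace analogue using a pthread condition wait with a timeout (all names hypothetical):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Userspace analogue of the arm-timer/wait/del_timer_sync pattern:
     * wait for `done`, but give up after a fixed timeout so a lost
     * firmware event cannot hang the caller forever. */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool done;

    static bool wait_for_done(int timeout_secs)
    {
        struct timespec ts;
        bool ok;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += timeout_secs;

        pthread_mutex_lock(&lock);
        while (!done) {
            if (pthread_cond_timedwait(&cond, &lock, &ts) != 0)
                break;   /* timed out: behave as a faked completion */
        }
        ok = done;
        pthread_mutex_unlock(&lock);
        return ok;
    }

    int main(void)
    {
        /* Nothing ever signals, so this returns false after ~1 second. */
        printf("completed in time: %d\n", wait_for_done(1));
        return 0;
    }
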
312
313static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
314 struct fcoe_port *port,
315 struct fc_rport_priv *rdata)
316{
317
318 struct fc_rport *rport = rdata->rport;
319 struct bnx2fc_hba *hba = port->priv;
320
321 tgt->rport = rport;
322 tgt->rdata = rdata;
323 tgt->port = port;
324
325 if (hba->num_ofld_sess >= BNX2FC_NUM_MAX_SESS) {
326 BNX2FC_TGT_DBG(tgt, "exceeded max sessions. logoff this tgt\n");
327 tgt->fcoe_conn_id = -1;
328 return -1;
329 }
330
331 tgt->fcoe_conn_id = bnx2fc_alloc_conn_id(hba, tgt);
332 if (tgt->fcoe_conn_id == -1)
333 return -1;
334
335 BNX2FC_TGT_DBG(tgt, "init_tgt - conn_id = 0x%x\n", tgt->fcoe_conn_id);
336
337 tgt->max_sqes = BNX2FC_SQ_WQES_MAX;
338 tgt->max_rqes = BNX2FC_RQ_WQES_MAX;
339 tgt->max_cqes = BNX2FC_CQ_WQES_MAX;
340
341 /* Initialize the toggle bit */
342 tgt->sq_curr_toggle_bit = 1;
343 tgt->cq_curr_toggle_bit = 1;
344 tgt->sq_prod_idx = 0;
345 tgt->cq_cons_idx = 0;
346 tgt->rq_prod_idx = 0x8000;
347 tgt->rq_cons_idx = 0;
348 atomic_set(&tgt->num_active_ios, 0);
349
350 tgt->work_time_slice = 2;
351
352 spin_lock_init(&tgt->tgt_lock);
353 spin_lock_init(&tgt->cq_lock);
354
355 /* Initialize active_cmd_queue list */
356 INIT_LIST_HEAD(&tgt->active_cmd_queue);
357
358 /* Initialize IO retire queue */
359 INIT_LIST_HEAD(&tgt->io_retire_queue);
360
361 INIT_LIST_HEAD(&tgt->els_queue);
362
363 /* Initialize active_tm_queue list */
364 INIT_LIST_HEAD(&tgt->active_tm_queue);
365
366 init_waitqueue_head(&tgt->ofld_wait);
367 init_waitqueue_head(&tgt->upld_wait);
368
369 return 0;
370}
371
372/**
373 * bnx2fc_rport_event_handler - This event callback is called after
374 * successful completion of libfc-initiated target login. bnx2fc can
375 * proceed with initiating session establishment.
376 */
377void bnx2fc_rport_event_handler(struct fc_lport *lport,
378 struct fc_rport_priv *rdata,
379 enum fc_rport_event event)
380{
381 struct fcoe_port *port = lport_priv(lport);
382 struct bnx2fc_hba *hba = port->priv;
383 struct fc_rport *rport = rdata->rport;
384 struct fc_rport_libfc_priv *rp;
385 struct bnx2fc_rport *tgt;
386 u32 port_id;
387
388 BNX2FC_HBA_DBG(lport, "rport_event_hdlr: event = %d, port_id = 0x%x\n",
389 event, rdata->ids.port_id);
390 switch (event) {
391 case RPORT_EV_READY:
392 if (!rport) {
393 printk(KERN_ALERT PFX "rport is NULL: ERROR!\n");
394 break;
395 }
396
397 rp = rport->dd_data;
398 if (rport->port_id == FC_FID_DIR_SERV) {
399 /*
400 * bnx2fc_rport structure doesn't exist for
401 * directory server.
402 * We should not come here, as lport will
403 * take care of fabric login
404 */
405 printk(KERN_ALERT PFX "%x - rport_event_handler ERROR\n",
406 rdata->ids.port_id);
407 break;
408 }
409
410 if (rdata->spp_type != FC_TYPE_FCP) {
411 BNX2FC_HBA_DBG(lport, "not FCP type target."
412 " not offloading\n");
413 break;
414 }
415 if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
416 BNX2FC_HBA_DBG(lport, "not FCP_TARGET"
417 " not offloading\n");
418 break;
419 }
420
421 /*
422 * The offload process is protected with the hba mutex.
423 * Use the same mutex for the upload process too.
424 */
425 mutex_lock(&hba->hba_mutex);
426 tgt = (struct bnx2fc_rport *)&rp[1];
427
428 /* This can happen when ADISC finds the same target */
429 if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
430 BNX2FC_TGT_DBG(tgt, "already offloaded\n");
431 mutex_unlock(&hba->hba_mutex);
432 return;
433 }
434
435 /*
436 * Offload the session. This is a blocking call, and will
437 * wait until the session is offloaded.
438 */
439 bnx2fc_offload_session(port, tgt, rdata);
440
441 BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n",
442 hba->num_ofld_sess);
443
444 if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
445 /*
446 * Session is offloaded and enabled. Map
447 * doorbell register for this target
448 */
449 BNX2FC_TGT_DBG(tgt, "sess offloaded\n");
450 /* This counter is protected with hba mutex */
451 hba->num_ofld_sess++;
452
453 set_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
454 } else {
455 /*
456 * Offload or enable would have failed.
457 * In offload/enable completion path, the
458 * rport would have already been removed
459 */
460 BNX2FC_TGT_DBG(tgt, "Port is being logged off as "
461 "offloaded flag not set\n");
462 }
463 mutex_unlock(&hba->hba_mutex);
464 break;
465 case RPORT_EV_LOGO:
466 case RPORT_EV_FAILED:
467 case RPORT_EV_STOP:
468 port_id = rdata->ids.port_id;
469 if (port_id == FC_FID_DIR_SERV)
470 break;
471
472 if (!rport) {
473 printk(KERN_ALERT PFX "%x - rport not created yet!!\n",
474 port_id);
475 break;
476 }
477 rp = rport->dd_data;
478 mutex_lock(&hba->hba_mutex);
479 /*
480 * Perform session upload. Note that rdata->peers is already
481 * removed from disc->rports list before we get this event.
482 */
483 tgt = (struct bnx2fc_rport *)&rp[1];
484
485 if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
486 mutex_unlock(&hba->hba_mutex);
487 break;
488 }
489 clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
490
491 bnx2fc_upload_session(port, tgt);
492 hba->num_ofld_sess--;
493 BNX2FC_TGT_DBG(tgt, "UPLOAD num_ofld_sess = %d\n",
494 hba->num_ofld_sess);
495 /*
496 * Try to wake up the linkdown wait thread. If num_ofld_sess
497 * is 0, the waiting thread wakes up
498 */
499 if ((hba->wait_for_link_down) &&
500 (hba->num_ofld_sess == 0)) {
501 wake_up_interruptible(&hba->shutdown_wait);
502 }
503 if (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags)) {
504 printk(KERN_ERR PFX "Relogin to the tgt\n");
505 mutex_lock(&lport->disc.disc_mutex);
506 lport->tt.rport_login(rdata);
507 mutex_unlock(&lport->disc.disc_mutex);
508 }
509 mutex_unlock(&hba->hba_mutex);
510
511 break;
512
513 case RPORT_EV_NONE:
514 break;
515 }
516}
517
518/**
519 * bnx2fc_tgt_lookup() - Lookup a bnx2fc_rport by port_id
520 *
521 * @port: fcoe_port struct to lookup the target port on
522 * @port_id: The remote port ID to look up
523 */
524struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
525 u32 port_id)
526{
527 struct bnx2fc_hba *hba = port->priv;
528 struct bnx2fc_rport *tgt;
529 struct fc_rport_priv *rdata;
530 int i;
531
532 for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
533 tgt = hba->tgt_ofld_list[i];
534 if ((tgt) && (tgt->port == port)) {
535 rdata = tgt->rdata;
536 if (rdata->ids.port_id == port_id) {
537 if (rdata->rp_state != RPORT_ST_DELETE) {
538 BNX2FC_TGT_DBG(tgt, "rport "
539 "obtained\n");
540 return tgt;
541 } else {
542 printk(KERN_ERR PFX "rport 0x%x "
543 "is in DELETED state\n",
544 rdata->ids.port_id);
545 return NULL;
546 }
547 }
548 }
549 }
550 return NULL;
551}
552
553
554/**
555 * bnx2fc_alloc_conn_id - allocates FCOE Connection id
556 *
557 * @hba: pointer to adapter structure
558 * @tgt: pointer to bnx2fc_rport structure
559 */
560static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
561 struct bnx2fc_rport *tgt)
562{
563 u32 conn_id, next;
564
565 /* called with hba mutex held */
566
567 /*
568 * tgt_ofld_list access is synchronized using
569 * both hba mutex and hba lock. At least the hba mutex or
570 * the hba lock needs to be held for read access.
571 */
572
573 spin_lock_bh(&hba->hba_lock);
574 next = hba->next_conn_id;
575 conn_id = hba->next_conn_id++;
576 if (hba->next_conn_id == BNX2FC_NUM_MAX_SESS)
577 hba->next_conn_id = 0;
578
579 while (hba->tgt_ofld_list[conn_id] != NULL) {
580 conn_id++;
581 if (conn_id == BNX2FC_NUM_MAX_SESS)
582 conn_id = 0;
583
584 if (conn_id == next) {
585 /* No free conn_ids are available */
586 spin_unlock_bh(&hba->hba_lock);
587 return -1;
588 }
589 }
590 hba->tgt_ofld_list[conn_id] = tgt;
591 tgt->fcoe_conn_id = conn_id;
592 spin_unlock_bh(&hba->hba_lock);
593 return conn_id;
594}
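
bnx2fc_alloc_conn_id is a round-robin slot allocator: start from next_conn_id, scan forward with wraparound, and report failure only after coming back to the starting point. The same logic in isolation, with the table shrunk for the example:

    #include <stdio.h>

    #define MAX_SESS 4   /* shrunk stand-in for BNX2FC_NUM_MAX_SESS */

    static void *slots[MAX_SESS];
    static int next_id;

    /* Round-robin slot allocator with wraparound, mirroring
     * bnx2fc_alloc_conn_id; returns -1 when every slot is taken. */
    static int alloc_id(void *owner)
    {
        int start = next_id;
        int id = next_id++;

        if (next_id == MAX_SESS)
            next_id = 0;
        while (slots[id] != NULL) {
            if (++id == MAX_SESS)
                id = 0;
            if (id == start)
                return -1;   /* table full */
        }
        slots[id] = owner;
        return id;
    }

    int main(void)
    {
        int a = alloc_id(&slots), b = alloc_id(&slots);

        printf("ids: %d %d\n", a, b);   /* 0 1 */
        return 0;
    }
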
595
596static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id)
597{
598 /* called with hba mutex held */
599 spin_lock_bh(&hba->hba_lock);
600 hba->tgt_ofld_list[conn_id] = NULL;
601 hba->next_conn_id = conn_id;
602 spin_unlock_bh(&hba->hba_lock);
603}
604
605/**
606 * bnx2fc_alloc_session_resc - Allocate QP resources for the session
607 *
608 */
609static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
610 struct bnx2fc_rport *tgt)
611{
612 dma_addr_t page;
613 int num_pages;
614 u32 *pbl;
615
616 /* Allocate and map SQ */
617 tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
618 tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
619
620 tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
621 &tgt->sq_dma, GFP_KERNEL);
622 if (!tgt->sq) {
623 printk(KERN_ALERT PFX "unable to allocate SQ memory %d\n",
624 tgt->sq_mem_size);
625 goto mem_alloc_failure;
626 }
627 memset(tgt->sq, 0, tgt->sq_mem_size);
628
629 /* Allocate and map CQ */
630 tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
631 tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
632
633 tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
634 &tgt->cq_dma, GFP_KERNEL);
635 if (!tgt->cq) {
636 printk(KERN_ALERT PFX "unable to allocate CQ memory %d\n",
637 tgt->cq_mem_size);
638 goto mem_alloc_failure;
639 }
640 memset(tgt->cq, 0, tgt->cq_mem_size);
641
642 /* Allocate and map RQ and RQ PBL */
643 tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
644 tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
645
646 tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
647 &tgt->rq_dma, GFP_KERNEL);
648 if (!tgt->rq) {
649 printk(KERN_ALERT PFX "unable to allocate RQ memory %d\n",
650 tgt->rq_mem_size);
651 goto mem_alloc_failure;
652 }
653 memset(tgt->rq, 0, tgt->rq_mem_size);
654
655 tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *);
656 tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
657
658 tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
659 &tgt->rq_pbl_dma, GFP_KERNEL);
660 if (!tgt->rq_pbl) {
661 printk(KERN_ALERT PFX "unable to allocate RQ PBL %d\n",
662 tgt->rq_pbl_size);
663 goto mem_alloc_failure;
664 }
665
666 memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
667 num_pages = tgt->rq_mem_size / PAGE_SIZE;
668 page = tgt->rq_dma;
669 pbl = (u32 *)tgt->rq_pbl;
670
671 while (num_pages--) {
672 *pbl = (u32)page;
673 pbl++;
674 *pbl = (u32)((u64)page >> 32);
675 pbl++;
676 page += PAGE_SIZE;
677 }
678
679 /* Allocate and map XFERQ */
680 tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
681 tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) &
682 PAGE_MASK;
683
684 tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
685 &tgt->xferq_dma, GFP_KERNEL);
686 if (!tgt->xferq) {
687 printk(KERN_ALERT PFX "unable to allocate XFERQ %d\n",
688 tgt->xferq_mem_size);
689 goto mem_alloc_failure;
690 }
691 memset(tgt->xferq, 0, tgt->xferq_mem_size);
692
693 /* Allocate and map CONFQ & CONFQ PBL */
694 tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
695 tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) &
696 PAGE_MASK;
697
698 tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
699 &tgt->confq_dma, GFP_KERNEL);
700 if (!tgt->confq) {
701 printk(KERN_ALERT PFX "unable to allocate CONFQ %d\n",
702 tgt->confq_mem_size);
703 goto mem_alloc_failure;
704 }
705 memset(tgt->confq, 0, tgt->confq_mem_size);
706
707 tgt->confq_pbl_size =
708 (tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *);
709 tgt->confq_pbl_size =
710 (tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
711
712 tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
713 tgt->confq_pbl_size,
714 &tgt->confq_pbl_dma, GFP_KERNEL);
715 if (!tgt->confq_pbl) {
716 printk(KERN_ALERT PFX "unable to allocate CONFQ PBL %d\n",
717 tgt->confq_pbl_size);
718 goto mem_alloc_failure;
719 }
720
721 memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
722 num_pages = tgt->confq_mem_size / PAGE_SIZE;
723 page = tgt->confq_dma;
724 pbl = (u32 *)tgt->confq_pbl;
725
726 while (num_pages--) {
727 *pbl = (u32)page;
728 pbl++;
729 *pbl = (u32)((u64)page >> 32);
730 pbl++;
731 page += PAGE_SIZE;
732 }
733
734 /* Allocate and map ConnDB */
735 tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
736
737 tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
738 tgt->conn_db_mem_size,
739 &tgt->conn_db_dma, GFP_KERNEL);
740 if (!tgt->conn_db) {
741 printk(KERN_ALERT PFX "unable to allocate conn_db %d\n",
742 tgt->conn_db_mem_size);
743 goto mem_alloc_failure;
744 }
745 memset(tgt->conn_db, 0, tgt->conn_db_mem_size);
746
747
748 /* Allocate and map LCQ */
749 tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
750 tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) &
751 PAGE_MASK;
752
753 tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
754 &tgt->lcq_dma, GFP_KERNEL);
755
756 if (!tgt->lcq) {
757 printk(KERN_ALERT PFX "unable to allocate lcq %d\n",
758 tgt->lcq_mem_size);
759 goto mem_alloc_failure;
760 }
761 memset(tgt->lcq, 0, tgt->lcq_mem_size);
762
763 /* Arm CQ */
764 tgt->conn_db->cq_arm.lo = -1;
765 tgt->conn_db->rq_prod = 0x8000;
766
767 return 0;
768
769mem_alloc_failure:
770 bnx2fc_free_session_resc(hba, tgt);
771 bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
772 return -ENOMEM;
773}
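
Each PBL built above is just an array of 32-bit pairs, one low/high DMA-address pair per backing page. The fill loop in isolation:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SZ 4096

    /* Fill a page buffer list (PBL): one lo/hi 32-bit pair per backing
     * page, as the RQ and CONFQ PBL loops do. */
    static void fill_pbl(uint32_t *pbl, uint64_t base_dma, int num_pages)
    {
        while (num_pages--) {
            *pbl++ = (uint32_t)base_dma;          /* low 32 bits  */
            *pbl++ = (uint32_t)(base_dma >> 32);  /* high 32 bits */
            base_dma += PAGE_SZ;
        }
    }

    int main(void)
    {
        uint32_t pbl[4];

        fill_pbl(pbl, 0x100002000ULL, 2);
        printf("page0 lo=0x%x hi=0x%x\n", pbl[0], pbl[1]);
        printf("page1 lo=0x%x hi=0x%x\n", pbl[2], pbl[3]);
        return 0;
    }
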
774
775/**
776 * bnx2fc_free_session_resc - Free QP resources for the session
777 *
778 * @hba: adapter structure pointer
779 * @tgt: bnx2fc_rport structure pointer
780 *
781 * Free QP resources - SQ/RQ/CQ/XFERQ memory and PBL
782 */
783static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
784 struct bnx2fc_rport *tgt)
785{
786 BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n");
787
788 if (tgt->ctx_base) {
789 iounmap(tgt->ctx_base);
790 tgt->ctx_base = NULL;
791 }
792 /* Free LCQ */
793 if (tgt->lcq) {
794 dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
795 tgt->lcq, tgt->lcq_dma);
796 tgt->lcq = NULL;
797 }
798 /* Free connDB */
799 if (tgt->conn_db) {
800 dma_free_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size,
801 tgt->conn_db, tgt->conn_db_dma);
802 tgt->conn_db = NULL;
803 }
804 /* Free confq and confq pbl */
805 if (tgt->confq_pbl) {
806 dma_free_coherent(&hba->pcidev->dev, tgt->confq_pbl_size,
807 tgt->confq_pbl, tgt->confq_pbl_dma);
808 tgt->confq_pbl = NULL;
809 }
810 if (tgt->confq) {
811 dma_free_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
812 tgt->confq, tgt->confq_dma);
813 tgt->confq = NULL;
814 }
815 /* Free XFERQ */
816 if (tgt->xferq) {
817 dma_free_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
818 tgt->xferq, tgt->xferq_dma);
819 tgt->xferq = NULL;
820 }
821 /* Free RQ PBL and RQ */
822 if (tgt->rq_pbl) {
823 dma_free_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
824 tgt->rq_pbl, tgt->rq_pbl_dma);
825 tgt->rq_pbl = NULL;
826 }
827 if (tgt->rq) {
828 dma_free_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
829 tgt->rq, tgt->rq_dma);
830 tgt->rq = NULL;
831 }
832 /* Free CQ */
833 if (tgt->cq) {
834 dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
835 tgt->cq, tgt->cq_dma);
836 tgt->cq = NULL;
837 }
838 /* Free SQ */
839 if (tgt->sq) {
840 dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
841 tgt->sq, tgt->sq_dma);
842 tgt->sq = NULL;
843 }
844}