Diffstat (limited to 'drivers/scsi/bnx2fc/bnx2fc_hwi.c')
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c  1868
1 files changed, 1868 insertions, 0 deletions
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
new file mode 100644
index 000000000000..4f4096836742
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -0,0 +1,1868 @@
1/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
2 * This file contains the low level functions that interact
3 * with the 57712 FCoE firmware.
4 *
5 * Copyright (c) 2008 - 2010 Broadcom Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
12 */
13
14#include "bnx2fc.h"
15
16DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
17
18static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
19 struct fcoe_kcqe *new_cqe_kcqe);
20static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
21 struct fcoe_kcqe *ofld_kcqe);
22static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
23 struct fcoe_kcqe *ofld_kcqe);
24static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
25static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
26 struct fcoe_kcqe *conn_destroy);
27
28int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
29{
30 struct fcoe_kwqe_stat stat_req;
31 struct kwqe *kwqe_arr[2];
32 int num_kwqes = 1;
33 int rc = 0;
34
35 memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
36 stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
37 stat_req.hdr.flags =
38 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
39
40 stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
41 stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);
42
43 kwqe_arr[0] = (struct kwqe *) &stat_req;
44
45 if (hba->cnic && hba->cnic->submit_kwqes)
46 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
47
48 return rc;
49}
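
/*
 * Note on addressing: every KWQE in this file hands 64-bit DMA addresses
 * to the firmware as two 32-bit halves. A minimal sketch of the split
 * used above (the helper name is illustrative, not part of this driver):
 *
 *	static inline void bnx2fc_split_addr64(dma_addr_t addr,
 *					       u32 *lo, u32 *hi)
 *	{
 *		*lo = (u32) addr;
 *		*hi = (u32) ((u64) addr >> 32);
 *	}
 *
 * stat_params_addr_lo/hi above are exactly the low and high words of
 * hba->stats_buf_dma.
 */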
50
51/**
52 * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
53 *
54 * @hba: adapter structure pointer
55 *
56 * Send down the FCoE firmware init KWQEs which initiate the initial
57 * handshake with the f/w.
58 *
59 */
60int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
61{
62 struct fcoe_kwqe_init1 fcoe_init1;
63 struct fcoe_kwqe_init2 fcoe_init2;
64 struct fcoe_kwqe_init3 fcoe_init3;
65 struct kwqe *kwqe_arr[3];
66 int num_kwqes = 3;
67 int rc = 0;
68
69 if (!hba->cnic) {
70 printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n");
71 return -ENODEV;
72 }
73
74 /* fill init1 KWQE */
75 memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
76 fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
77 fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
78 FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
79
80 fcoe_init1.num_tasks = BNX2FC_MAX_TASKS;
81 fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
82 fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
83 fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
84 fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
85 fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
86 fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
87 fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
88 fcoe_init1.task_list_pbl_addr_hi =
89 (u32) ((u64) hba->task_ctx_bd_dma >> 32);
90 fcoe_init1.mtu = hba->netdev->mtu;
91
92 fcoe_init1.flags = (PAGE_SHIFT <<
93 FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);
94
95 fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;
96
97 /* fill init2 KWQE */
98 memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
99 fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
100 fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
101 FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
102
103 fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
104 fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
105 ((u64) hba->hash_tbl_pbl_dma >> 32);
106
107 fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
108 fcoe_init2.t2_hash_tbl_addr_hi = (u32)
109 ((u64) hba->t2_hash_tbl_dma >> 32);
110
111 fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
112 fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
113 ((u64) hba->t2_hash_tbl_ptr_dma >> 32);
114
115 fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;
116
117 /* fill init3 KWQE */
118 memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
119 fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
120 fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
121 FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
122 fcoe_init3.error_bit_map_lo = 0xffffffff;
123 fcoe_init3.error_bit_map_hi = 0xffffffff;
124
126 kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
127 kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
128 kwqe_arr[2] = (struct kwqe *) &fcoe_init3;
129
130 if (hba->cnic && hba->cnic->submit_kwqes)
131 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
132
133 return rc;
134}

135int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
136{
137 struct fcoe_kwqe_destroy fcoe_destroy;
138 struct kwqe *kwqe_arr[2];
139 int num_kwqes = 1;
140 int rc = -1;
141
142 /* fill destroy KWQE */
143 memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
144 fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
145 fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
146 FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
147 kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;
148
149 if (hba->cnic && hba->cnic->submit_kwqes)
150 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
151 return rc;
152}
153
154/**
155 * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
156 *
157 * @port: port structure pointer
158 * @tgt: bnx2fc_rport structure pointer
159 */
160int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
161 struct bnx2fc_rport *tgt)
162{
163 struct fc_lport *lport = port->lport;
164 struct bnx2fc_hba *hba = port->priv;
165 struct kwqe *kwqe_arr[4];
166 struct fcoe_kwqe_conn_offload1 ofld_req1;
167 struct fcoe_kwqe_conn_offload2 ofld_req2;
168 struct fcoe_kwqe_conn_offload3 ofld_req3;
169 struct fcoe_kwqe_conn_offload4 ofld_req4;
170 struct fc_rport_priv *rdata = tgt->rdata;
171 struct fc_rport *rport = tgt->rport;
172 int num_kwqes = 4;
173 u32 port_id;
174 int rc = 0;
175 u16 conn_id;
176
177 /* Initialize offload request 1 structure */
178 memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));
179
180 ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
181 ofld_req1.hdr.flags =
182 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
183
185 conn_id = (u16)tgt->fcoe_conn_id;
186 ofld_req1.fcoe_conn_id = conn_id;
187
189 ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
190 ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);
191
192 ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
193 ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);
194
195 ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
196 ofld_req1.rq_first_pbe_addr_hi =
197 (u32)((u64) tgt->rq_dma >> 32);
198
199 ofld_req1.rq_prod = 0x8000;
200
201 /* Initialize offload request 2 structure */
202 memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));
203
204 ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
205 ofld_req2.hdr.flags =
206 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
207
208 ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;
209
210 ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
211 ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);
212
213 ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
214 ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);
215
216 ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
217 ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);
218
219 /* Initialize offload request 3 structure */
220 memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));
221
222 ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
223 ofld_req3.hdr.flags =
224 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
225
226 ofld_req3.vlan_tag = hba->vlan_id <<
227 FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
228 ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;
229
230 port_id = fc_host_port_id(lport->host);
231 if (port_id == 0) {
232 BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
233 return -EINVAL;
234 }
235
236 /*
237 * Store the initiator's s_id for later reference. It is needed
238 * during disable/destroy in linkdown processing, because when the
239 * lport is reset its port_id is also reset to 0.
240 */
241 tgt->sid = port_id;
242 ofld_req3.s_id[0] = (port_id & 0x000000FF);
243 ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
244 ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;
245
246 port_id = rport->port_id;
247 ofld_req3.d_id[0] = (port_id & 0x000000FF);
248 ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
249 ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;
250
251 ofld_req3.tx_total_conc_seqs = rdata->max_seq;
252
253 ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
254 ofld_req3.rx_max_fc_pay_len = lport->mfs;
255
256 ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
257 ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
258 ofld_req3.rx_open_seqs_exch_c3 = 1;
259
260 ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
261 ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);
262
263 /* set mul_n_port_ids supported flag to 0, until it is supported */
264 ofld_req3.flags = 0;
265 /*
266 ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
267 FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
268 */
269 /* Info from PLOGI response */
270 ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
271 FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);
272
273 ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
274 FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);
275
276 /* vlan flag */
277 ofld_req3.flags |= (hba->vlan_enabled <<
278 FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
279
280 /* C2_VALID and ACK flags are not set as they are not supported */
281
283 /* Initialize offload request 4 structure */
284 memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
285 ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
286 ofld_req4.hdr.flags =
287 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
288
289 ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;
290
292 ofld_req4.src_mac_addr_lo32[0] = port->data_src_addr[5];
293 /* local mac */
294 ofld_req4.src_mac_addr_lo32[1] = port->data_src_addr[4];
295 ofld_req4.src_mac_addr_lo32[2] = port->data_src_addr[3];
296 ofld_req4.src_mac_addr_lo32[3] = port->data_src_addr[2];
297 ofld_req4.src_mac_addr_hi16[0] = port->data_src_addr[1];
298 ofld_req4.src_mac_addr_hi16[1] = port->data_src_addr[0];
299 ofld_req4.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
300 ofld_req4.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
301 ofld_req4.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
302 ofld_req4.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
303 ofld_req4.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
304 ofld_req4.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
305
306 ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
307 ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
308
309 ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
310 ofld_req4.confq_pbl_base_addr_hi =
311 (u32)((u64) tgt->confq_pbl_dma >> 32);
312
313 kwqe_arr[0] = (struct kwqe *) &ofld_req1;
314 kwqe_arr[1] = (struct kwqe *) &ofld_req2;
315 kwqe_arr[2] = (struct kwqe *) &ofld_req3;
316 kwqe_arr[3] = (struct kwqe *) &ofld_req4;
317
318 if (hba->cnic && hba->cnic->submit_kwqes)
319 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
320
321 return rc;
322}
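
/*
 * FC port IDs (S_ID/D_ID) are 24-bit values; the s_id[]/d_id[] arrays
 * above carry them byte-wise, least significant byte first. A
 * hypothetical helper equivalent to the open-coded pattern (which is
 * repeated again in the enable and disable requests below):
 *
 *	static void bnx2fc_fill_fc_id(u8 dest[3], u32 port_id)
 *	{
 *		dest[0] = port_id & 0x000000FF;
 *		dest[1] = (port_id & 0x0000FF00) >> 8;
 *		dest[2] = (port_id & 0x00FF0000) >> 16;
 *	}
 */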
323
324/**
325 * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
326 *
327 * @port: port structure pointer
328 * @tgt: bnx2fc_rport structure pointer
329 */
330static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
331 struct bnx2fc_rport *tgt)
332{
333 struct kwqe *kwqe_arr[2];
334 struct bnx2fc_hba *hba = port->priv;
335 struct fcoe_kwqe_conn_enable_disable enbl_req;
336 struct fc_lport *lport = port->lport;
337 struct fc_rport *rport = tgt->rport;
338 int num_kwqes = 1;
339 int rc = 0;
340 u32 port_id;
341
342 memset(&enbl_req, 0x00,
343 sizeof(struct fcoe_kwqe_conn_enable_disable));
344 enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
345 enbl_req.hdr.flags =
346 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
347
348 enbl_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
349 /* local mac */
350 enbl_req.src_mac_addr_lo32[1] = port->data_src_addr[4];
351 enbl_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
352 enbl_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
353 enbl_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
354 enbl_req.src_mac_addr_hi16[1] = port->data_src_addr[0];
355
356 enbl_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
357 enbl_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
358 enbl_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
359 enbl_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
360 enbl_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
361 enbl_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
362
363 port_id = fc_host_port_id(lport->host);
364 if (port_id != tgt->sid) {
365 printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x,"
366 "sid = 0x%x\n", port_id, tgt->sid);
367 port_id = tgt->sid;
368 }
369 enbl_req.s_id[0] = (port_id & 0x000000FF);
370 enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
371 enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
372
373 port_id = rport->port_id;
374 enbl_req.d_id[0] = (port_id & 0x000000FF);
375 enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
376 enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
377 enbl_req.vlan_tag = hba->vlan_id <<
378 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
379 enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
380 enbl_req.vlan_flag = hba->vlan_enabled;
381 enbl_req.context_id = tgt->context_id;
382 enbl_req.conn_id = tgt->fcoe_conn_id;
383
384 kwqe_arr[0] = (struct kwqe *) &enbl_req;
385
386 if (hba->cnic && hba->cnic->submit_kwqes)
387 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
388 return rc;
389}
390
391/**
392 * bnx2fc_send_session_disable_req - initiates FCoE Session disable
393 *
394 * @port: port structure pointer
395 * @tgt: bnx2fc_rport structure pointer
396 */
397int bnx2fc_send_session_disable_req(struct fcoe_port *port,
398 struct bnx2fc_rport *tgt)
399{
400 struct bnx2fc_hba *hba = port->priv;
401 struct fcoe_kwqe_conn_enable_disable disable_req;
402 struct kwqe *kwqe_arr[2];
403 struct fc_rport *rport = tgt->rport;
404 int num_kwqes = 1;
405 int rc = 0;
406 u32 port_id;
407
408 memset(&disable_req, 0x00,
409 sizeof(struct fcoe_kwqe_conn_enable_disable));
410 disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
411 disable_req.hdr.flags =
412 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
413
414 disable_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
    disable_req.src_mac_addr_lo32[1] = port->data_src_addr[4];
415 disable_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
416 disable_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
417 disable_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
418 disable_req.src_mac_addr_hi16[1] = port->data_src_addr[0];
419
420 disable_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
421 disable_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
422 disable_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
423 disable_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
424 disable_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
425 disable_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
426
427 port_id = tgt->sid;
428 disable_req.s_id[0] = (port_id & 0x000000FF);
429 disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
430 disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
431
433 port_id = rport->port_id;
434 disable_req.d_id[0] = (port_id & 0x000000FF);
435 disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
436 disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
437 disable_req.context_id = tgt->context_id;
438 disable_req.conn_id = tgt->fcoe_conn_id;
439 disable_req.vlan_tag = hba->vlan_id <<
440 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
441 disable_req.vlan_tag |=
442 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
443 disable_req.vlan_flag = hba->vlan_enabled;
444
445 kwqe_arr[0] = (struct kwqe *) &disable_req;
446
447 if (hba->cnic && hba->cnic->submit_kwqes)
448 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
449
450 return rc;
451}
452
453/**
454 * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
455 *
456 * @hba: adapter structure pointer
457 * @tgt: bnx2fc_rport structure pointer
458 */
459int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
460 struct bnx2fc_rport *tgt)
461{
462 struct fcoe_kwqe_conn_destroy destroy_req;
463 struct kwqe *kwqe_arr[2];
464 int num_kwqes = 1;
465 int rc = 0;
466
467 memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
468 destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
469 destroy_req.hdr.flags =
470 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
471
472 destroy_req.context_id = tgt->context_id;
473 destroy_req.conn_id = tgt->fcoe_conn_id;
474
475 kwqe_arr[0] = (struct kwqe *) &destroy_req;
476
477 if (hba->cnic && hba->cnic->submit_kwqes)
478 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
479
480 return rc;
481}
482
483static void bnx2fc_unsol_els_work(struct work_struct *work)
484{
485 struct bnx2fc_unsol_els *unsol_els;
486 struct fc_lport *lport;
487 struct fc_frame *fp;
488
489 unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
490 lport = unsol_els->lport;
491 fp = unsol_els->fp;
492 fc_exch_recv(lport, fp);
493 kfree(unsol_els);
494}
495
496void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
497 unsigned char *buf,
498 u32 frame_len, u16 l2_oxid)
499{
500 struct fcoe_port *port = tgt->port;
501 struct fc_lport *lport = port->lport;
502 struct bnx2fc_unsol_els *unsol_els;
503 struct fc_frame_header *fh;
504 struct fc_frame *fp;
505 struct sk_buff *skb;
506 u32 payload_len;
507 u32 crc;
508 u8 op;
509
510
511 unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
512 if (!unsol_els) {
513 BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
514 return;
515 }
516
517 BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
518 l2_oxid, frame_len);
519
520 payload_len = frame_len - sizeof(struct fc_frame_header);
521
522 fp = fc_frame_alloc(lport, payload_len);
523 if (!fp) {
524 printk(KERN_ERR PFX "fc_frame_alloc failure\n");
    kfree(unsol_els);
525 return;
526 }
527
528 fh = (struct fc_frame_header *) fc_frame_header_get(fp);
529 /* Copy FC Frame header and payload into the frame */
530 memcpy(fh, buf, frame_len);
531
532 if (l2_oxid != FC_XID_UNKNOWN)
533 fh->fh_ox_id = htons(l2_oxid);
534
535 skb = fp_skb(fp);
536
537 if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
538 (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {
539
540 if (fh->fh_type == FC_TYPE_ELS) {
541 op = fc_frame_payload_op(fp);
542 if ((op == ELS_TEST) || (op == ELS_ESTC) ||
543 (op == ELS_FAN) || (op == ELS_CSU)) {
544 /*
545 * No need to reply for these
546 * ELS requests
547 */
548 printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
549 kfree_skb(skb);
550 return;
551 }
552 }
553 crc = fcoe_fc_crc(fp);
554 fc_frame_init(fp);
555 fr_dev(fp) = lport;
556 fr_sof(fp) = FC_SOF_I3;
557 fr_eof(fp) = FC_EOF_T;
558 fr_crc(fp) = cpu_to_le32(~crc);
559 unsol_els->lport = lport;
560 unsol_els->fp = fp;
561 INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
562 queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
563 } else {
564 BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
565 kfree_skb(skb);
    kfree(unsol_els);
566 }
567}
568
569static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
570{
571 u8 num_rq;
572 struct fcoe_err_report_entry *err_entry;
573 unsigned char *rq_data;
574 unsigned char *buf = NULL, *buf1;
575 int i;
576 u16 xid;
577 u32 frame_len, len;
578 struct bnx2fc_cmd *io_req = NULL;
579 struct fcoe_task_ctx_entry *task, *task_page;
580 struct bnx2fc_hba *hba = tgt->port->priv;
581 int task_idx, index;
582 int rc = 0;
583
585 BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
586 switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
587 case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
588 frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
589 FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;
590
591 num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;
592
593 rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
594 if (rq_data) {
595 buf = rq_data;
596 } else {
597 buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
598 GFP_ATOMIC);
599
600 if (!buf1) {
601 BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
602 break;
603 }
604
605 for (i = 0; i < num_rq; i++) {
606 rq_data = (unsigned char *)
607 bnx2fc_get_next_rqe(tgt, 1);
608 len = BNX2FC_RQ_BUF_SZ;
609 memcpy(buf1, rq_data, len);
610 buf1 += len;
611 }
612 }
613 bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
614 FC_XID_UNKNOWN);
615
616 if (buf != rq_data)
617 kfree(buf);
618 bnx2fc_return_rqe(tgt, num_rq);
619 break;
620
621 case FCOE_ERROR_DETECTION_CQE_TYPE:
622 /*
623 * In case of an error reporting CQE, a single RQ entry
624 * is consumed.
625 */
626 spin_lock_bh(&tgt->tgt_lock);
627 num_rq = 1;
628 err_entry = (struct fcoe_err_report_entry *)
629 bnx2fc_get_next_rqe(tgt, 1);
630 xid = err_entry->fc_hdr.ox_id;
631 BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
632 BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
633 err_entry->err_warn_bitmap_hi,
634 err_entry->err_warn_bitmap_lo);
635 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
636 err_entry->tx_buf_off, err_entry->rx_buf_off);
637
638 bnx2fc_return_rqe(tgt, 1);
639
640 if (xid > BNX2FC_MAX_XID) {
641 BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
642 xid);
643 spin_unlock_bh(&tgt->tgt_lock);
644 break;
645 }
646
647 task_idx = xid / BNX2FC_TASKS_PER_PAGE;
648 index = xid % BNX2FC_TASKS_PER_PAGE;
649 task_page = (struct fcoe_task_ctx_entry *)
650 hba->task_ctx[task_idx];
651 task = &(task_page[index]);
652
653 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
654 if (!io_req) {
655 spin_unlock_bh(&tgt->tgt_lock);
656 break;
657 }
658
659 if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
660 printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
661 spin_unlock_bh(&tgt->tgt_lock);
662 break;
663 }
664
665 if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
666 &io_req->req_flags)) {
667 BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
668 "progress.. ignore unsol err\n");
669 spin_unlock_bh(&tgt->tgt_lock);
670 break;
671 }
672
673 /*
674 * If ABTS is already in progress, and FW error is
675 * received after that, do not cancel the timeout_work
676 * and let the error recovery continue by explicitly
677 * logging out the target, when the ABTS eventually
678 * times out.
679 */
680 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
681 &io_req->req_flags)) {
682 /*
683 * Cancel the timeout_work, as we received IO
684 * completion with FW error.
685 */
686 if (cancel_delayed_work(&io_req->timeout_work))
687 kref_put(&io_req->refcount,
688 bnx2fc_cmd_release); /* timer hold */
689
690 rc = bnx2fc_initiate_abts(io_req);
691 if (rc != SUCCESS) {
692 BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts "
693 "failed. issue cleanup\n");
694 rc = bnx2fc_initiate_cleanup(io_req);
695 BUG_ON(rc);
696 }
697 } else
698 printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
699 "in ABTS processing\n", xid);
700 spin_unlock_bh(&tgt->tgt_lock);
701 break;
702
703 case FCOE_WARNING_DETECTION_CQE_TYPE:
704 /*
705 * In case of a warning reporting CQE, a single RQ entry
706 * is consumed.
707 */
708 num_rq = 1;
709 err_entry = (struct fcoe_err_report_entry *)
710 bnx2fc_get_next_rqe(tgt, 1);
711 xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
712 BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
713 BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
714 err_entry->err_warn_bitmap_hi,
715 err_entry->err_warn_bitmap_lo);
716 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
717 err_entry->tx_buf_off, err_entry->rx_buf_off);
718
719 bnx2fc_return_rqe(tgt, 1);
720 break;
721
722 default:
723 printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
724 break;
725 }
726}
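
/*
 * Task contexts live in an array of pages, each page holding
 * BNX2FC_TASKS_PER_PAGE fcoe_task_ctx_entry structures, so the xid
 * lookup above (and in bnx2fc_process_cq_compl() below) boils down to:
 *
 *	task_idx = xid / BNX2FC_TASKS_PER_PAGE;	// which page
 *	index    = xid % BNX2FC_TASKS_PER_PAGE;	// entry within the page
 *	task     = &((struct fcoe_task_ctx_entry *)
 *			hba->task_ctx[task_idx])[index];
 *
 * e.g. if BNX2FC_TASKS_PER_PAGE were 256, xid 0x104 would resolve to
 * page 1, entry 4.
 */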
727
728void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
729{
730 struct fcoe_task_ctx_entry *task;
731 struct fcoe_task_ctx_entry *task_page;
732 struct fcoe_port *port = tgt->port;
733 struct bnx2fc_hba *hba = port->priv;
734 struct bnx2fc_cmd *io_req;
735 int task_idx, index;
736 u16 xid;
737 u8 cmd_type;
738 u8 rx_state = 0;
739 u8 num_rq;
740
741 spin_lock_bh(&tgt->tgt_lock);
742 xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
743 if (xid >= BNX2FC_MAX_TASKS) {
744 printk(KERN_ALERT PFX "ERROR:xid out of range\n");
745 spin_unlock_bh(&tgt->tgt_lock);
746 return;
747 }
748 task_idx = xid / BNX2FC_TASKS_PER_PAGE;
749 index = xid % BNX2FC_TASKS_PER_PAGE;
750 task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
751 task = &(task_page[index]);
752
753 num_rq = ((task->rx_wr_tx_rd.rx_flags &
754 FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE) >>
755 FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT);
756
757 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
758
759 if (io_req == NULL) {
760 printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
761 spin_unlock_bh(&tgt->tgt_lock);
762 return;
763 }
764
765 /* Timestamp IO completion time */
766 cmd_type = io_req->cmd_type;
767
768 /* optimized completion path */
769 if (cmd_type == BNX2FC_SCSI_CMD) {
770 rx_state = ((task->rx_wr_tx_rd.rx_flags &
771 FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE) >>
772 FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT);
773
774 if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
775 bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
776 spin_unlock_bh(&tgt->tgt_lock);
777 return;
778 }
779 }
780
781 /* Process other IO completion types */
782 switch (cmd_type) {
783 case BNX2FC_SCSI_CMD:
784 if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
785 bnx2fc_process_abts_compl(io_req, task, num_rq);
786 else if (rx_state ==
787 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
788 bnx2fc_process_cleanup_compl(io_req, task, num_rq);
789 else
790 printk(KERN_ERR PFX "Invalid rx state - %d\n",
791 rx_state);
792 break;
793
794 case BNX2FC_TASK_MGMT_CMD:
795 BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
796 bnx2fc_process_tm_compl(io_req, task, num_rq);
797 break;
798
799 case BNX2FC_ABTS:
800 /*
801 * ABTS request received by firmware. ABTS response
802 * will be delivered to the task belonging to the IO
803 * that was aborted
804 */
805 BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
806 kref_put(&io_req->refcount, bnx2fc_cmd_release);
807 break;
808
809 case BNX2FC_ELS:
810 BNX2FC_IO_DBG(io_req, "cq_compl - call process_els_compl\n");
811 bnx2fc_process_els_compl(io_req, task, num_rq);
812 break;
813
814 case BNX2FC_CLEANUP:
815 BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
816 kref_put(&io_req->refcount, bnx2fc_cmd_release);
817 break;
818
819 default:
820 printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
821 break;
822 }
823 spin_unlock_bh(&tgt->tgt_lock);
824}
825
826struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
827{
828 struct bnx2fc_work *work;
829 work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
830 if (!work)
831 return NULL;
832
833 INIT_LIST_HEAD(&work->list);
834 work->tgt = tgt;
835 work->wqe = wqe;
836 return work;
837}
838
839int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
840{
841 struct fcoe_cqe *cq;
842 u32 cq_cons;
843 struct fcoe_cqe *cqe;
844 u16 wqe;
845 bool more_cqes_found = false;
846
847 /*
848 * cq_lock is a low contention lock used to protect
849 * the CQ data structure from being freed up during
850 * the upload operation
851 */
852 spin_lock_bh(&tgt->cq_lock);
853
854 if (!tgt->cq) {
855 printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
856 spin_unlock_bh(&tgt->cq_lock);
857 return 0;
858 }
859 cq = tgt->cq;
860 cq_cons = tgt->cq_cons_idx;
861 cqe = &cq[cq_cons];
862
863 do {
864 more_cqes_found ^= true;
865
866 while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
867 (tgt->cq_curr_toggle_bit <<
868 FCOE_CQE_TOGGLE_BIT_SHIFT)) {
869
870 /* new entry on the cq */
871 if (wqe & FCOE_CQE_CQE_TYPE) {
872 /* Unsolicited event notification */
873 bnx2fc_process_unsol_compl(tgt, wqe);
874 } else {
875 struct bnx2fc_work *work = NULL;
876 struct bnx2fc_percpu_s *fps = NULL;
877 unsigned int cpu = wqe % num_possible_cpus();
878
879 fps = &per_cpu(bnx2fc_percpu, cpu);
880 spin_lock_bh(&fps->fp_work_lock);
881 if (unlikely(!fps->iothread))
882 goto unlock;
883
884 work = bnx2fc_alloc_work(tgt, wqe);
885 if (work)
886 list_add_tail(&work->list,
887 &fps->work_list);
888unlock:
889 spin_unlock_bh(&fps->fp_work_lock);
890
891 /* Pending work request completion */
892 if (fps->iothread && work)
893 wake_up_process(fps->iothread);
894 else
895 bnx2fc_process_cq_compl(tgt, wqe);
896 }
897 cqe++;
898 tgt->cq_cons_idx++;
899
900 if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
901 tgt->cq_cons_idx = 0;
902 cqe = cq;
903 tgt->cq_curr_toggle_bit =
904 1 - tgt->cq_curr_toggle_bit;
905 }
906 }
907 /* Re-arm CQ */
908 if (more_cqes_found) {
909 tgt->conn_db->cq_arm.lo = -1;
910 wmb();
911 }
912 } while (more_cqes_found);
913
914 /*
915 * Commit tgt->cq_cons_idx change to the memory
916 * spin_lock implies full memory barrier, no need to smp_wmb
917 */
918
919 spin_unlock_bh(&tgt->cq_lock);
920 return 0;
921}
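
/*
 * The CQ is consumed with a toggle (phase) bit instead of a producer
 * index: the firmware flips FCOE_CQE_TOGGLE_BIT on every ring pass, and
 * a CQE belongs to the driver only while its toggle bit matches
 * tgt->cq_curr_toggle_bit. In sketch form:
 *
 *	if ((cqe->wqe & FCOE_CQE_TOGGLE_BIT) ==
 *	    (tgt->cq_curr_toggle_bit << FCOE_CQE_TOGGLE_BIT_SHIFT))
 *		; // fresh CQE: process it and advance cq_cons_idx
 *	// on wrap-around the driver flips its expected phase:
 *	tgt->cq_curr_toggle_bit = 1 - tgt->cq_curr_toggle_bit;
 *
 * The "more_cqes_found ^= true" construct makes the outer do/while run
 * exactly twice: the first pass drains and re-arms the CQ, the second
 * catches CQEs that raced with the re-arm.
 */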
922
923/**
924 * bnx2fc_fastpath_notification - process global event queue (KCQ)
925 *
926 * @hba: adapter structure pointer
927 * @new_cqe_kcqe: pointer to newly DMA'd KCQ entry
928 *
929 * Fast path event notification handler
930 */
931static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
932 struct fcoe_kcqe *new_cqe_kcqe)
933{
934 u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
935 struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
936
937 if (!tgt) {
938 printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id);
939 return;
940 }
941
942 bnx2fc_process_new_cqes(tgt);
943}
944
945/**
946 * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
947 *
948 * @hba: adapter structure pointer
949 * @ofld_kcqe: connection offload kcqe pointer
950 *
951 * handle session offload completion, enable the session if offload is
952 * successful.
953 */
954static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
955 struct fcoe_kcqe *ofld_kcqe)
956{
957 struct bnx2fc_rport *tgt;
958 struct fcoe_port *port;
959 u32 conn_id;
960 u32 context_id;
961 int rc;
962
963 conn_id = ofld_kcqe->fcoe_conn_id;
964 context_id = ofld_kcqe->fcoe_conn_context_id;
965 tgt = hba->tgt_ofld_list[conn_id];
966 if (!tgt) {
967 printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
968 return;
969 }
970 BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
971 ofld_kcqe->fcoe_conn_context_id);
972 port = tgt->port;
973 if (hba != tgt->port->priv) {
974 printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n");
975 goto ofld_cmpl_err;
976 }
977 /*
978 * cnic has allocated a context_id for this session; use this
979 * while enabling the session.
980 */
981 tgt->context_id = context_id;
982 if (ofld_kcqe->completion_status) {
983 if (ofld_kcqe->completion_status ==
984 FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
985 printk(KERN_ERR PFX "unable to allocate FCoE context "
986 "resources\n");
987 set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
988 }
989 goto ofld_cmpl_err;
990 } else {
992 /* now enable the session */
993 rc = bnx2fc_send_session_enable_req(port, tgt);
994 if (rc) {
995 printk(KERN_ALERT PFX "enable session failed\n");
996 goto ofld_cmpl_err;
997 }
998 }
999 return;
1000ofld_cmpl_err:
1001 set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1002 wake_up_interruptible(&tgt->ofld_wait);
1003}
1004
1005/**
1006 * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
1007 *
1008 * @hba: adapter structure pointer
1009 * @ofld_kcqe: connection offload kcqe pointer
1010 *
1011 * handle session enable completion, mark the rport as ready
1012 */
1014static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
1015 struct fcoe_kcqe *ofld_kcqe)
1016{
1017 struct bnx2fc_rport *tgt;
1018 u32 conn_id;
1019 u32 context_id;
1020
1021 context_id = ofld_kcqe->fcoe_conn_context_id;
1022 conn_id = ofld_kcqe->fcoe_conn_id;
1023 tgt = hba->tgt_ofld_list[conn_id];
1024 if (!tgt) {
1025 printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n");
1026 return;
1027 }
1028
1029 BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
1030 ofld_kcqe->fcoe_conn_context_id);
1031
1032 /*
1033 * context_id should be the same for this target during offload
1034 * and enable
1035 */
1036 if (tgt->context_id != context_id) {
1037 printk(KERN_ALERT PFX "context id mis-match\n");
1038 return;
1039 }
1040 if (hba != tgt->port->priv) {
1041 printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
1042 goto enbl_cmpl_err;
1043 }
1044 if (ofld_kcqe->completion_status) {
1045 goto enbl_cmpl_err;
1046 } else {
1047 /* enable successful - rport ready for issuing IOs */
1048 set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1049 set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1050 wake_up_interruptible(&tgt->ofld_wait);
1051 }
1052 return;
1053
1054enbl_cmpl_err:
1055 set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1056 wake_up_interruptible(&tgt->ofld_wait);
1057}
1058
1059static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
1060 struct fcoe_kcqe *disable_kcqe)
1061{
1063 struct bnx2fc_rport *tgt;
1064 u32 conn_id;
1065
1066 conn_id = disable_kcqe->fcoe_conn_id;
1067 tgt = hba->tgt_ofld_list[conn_id];
1068 if (!tgt) {
1069 printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n");
1070 return;
1071 }
1072
1073 BNX2FC_TGT_DBG(tgt, "disable_cmpl: conn_id %d\n", conn_id);
1074
1075 if (disable_kcqe->completion_status) {
1076 printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n",
1077 disable_kcqe->completion_status);
1078 return;
1079 } else {
1080 /* disable successful */
1081 BNX2FC_TGT_DBG(tgt, "disable successful\n");
1082 clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1083 set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
1084 set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1085 wake_up_interruptible(&tgt->upld_wait);
1086 }
1087}
1088
1089static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
1090 struct fcoe_kcqe *destroy_kcqe)
1091{
1092 struct bnx2fc_rport *tgt;
1093 u32 conn_id;
1094
1095 conn_id = destroy_kcqe->fcoe_conn_id;
1096 tgt = hba->tgt_ofld_list[conn_id];
1097 if (!tgt) {
1098 printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n");
1099 return;
1100 }
1101
1102 BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
1103
1104 if (destroy_kcqe->completion_status) {
1105 printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n",
1106 destroy_kcqe->completion_status);
1107 return;
1108 } else {
1109 /* destroy successful */
1110 BNX2FC_TGT_DBG(tgt, "upload successful\n");
1111 clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
1112 set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
1113 set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1114 wake_up_interruptible(&tgt->upld_wait);
1115 }
1116}
1117
1118static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
1119{
1120 switch (err_code) {
1121 case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
1122 printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
1123 break;
1124
1125 case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
1126 printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
1127 break;
1128
1129 case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
1130 printk(KERN_ERR PFX "init_failure due to NIC error\n");
1131 break;
1132
1133 default:
1134 printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
1135 }
1136}
1137
1138/**
1139 * bnx2fc_indicate_kcqe - process KCQE
1140 *
1141 * @context: adapter structure pointer
1142 * @kcq: array of kcqe pointers
1143 * @num_cqe: Number of completion queue elements
1144 *
1145 * Generic KCQ event handler
1146 */
1147void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
1148 u32 num_cqe)
1149{
1150 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
1151 int i = 0;
1152 struct fcoe_kcqe *kcqe = NULL;
1153
1154 while (i < num_cqe) {
1155 kcqe = (struct fcoe_kcqe *) kcq[i++];
1156
1157 switch (kcqe->op_code) {
1158 case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
1159 bnx2fc_fastpath_notification(hba, kcqe);
1160 break;
1161
1162 case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
1163 bnx2fc_process_ofld_cmpl(hba, kcqe);
1164 break;
1165
1166 case FCOE_KCQE_OPCODE_ENABLE_CONN:
1167 bnx2fc_process_enable_conn_cmpl(hba, kcqe);
1168 break;
1169
1170 case FCOE_KCQE_OPCODE_INIT_FUNC:
1171 if (kcqe->completion_status !=
1172 FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
1173 bnx2fc_init_failure(hba,
1174 kcqe->completion_status);
1175 } else {
1176 set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
1177 bnx2fc_get_link_state(hba);
1178 printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
1179 (u8)hba->pcidev->bus->number);
1180 }
1181 break;
1182
1183 case FCOE_KCQE_OPCODE_DESTROY_FUNC:
1184 if (kcqe->completion_status !=
1185 FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
1186
1187 printk(KERN_ERR PFX "DESTROY failed\n");
1188 } else {
1189 printk(KERN_ERR PFX "DESTROY success\n");
1190 }
1191 hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
1192 wake_up_interruptible(&hba->destroy_wait);
1193 break;
1194
1195 case FCOE_KCQE_OPCODE_DISABLE_CONN:
1196 bnx2fc_process_conn_disable_cmpl(hba, kcqe);
1197 break;
1198
1199 case FCOE_KCQE_OPCODE_DESTROY_CONN:
1200 bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
1201 break;
1202
1203 case FCOE_KCQE_OPCODE_STAT_FUNC:
1204 if (kcqe->completion_status !=
1205 FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
1206 printk(KERN_ERR PFX "STAT failed\n");
1207 complete(&hba->stat_req_done);
1208 break;
1209
1210 case FCOE_KCQE_OPCODE_FCOE_ERROR:
1211 /* fall thru */
1212 default:
1213 printk(KERN_ALERT PFX "unknown opcode 0x%x\n",
1214 kcqe->op_code);
1215 }
1216 }
1217}
1218
1219void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
1220{
1221 struct fcoe_sqe *sqe;
1222
1223 sqe = &tgt->sq[tgt->sq_prod_idx];
1224
1225 /* Fill SQ WQE */
1226 sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
1227 sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
1228
1229 /* Advance SQ Prod Idx */
1230 if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
1231 tgt->sq_prod_idx = 0;
1232 tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
1233 }
1234}
1235
1236void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
1237{
1238 struct b577xx_doorbell_set_prod ev_doorbell;
1239 u32 msg;
1240
1241 wmb();
1242
1243 memset(&ev_doorbell, 0, sizeof(struct b577xx_doorbell_set_prod));
1244 ev_doorbell.header.header = B577XX_DOORBELL_HDR_DB_TYPE;
1245
1246 ev_doorbell.prod = tgt->sq_prod_idx |
1247 (tgt->sq_curr_toggle_bit << 15);
1248 ev_doorbell.header.header |= B577XX_FCOE_CONNECTION_TYPE <<
1249 B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
1250 msg = *((u32 *)&ev_doorbell);
1251 writel(cpu_to_le32(msg), tgt->ctx_base);
1252
1253 mmiowb();
1255}
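
/*
 * Typical submission sequence (illustrative; the actual callers live in
 * bnx2fc_io.c and are expected to hold tgt->tgt_lock):
 *
 *	bnx2fc_add_2_sq(tgt, xid);	// queue the SQE
 *	bnx2fc_ring_doorbell(tgt);	// publish the new producer index
 *
 * Like the CQ, the SQ carries a toggle bit that flips on every wrap, so
 * the firmware can tell fresh SQEs from stale ones.
 */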
1256
1257int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
1258{
1259 u32 context_id = tgt->context_id;
1260 struct fcoe_port *port = tgt->port;
1261 u32 reg_off;
1262 resource_size_t reg_base;
1263 struct bnx2fc_hba *hba = port->priv;
1264
1265 reg_base = pci_resource_start(hba->pcidev,
1266 BNX2X_DOORBELL_PCI_BAR);
1267 reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
1268 (context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
1269 tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
1270 if (!tgt->ctx_base)
1271 return -ENOMEM;
1272 return 0;
1273}
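
/*
 * Each offloaded connection owns a small doorbell window inside the
 * doorbell PCI BAR, located by the cnic-assigned context_id. For
 * example, assuming BNX2FC_5771X_DB_PAGE_SIZE were 4K, context id 0x10
 * would map at reg_base + 0x10 * 4096 + DPM_TRIGER_TYPE.
 */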
1274
1275char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1276{
1277 char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);
1278
1279 if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
1280 return NULL;
1281
1282 tgt->rq_cons_idx += num_items;
1283
1284 if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
1285 tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;
1286
1287 return buf;
1288}
1289
1290void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1291{
1292 /* return the rq buffer */
1293 u32 next_prod_idx = tgt->rq_prod_idx + num_items;
1294 if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
1295 /* Wrap around RQ */
1296 next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
1297 }
1298 tgt->rq_prod_idx = next_prod_idx;
1299 tgt->conn_db->rq_prod = tgt->rq_prod_idx;
1300}
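
/*
 * The RQ producer is a sequence number rather than a bare ring index:
 * the low 15 bits hold the ring position while bit 15 flips on every
 * wrap, which is why the offload request programs rq_prod = 0x8000 and
 * why the wrap adjustment above adds 0x8000 - BNX2FC_RQ_WQES_MAX. A
 * typical consume/return pair, as used in bnx2fc_process_unsol_compl():
 *
 *	err_entry = (struct fcoe_err_report_entry *)
 *			bnx2fc_get_next_rqe(tgt, 1);
 *	// ... examine the RQ buffer ...
 *	bnx2fc_return_rqe(tgt, 1);
 */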
1301
1302void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
1303 struct fcoe_task_ctx_entry *task,
1304 u16 orig_xid)
1305{
1306 u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
1307 struct bnx2fc_rport *tgt = io_req->tgt;
1308 u32 context_id = tgt->context_id;
1309
1310 memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1311
1312 /* Tx Write Rx Read */
1313 task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
1314 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
1315 task->tx_wr_rx_rd.init_flags = task_type <<
1316 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
1317 task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1318 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
1319 /* Common */
1320 task->cmn.common_flags = context_id <<
1321 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
1322 task->cmn.general.cleanup_info.task_id = orig_xid;
1325}
1326
1327void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
1328 struct fcoe_task_ctx_entry *task)
1329{
1330 struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
1331 struct bnx2fc_rport *tgt = io_req->tgt;
1332 struct fc_frame_header *fc_hdr;
1333 u8 task_type = 0;
1334 u64 *hdr;
1335 u64 temp_hdr[3];
1336 u32 context_id;
1337
1339 /* Obtain task_type */
1340 if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
1341 (io_req->cmd_type == BNX2FC_ELS)) {
1342 task_type = FCOE_TASK_TYPE_MIDPATH;
1343 } else if (io_req->cmd_type == BNX2FC_ABTS) {
1344 task_type = FCOE_TASK_TYPE_ABTS;
1345 }
1346
1347 memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1348
1349 /* Setup the task from io_req for easy reference */
1350 io_req->task = task;
1351
1352 BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
1353 io_req->cmd_type, task_type);
1354
1355 /* Tx only */
1356 if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
1357 (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
1358 task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
1359 (u32)mp_req->mp_req_bd_dma;
1360 task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
1361 (u32)((u64)mp_req->mp_req_bd_dma >> 32);
1362 task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
1363 BNX2FC_IO_DBG(io_req, "init_mp_task - bd_dma = 0x%llx\n",
1364 (unsigned long long)mp_req->mp_req_bd_dma);
1365 }
1366
1367 /* Tx Write Rx Read */
1368 task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_INIT <<
1369 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
1370 task->tx_wr_rx_rd.init_flags = task_type <<
1371 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
1372 task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
1373 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
1374 task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1375 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
1376
1377 /* Common */
1378 task->cmn.data_2_trns = io_req->data_xfer_len;
1379 context_id = tgt->context_id;
1380 task->cmn.common_flags = context_id <<
1381 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
1382 task->cmn.common_flags |= 1 <<
1383 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
1384 task->cmn.common_flags |= 1 <<
1385 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
1386
1387 /* Rx Write Tx Read */
1388 fc_hdr = &(mp_req->req_fc_hdr);
1389 if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1390 fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
1391 fc_hdr->fh_rx_id = htons(0xffff);
1392 task->rx_wr_tx_rd.rx_id = 0xffff;
1393 } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
1394 fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
1395 }
1396
1397 /* Fill FC Header into middle path buffer */
1398 hdr = (u64 *) &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
1399 memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
1400 hdr[0] = cpu_to_be64(temp_hdr[0]);
1401 hdr[1] = cpu_to_be64(temp_hdr[1]);
1402 hdr[2] = cpu_to_be64(temp_hdr[2]);
1403
1404 /* Rx Only */
1405 if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1406
1407 task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
1408 (u32)mp_req->mp_resp_bd_dma;
1409 task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
1410 (u32)((u64)mp_req->mp_resp_bd_dma >> 32);
1411 task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
1412 }
1413}
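
/*
 * The firmware wants the 24-byte FC header embedded in the task context
 * as three big-endian 64-bit words; the memcpy()/cpu_to_be64() sequence
 * above is equivalent to this sketch:
 *
 *	u64 tmp[3];
 *	memcpy(tmp, fc_hdr, sizeof(tmp));	// raw 24-byte header
 *	for (i = 0; i < 3; i++)
 *		hdr[i] = cpu_to_be64(tmp[i]);	// store as BE quadwords
 */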
1414
1415void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1416 struct fcoe_task_ctx_entry *task)
1417{
1418 u8 task_type;
1419 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1420 struct io_bdt *bd_tbl = io_req->bd_tbl;
1421 struct bnx2fc_rport *tgt = io_req->tgt;
1422 u64 *fcp_cmnd;
1423 u64 tmp_fcp_cmnd[4];
1424 u32 context_id;
1425 int cnt, i;
1426 int bd_count;
1427
1428 memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1429
1430 /* Setup the task from io_req for easy reference */
1431 io_req->task = task;
1432
1433 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1434 task_type = FCOE_TASK_TYPE_WRITE;
1435 else
1436 task_type = FCOE_TASK_TYPE_READ;
1437
1438 /* Tx only */
1439 if (task_type == FCOE_TASK_TYPE_WRITE) {
1440 task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
1441 (u32)bd_tbl->bd_tbl_dma;
1442 task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
1443 (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1444 task->tx_wr_only.sgl_ctx.mul_sges.sgl_size =
1445 bd_tbl->bd_valid;
1446 }
1447
1448 /*Tx Write Rx Read */
1449 /* Init state to NORMAL */
1450 task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
1451 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
1452 task->tx_wr_rx_rd.init_flags = task_type <<
1453 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
1454 task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
1455 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
1456 task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1457 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
1458
1459 /* Common */
1460 task->cmn.data_2_trns = io_req->data_xfer_len;
1461 context_id = tgt->context_id;
1462 task->cmn.common_flags = context_id <<
1463 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
1464 task->cmn.common_flags |= 1 <<
1465 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
1466 task->cmn.common_flags |= 1 <<
1467 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
1468
1469 /* Set initiative ownership */
1470 task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT;
1471
1472 /* Set initial seq counter */
1473 task->cmn.tx_low_seq_cnt = 1;
1474
1475 /* Set state to "waiting for the first packet" */
1476 task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME;
1477
1478 /* Fill FCP_CMND IU */
1479 fcp_cmnd = (u64 *)
1480 task->cmn.general.cmd_info.fcp_cmd_payload.opaque;
1481 bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
1482
1483 /* swap fcp_cmnd */
1484 cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
1485
1486 for (i = 0; i < cnt; i++) {
1487 *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
1488 fcp_cmnd++;
1489 }
1490
1491 /* Rx Write Tx Read */
1492 task->rx_wr_tx_rd.rx_id = 0xffff;
1493
1494 /* Rx Only */
1495 if (task_type == FCOE_TASK_TYPE_READ) {
1496
1497 bd_count = bd_tbl->bd_valid;
1498 if (bd_count == 1) {
1499
1500 struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1501
1502 task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.lo =
1503 fcoe_bd_tbl->buf_addr_lo;
1504 task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.hi =
1505 fcoe_bd_tbl->buf_addr_hi;
1506 task->rx_wr_only.sgl_ctx.single_sge.cur_buf_rem =
1507 fcoe_bd_tbl->buf_len;
1508 task->tx_wr_rx_rd.init_flags |= 1 <<
1509 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT;
1510 } else {
1511
1512 task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
1513 (u32)bd_tbl->bd_tbl_dma;
1514 task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
1515 (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1516 task->rx_wr_only.sgl_ctx.mul_sges.sgl_size =
1517 bd_tbl->bd_valid;
1518 }
1519 }
1520}
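
/*
 * Note the Rx-only setup above: a READ whose data fits a single BD is
 * described inline through single_sge, sparing the firmware an SGL
 * fetch from host memory, while multi-BD requests hand the firmware the
 * BD table address via cur_sge_addr, mirroring the Tx side.
 */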
1521
1522/**
1523 * bnx2fc_setup_task_ctx - allocate and map task context
1524 *
1525 * @hba: pointer to adapter structure
1526 *
1527 * allocate memory for task context, and associated BD table to be used
1528 * by firmware
1529 *
1530 */
1531int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1532{
1533 int rc = 0;
1534 struct regpair *task_ctx_bdt;
1535 dma_addr_t addr;
1536 int i;
1537
1538 /*
1539 * Allocate the task context BD table. One page of BD table can
1540 * map 256 buffers, and each buffer holds 32 task context
1541 * entries, so a single page covers up to 8192 task context
1542 * entries.
1543 */
1544 hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
1545 PAGE_SIZE,
1546 &hba->task_ctx_bd_dma,
1547 GFP_KERNEL);
1548 if (!hba->task_ctx_bd_tbl) {
1549 printk(KERN_ERR PFX "unable to allocate task context BDT\n");
1550 rc = -1;
1551 goto out;
1552 }
1553 memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
1554
1555 /*
1556 * Allocate task_ctx which is an array of pointers pointing to
1557 * a page containing 32 task contexts
1558 */
1559 hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
1560 GFP_KERNEL);
1561 if (!hba->task_ctx) {
1562 printk(KERN_ERR PFX "unable to allocate task context array\n");
1563 rc = -1;
1564 goto out1;
1565 }
1566
1567 /*
1568 * Allocate task_ctx_dma which is an array of dma addresses
1569 */
1570 hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
1571 sizeof(dma_addr_t)), GFP_KERNEL);
1572 if (!hba->task_ctx_dma) {
1573 printk(KERN_ERR PFX "unable to alloc context mapping array\n");
1574 rc = -1;
1575 goto out2;
1576 }
1577
1578 task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
1579 for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
1580
1581 hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
1582 PAGE_SIZE,
1583 &hba->task_ctx_dma[i],
1584 GFP_KERNEL);
1585 if (!hba->task_ctx[i]) {
1586 printk(KERN_ERR PFX "unable to alloc task context\n");
1587 rc = -1;
1588 goto out3;
1589 }
1590 memset(hba->task_ctx[i], 0, PAGE_SIZE);
1591 addr = (u64)hba->task_ctx_dma[i];
1592 task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
1593 task_ctx_bdt->lo = cpu_to_le32((u32)addr);
1594 task_ctx_bdt++;
1595 }
1596 return 0;
1597
1598out3:
1599 for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
1600 if (hba->task_ctx[i]) {
1601
1602 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1603 hba->task_ctx[i], hba->task_ctx_dma[i]);
1604 hba->task_ctx[i] = NULL;
1605 }
1606 }
1607
1608 kfree(hba->task_ctx_dma);
1609 hba->task_ctx_dma = NULL;
1610out2:
1611 kfree(hba->task_ctx);
1612 hba->task_ctx = NULL;
1613out1:
1614 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1615 hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
1616 hba->task_ctx_bd_tbl = NULL;
1617out:
1618 return rc;
1619}
1620
1621void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
1622{
1623 int i;
1624
1625 if (hba->task_ctx_bd_tbl) {
1626 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1627 hba->task_ctx_bd_tbl,
1628 hba->task_ctx_bd_dma);
1629 hba->task_ctx_bd_tbl = NULL;
1630 }
1631
1632 if (hba->task_ctx) {
1633 for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
1634 if (hba->task_ctx[i]) {
1635 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1636 hba->task_ctx[i],
1637 hba->task_ctx_dma[i]);
1638 hba->task_ctx[i] = NULL;
1639 }
1640 }
1641 kfree(hba->task_ctx);
1642 hba->task_ctx = NULL;
1643 }
1644
1645 kfree(hba->task_ctx_dma);
1646 hba->task_ctx_dma = NULL;
1647}
1648
1649static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
1650{
1651 int i;
1652 int segment_count;
1653 int hash_table_size;
1654 u32 *pbl;
1655
1656 segment_count = hba->hash_tbl_segment_count;
1657 hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
1658 sizeof(struct fcoe_hash_table_entry);
1659
1660 pbl = hba->hash_tbl_pbl;
1661 for (i = 0; i < segment_count; ++i) {
1662 dma_addr_t dma_address;
1663
1664 dma_address = le32_to_cpu(*pbl);
1665 ++pbl;
1666 dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
1667 ++pbl;
1668 dma_free_coherent(&hba->pcidev->dev,
1669 BNX2FC_HASH_TBL_CHUNK_SIZE,
1670 hba->hash_tbl_segments[i],
1671 dma_address);
1672
1673 }
1674
1675 if (hba->hash_tbl_pbl) {
1676 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1677 hba->hash_tbl_pbl,
1678 hba->hash_tbl_pbl_dma);
1679 hba->hash_tbl_pbl = NULL;
1680 }
1681}
1682
1683static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
1684{
1685 int i;
1686 int hash_table_size;
1687 int segment_count;
1688 int segment_array_size;
1689 int dma_segment_array_size;
1690 dma_addr_t *dma_segment_array;
1691 u32 *pbl;
1692
1693 hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
1694 sizeof(struct fcoe_hash_table_entry);
1695
1696 segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
1697 segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
1698 hba->hash_tbl_segment_count = segment_count;
1699
1700 segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
1701 hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
1702 if (!hba->hash_tbl_segments) {
1703 printk(KERN_ERR PFX "hash table pointers alloc failed\n");
1704 return -ENOMEM;
1705 }
1706 dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
1707 dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
1708 if (!dma_segment_array) {
1709 printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
     kfree(hba->hash_tbl_segments);
     hba->hash_tbl_segments = NULL;
1710 return -ENOMEM;
1711 }
1712
1713 for (i = 0; i < segment_count; ++i) {
1714 hba->hash_tbl_segments[i] =
1715 dma_alloc_coherent(&hba->pcidev->dev,
1716 BNX2FC_HASH_TBL_CHUNK_SIZE,
1717 &dma_segment_array[i],
1718 GFP_KERNEL);
1719 if (!hba->hash_tbl_segments[i]) {
1720 printk(KERN_ERR PFX "hash segment alloc failed\n");
1721 while (--i >= 0) {
1722 dma_free_coherent(&hba->pcidev->dev,
1723 BNX2FC_HASH_TBL_CHUNK_SIZE,
1724 hba->hash_tbl_segments[i],
1725 dma_segment_array[i]);
1726 hba->hash_tbl_segments[i] = NULL;
1727 }
1728 kfree(dma_segment_array);
1729 return -ENOMEM;
1730 }
1731 memset(hba->hash_tbl_segments[i], 0,
1732 BNX2FC_HASH_TBL_CHUNK_SIZE);
1733 }
1734
1735 hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
1736 PAGE_SIZE,
1737 &hba->hash_tbl_pbl_dma,
1738 GFP_KERNEL);
1739 if (!hba->hash_tbl_pbl) {
1740 printk(KERN_ERR PFX "hash table pbl alloc failed\n");
1741 kfree(dma_segment_array);
1742 return -ENOMEM;
1743 }
1744 memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
1745
1746 pbl = hba->hash_tbl_pbl;
1747 for (i = 0; i < segment_count; ++i) {
1748 u64 paddr = dma_segment_array[i];
1749 *pbl = cpu_to_le32((u32) paddr);
1750 ++pbl;
1751 *pbl = cpu_to_le32((u32) (paddr >> 32));
1752 ++pbl;
1753 }
1754 pbl = hba->hash_tbl_pbl;
1755 i = 0;
1756 while (*pbl && *(pbl + 1)) {
1757 u32 lo;
1758 u32 hi;
1759 lo = *pbl;
1760 ++pbl;
1761 hi = *pbl;
1762 ++pbl;
1763 ++i;
1764 }
1765 kfree(dma_segment_array);
1766 return 0;
1767}
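
/*
 * The PBL built above is an array of little-endian {addr_lo, addr_hi}
 * pairs, one per BNX2FC_HASH_TBL_CHUNK_SIZE segment, matching struct
 * regpair. One entry, in sketch form:
 *
 *	pbl[2 * i]     = cpu_to_le32((u32) paddr);	   // low 32 bits
 *	pbl[2 * i + 1] = cpu_to_le32((u32) (paddr >> 32)); // high 32 bits
 *
 * bnx2fc_setup_task_ctx() fills its task context BD table the same way
 * via struct regpair.
 */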
1768
1769/**
1770 * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer
1771 *
1772 * @hba: Pointer to adapter structure
1773 *
1774 */
1775int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
1776{
1777 u64 addr;
1778 u32 mem_size;
1779 int i;
1780
1781 if (bnx2fc_allocate_hash_table(hba))
1782 return -ENOMEM;
1783
1784 mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
1785 hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
1786 &hba->t2_hash_tbl_ptr_dma,
1787 GFP_KERNEL);
1788 if (!hba->t2_hash_tbl_ptr) {
1789 printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
1790 bnx2fc_free_fw_resc(hba);
1791 return -ENOMEM;
1792 }
1793 memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
1794
1795 mem_size = BNX2FC_NUM_MAX_SESS *
1796 sizeof(struct fcoe_t2_hash_table_entry);
1797 hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
1798 &hba->t2_hash_tbl_dma,
1799 GFP_KERNEL);
1800 if (!hba->t2_hash_tbl) {
1801 printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
1802 bnx2fc_free_fw_resc(hba);
1803 return -ENOMEM;
1804 }
1805 memset(hba->t2_hash_tbl, 0x00, mem_size);
1806 for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
1807 addr = (u64) hba->t2_hash_tbl_dma +
1808 ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
1809 hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
1810 hba->t2_hash_tbl[i].next.hi = addr >> 32;
1811 }
1812
1813 hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
1814 PAGE_SIZE, &hba->dummy_buf_dma,
1815 GFP_KERNEL);
1816 if (!hba->dummy_buffer) {
1817 printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
1818 bnx2fc_free_fw_resc(hba);
1819 return -ENOMEM;
1820 }
1821
1822 hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
1823 PAGE_SIZE,
1824 &hba->stats_buf_dma,
1825 GFP_KERNEL);
1826 if (!hba->stats_buffer) {
1827 printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
1828 bnx2fc_free_fw_resc(hba);
1829 return -ENOMEM;
1830 }
1831 memset(hba->stats_buffer, 0x00, PAGE_SIZE);
1832
1833 return 0;
1834}
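
/*
 * The t2 hash table set up above is pre-linked into a chain: entry i's
 * next pointer holds the DMA address of entry i + 1 (entry 0 points at
 * entry 1, and so on), which presumably serves as the firmware's free
 * list for hash collision entries.
 */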
1835
1836void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
1837{
1838 u32 mem_size;
1839
1840 if (hba->stats_buffer) {
1841 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1842 hba->stats_buffer, hba->stats_buf_dma);
1843 hba->stats_buffer = NULL;
1844 }
1845
1846 if (hba->dummy_buffer) {
1847 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1848 hba->dummy_buffer, hba->dummy_buf_dma);
1849 hba->dummy_buffer = NULL;
1850 }
1851
1852 if (hba->t2_hash_tbl_ptr) {
1853 mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
1854 dma_free_coherent(&hba->pcidev->dev, mem_size,
1855 hba->t2_hash_tbl_ptr,
1856 hba->t2_hash_tbl_ptr_dma);
1857 hba->t2_hash_tbl_ptr = NULL;
1858 }
1859
1860 if (hba->t2_hash_tbl) {
1861 mem_size = BNX2FC_NUM_MAX_SESS *
1862 sizeof(struct fcoe_t2_hash_table_entry);
1863 dma_free_coherent(&hba->pcidev->dev, mem_size,
1864 hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
1865 hba->t2_hash_tbl = NULL;
1866 }
1867 bnx2fc_free_hash_table(hba);
1868}