author | Naresh Kumar Inna <naresh@chelsio.com> | 2012-11-15 12:11:18 -0500
committer | James Bottomley <JBottomley@Parallels.com> | 2012-11-27 00:00:39 -0500
commit | a3667aaed5698b84bad2f1b3f71adc86499f4bc6 (patch)
tree | f5b6f2b9ac646c84325b4e4862598452f479d30e /drivers/scsi/csiostor
parent | ce91a9234c16b6d480847f49ea504f66b3f6e350 (diff)
[SCSI] csiostor: Chelsio FCoE offload driver
Signed-off-by: Naresh Kumar Inna <naresh@chelsio.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers/scsi/csiostor')
-rw-r--r-- | drivers/scsi/csiostor/Kconfig | 19
-rw-r--r-- | drivers/scsi/csiostor/Makefile | 11
-rw-r--r-- | drivers/scsi/csiostor/csio_attr.c | 796
-rw-r--r-- | drivers/scsi/csiostor/csio_defs.h | 121
-rw-r--r-- | drivers/scsi/csiostor/csio_hw.c | 4395
-rw-r--r-- | drivers/scsi/csiostor/csio_hw.h | 667
-rw-r--r-- | drivers/scsi/csiostor/csio_init.c | 1274
-rw-r--r-- | drivers/scsi/csiostor/csio_init.h | 158
-rw-r--r-- | drivers/scsi/csiostor/csio_isr.c | 624
-rw-r--r-- | drivers/scsi/csiostor/csio_lnode.c | 2133
-rw-r--r-- | drivers/scsi/csiostor/csio_lnode.h | 255
-rw-r--r-- | drivers/scsi/csiostor/csio_mb.c | 1770
-rw-r--r-- | drivers/scsi/csiostor/csio_mb.h | 278
-rw-r--r-- | drivers/scsi/csiostor/csio_rnode.c | 912
-rw-r--r-- | drivers/scsi/csiostor/csio_rnode.h | 141
-rw-r--r-- | drivers/scsi/csiostor/csio_scsi.c | 2555
-rw-r--r-- | drivers/scsi/csiostor/csio_scsi.h | 342
-rw-r--r-- | drivers/scsi/csiostor/csio_wr.c | 1632
-rw-r--r-- | drivers/scsi/csiostor/csio_wr.h | 512
-rw-r--r-- | drivers/scsi/csiostor/t4fw_api_stor.h | 578
20 files changed, 19173 insertions, 0 deletions
diff --git a/drivers/scsi/csiostor/Kconfig b/drivers/scsi/csiostor/Kconfig
new file mode 100644
index 000000000000..4d03b032aa10
--- /dev/null
+++ b/drivers/scsi/csiostor/Kconfig
@@ -0,0 +1,19 @@
1 | config SCSI_CHELSIO_FCOE | ||
2 | tristate "Chelsio Communications FCoE support" | ||
3 | depends on PCI && SCSI | ||
4 | select SCSI_FC_ATTRS | ||
5 | select FW_LOADER | ||
6 | help | ||
7 | This driver supports FCoE Offload functionality over | ||
8 | Chelsio T4-based 10Gb Converged Network Adapters. | ||
9 | |||
10 | For general information about Chelsio and our products, visit | ||
11 | our website at <http://www.chelsio.com>. | ||
12 | |||
13 | For customer support, please visit our customer support page at | ||
14 | <http://www.chelsio.com/support.html>. | ||
15 | |||
16 | Please send feedback to <linux-bugs@chelsio.com>. | ||
17 | |||
18 | To compile this driver as a module, choose M here; the module | ||
19 | will be called csiostor. | ||
diff --git a/drivers/scsi/csiostor/Makefile b/drivers/scsi/csiostor/Makefile
new file mode 100644
index 000000000000..b581966c88f9
--- /dev/null
+++ b/drivers/scsi/csiostor/Makefile
@@ -0,0 +1,11 @@
1 | # | ||
2 | ## Chelsio FCoE driver | ||
3 | # | ||
4 | ## | ||
5 | |||
6 | ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4 | ||
7 | |||
8 | obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor.o | ||
9 | |||
10 | csiostor-objs := csio_attr.o csio_init.o csio_lnode.o csio_scsi.o \ | ||
11 | csio_hw.o csio_isr.o csio_mb.o csio_rnode.o csio_wr.o | ||
diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c
new file mode 100644
index 000000000000..c8cf85785db2
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_attr.c
@@ -0,0 +1,796 @@
1 | /* | ||
2 | * This file is part of the Chelsio FCoE driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/kernel.h> | ||
36 | #include <linux/string.h> | ||
37 | #include <linux/delay.h> | ||
38 | #include <linux/module.h> | ||
39 | #include <linux/init.h> | ||
40 | #include <linux/pci.h> | ||
41 | #include <linux/mm.h> | ||
42 | #include <linux/jiffies.h> | ||
43 | #include <scsi/fc/fc_fs.h> | ||
44 | |||
45 | #include "csio_init.h" | ||
46 | |||
47 | static void | ||
48 | csio_vport_set_state(struct csio_lnode *ln); | ||
49 | |||
50 | /* | ||
51 | * csio_reg_rnode - Register a remote port with FC transport. | ||
52 | * @rn: Rnode representing remote port. | ||
53 | * | ||
54 | * Call fc_remote_port_add() to register this remote port with FC transport. | ||
55 | * If the remote port is an Initiator, a Target, or both, update the role accordingly. | ||
56 | * | ||
57 | */ | ||
58 | void | ||
59 | csio_reg_rnode(struct csio_rnode *rn) | ||
60 | { | ||
61 | struct csio_lnode *ln = csio_rnode_to_lnode(rn); | ||
62 | struct Scsi_Host *shost = csio_ln_to_shost(ln); | ||
63 | struct fc_rport_identifiers ids; | ||
64 | struct fc_rport *rport; | ||
65 | struct csio_service_parms *sp; | ||
66 | |||
67 | ids.node_name = wwn_to_u64(csio_rn_wwnn(rn)); | ||
68 | ids.port_name = wwn_to_u64(csio_rn_wwpn(rn)); | ||
69 | ids.port_id = rn->nport_id; | ||
70 | ids.roles = FC_RPORT_ROLE_UNKNOWN; | ||
71 | |||
72 | if (rn->role & CSIO_RNFR_INITIATOR || rn->role & CSIO_RNFR_TARGET) { | ||
73 | rport = rn->rport; | ||
74 | CSIO_ASSERT(rport != NULL); | ||
75 | goto update_role; | ||
76 | } | ||
77 | |||
78 | rn->rport = fc_remote_port_add(shost, 0, &ids); | ||
79 | if (!rn->rport) { | ||
80 | csio_ln_err(ln, "Failed to register rport = 0x%x.\n", | ||
81 | rn->nport_id); | ||
82 | return; | ||
83 | } | ||
84 | |||
85 | ln->num_reg_rnodes++; | ||
86 | rport = rn->rport; | ||
87 | spin_lock_irq(shost->host_lock); | ||
88 | *((struct csio_rnode **)rport->dd_data) = rn; | ||
89 | spin_unlock_irq(shost->host_lock); | ||
90 | |||
91 | sp = &rn->rn_sparm; | ||
92 | rport->maxframe_size = sp->csp.sp_bb_data; | ||
93 | if (ntohs(sp->clsp[2].cp_class) & FC_CPC_VALID) | ||
94 | rport->supported_classes = FC_COS_CLASS3; | ||
95 | else | ||
96 | rport->supported_classes = FC_COS_UNSPECIFIED; | ||
97 | update_role: | ||
98 | if (rn->role & CSIO_RNFR_INITIATOR) | ||
99 | ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; | ||
100 | if (rn->role & CSIO_RNFR_TARGET) | ||
101 | ids.roles |= FC_RPORT_ROLE_FCP_TARGET; | ||
102 | |||
103 | if (ids.roles != FC_RPORT_ROLE_UNKNOWN) | ||
104 | fc_remote_port_rolechg(rport, ids.roles); | ||
105 | |||
106 | rn->scsi_id = rport->scsi_target_id; | ||
107 | |||
108 | csio_ln_dbg(ln, "Remote port x%x role 0x%x registered\n", | ||
109 | rn->nport_id, ids.roles); | ||
110 | } | ||
111 | |||
112 | /* | ||
113 | * csio_unreg_rnode - Unregister a remote port with FC transport. | ||
114 | * @rn: Rnode representing remote port. | ||
115 | * | ||
116 | * Call fc_remote_port_delete() to unregister this remote port with FC | ||
117 | * transport. | ||
118 | * | ||
119 | */ | ||
120 | void | ||
121 | csio_unreg_rnode(struct csio_rnode *rn) | ||
122 | { | ||
123 | struct csio_lnode *ln = csio_rnode_to_lnode(rn); | ||
124 | struct fc_rport *rport = rn->rport; | ||
125 | |||
126 | rn->role &= ~(CSIO_RNFR_INITIATOR | CSIO_RNFR_TARGET); | ||
127 | fc_remote_port_delete(rport); | ||
128 | ln->num_reg_rnodes--; | ||
129 | |||
130 | csio_ln_dbg(ln, "Remote port x%x un-registered\n", rn->nport_id); | ||
131 | } | ||
132 | |||
133 | /* | ||
134 | * csio_lnode_async_event - Async events from local port. | ||
135 | * @ln: lnode representing local port. | ||
136 | * | ||
137 | * Async events from local node that FC transport/SCSI ML | ||
138 | * should be made aware of (e.g. RSCN). | ||
139 | */ | ||
140 | void | ||
141 | csio_lnode_async_event(struct csio_lnode *ln, enum csio_ln_fc_evt fc_evt) | ||
142 | { | ||
143 | switch (fc_evt) { | ||
144 | case CSIO_LN_FC_RSCN: | ||
145 | /* Get payload of rscn from ln */ | ||
146 | /* For each RSCN entry */ | ||
147 | /* | ||
148 | * fc_host_post_event(shost, | ||
149 | * fc_get_event_number(), | ||
150 | * FCH_EVT_RSCN, | ||
151 | * rscn_entry); | ||
152 | */ | ||
153 | break; | ||
154 | case CSIO_LN_FC_LINKUP: | ||
155 | /* send fc_host_post_event */ | ||
156 | /* set vport state */ | ||
157 | if (csio_is_npiv_ln(ln)) | ||
158 | csio_vport_set_state(ln); | ||
159 | |||
160 | break; | ||
161 | case CSIO_LN_FC_LINKDOWN: | ||
162 | /* send fc_host_post_event */ | ||
163 | /* set vport state */ | ||
164 | if (csio_is_npiv_ln(ln)) | ||
165 | csio_vport_set_state(ln); | ||
166 | |||
167 | break; | ||
168 | case CSIO_LN_FC_ATTRIB_UPDATE: | ||
169 | csio_fchost_attr_init(ln); | ||
170 | break; | ||
171 | default: | ||
172 | break; | ||
173 | } | ||
174 | } | ||
175 | |||
176 | /* | ||
177 | * csio_fchost_attr_init - Initialize FC transport attributes | ||
178 | * @ln: Lnode. | ||
179 | * | ||
180 | */ | ||
181 | void | ||
182 | csio_fchost_attr_init(struct csio_lnode *ln) | ||
183 | { | ||
184 | struct Scsi_Host *shost = csio_ln_to_shost(ln); | ||
185 | |||
186 | fc_host_node_name(shost) = wwn_to_u64(csio_ln_wwnn(ln)); | ||
187 | fc_host_port_name(shost) = wwn_to_u64(csio_ln_wwpn(ln)); | ||
188 | |||
189 | fc_host_supported_classes(shost) = FC_COS_CLASS3; | ||
190 | fc_host_max_npiv_vports(shost) = | ||
191 | (csio_lnode_to_hw(ln))->fres_info.max_vnps; | ||
192 | fc_host_supported_speeds(shost) = FC_PORTSPEED_10GBIT | | ||
193 | FC_PORTSPEED_1GBIT; | ||
194 | |||
195 | fc_host_maxframe_size(shost) = ln->ln_sparm.csp.sp_bb_data; | ||
196 | memset(fc_host_supported_fc4s(shost), 0, | ||
197 | sizeof(fc_host_supported_fc4s(shost))); | ||
198 | fc_host_supported_fc4s(shost)[7] = 1; | ||
199 | |||
200 | memset(fc_host_active_fc4s(shost), 0, | ||
201 | sizeof(fc_host_active_fc4s(shost))); | ||
202 | fc_host_active_fc4s(shost)[7] = 1; | ||
203 | } | ||
204 | |||
205 | /* | ||
206 | * csio_get_host_port_id - The sysfs entry for nport_id is | ||
207 | * populated/cached from this function. | ||
208 | */ | ||
209 | static void | ||
210 | csio_get_host_port_id(struct Scsi_Host *shost) | ||
211 | { | ||
212 | struct csio_lnode *ln = shost_priv(shost); | ||
213 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
214 | |||
215 | spin_lock_irq(&hw->lock); | ||
216 | fc_host_port_id(shost) = ln->nport_id; | ||
217 | spin_unlock_irq(&hw->lock); | ||
218 | } | ||
219 | |||
220 | /* | ||
221 | * csio_get_host_port_type - Return FC local port type. | ||
222 | * @shost: scsi host. | ||
223 | * | ||
224 | */ | ||
225 | static void | ||
226 | csio_get_host_port_type(struct Scsi_Host *shost) | ||
227 | { | ||
228 | struct csio_lnode *ln = shost_priv(shost); | ||
229 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
230 | |||
231 | spin_lock_irq(&hw->lock); | ||
232 | if (csio_is_npiv_ln(ln)) | ||
233 | fc_host_port_type(shost) = FC_PORTTYPE_NPIV; | ||
234 | else | ||
235 | fc_host_port_type(shost) = FC_PORTTYPE_NPORT; | ||
236 | spin_unlock_irq(&hw->lock); | ||
237 | } | ||
238 | |||
239 | /* | ||
240 | * csio_get_host_port_state - Return FC local port state. | ||
241 | * @shost: scsi host. | ||
242 | * | ||
243 | */ | ||
244 | static void | ||
245 | csio_get_host_port_state(struct Scsi_Host *shost) | ||
246 | { | ||
247 | struct csio_lnode *ln = shost_priv(shost); | ||
248 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
249 | char state[16]; | ||
250 | |||
251 | spin_lock_irq(&hw->lock); | ||
252 | |||
253 | csio_lnode_state_to_str(ln, state); | ||
254 | if (!strcmp(state, "READY")) | ||
255 | fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; | ||
256 | else if (!strcmp(state, "OFFLINE")) | ||
257 | fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; | ||
258 | else | ||
259 | fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; | ||
260 | |||
261 | spin_unlock_irq(&hw->lock); | ||
262 | } | ||
263 | |||
264 | /* | ||
265 | * csio_get_host_speed - Return link speed to FC transport. | ||
266 | * @shost: scsi host. | ||
267 | * | ||
268 | */ | ||
269 | static void | ||
270 | csio_get_host_speed(struct Scsi_Host *shost) | ||
271 | { | ||
272 | struct csio_lnode *ln = shost_priv(shost); | ||
273 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
274 | |||
275 | spin_lock_irq(&hw->lock); | ||
276 | switch (hw->pport[ln->portid].link_speed) { | ||
277 | case FW_PORT_CAP_SPEED_1G: | ||
278 | fc_host_speed(shost) = FC_PORTSPEED_1GBIT; | ||
279 | break; | ||
280 | case FW_PORT_CAP_SPEED_10G: | ||
281 | fc_host_speed(shost) = FC_PORTSPEED_10GBIT; | ||
282 | break; | ||
283 | default: | ||
284 | fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; | ||
285 | break; | ||
286 | } | ||
287 | spin_unlock_irq(&hw->lock); | ||
288 | } | ||
289 | |||
290 | /* | ||
291 | * csio_get_host_fabric_name - Return fabric name | ||
292 | * @shost: scsi host. | ||
293 | * | ||
294 | */ | ||
295 | static void | ||
296 | csio_get_host_fabric_name(struct Scsi_Host *shost) | ||
297 | { | ||
298 | struct csio_lnode *ln = shost_priv(shost); | ||
299 | struct csio_rnode *rn = NULL; | ||
300 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
301 | |||
302 | spin_lock_irq(&hw->lock); | ||
303 | rn = csio_rnode_lookup_portid(ln, FC_FID_FLOGI); | ||
304 | if (rn) | ||
305 | fc_host_fabric_name(shost) = wwn_to_u64(csio_rn_wwnn(rn)); | ||
306 | else | ||
307 | fc_host_fabric_name(shost) = 0; | ||
308 | spin_unlock_irq(&hw->lock); | ||
309 | } | ||
310 | |||
311 | /* | ||
312 | * csio_get_stats - Return FC transport statistics. | ||
313 | * @shost: scsi host. | ||
314 | * | ||
315 | */ | ||
316 | static struct fc_host_statistics * | ||
317 | csio_get_stats(struct Scsi_Host *shost) | ||
318 | { | ||
319 | struct csio_lnode *ln = shost_priv(shost); | ||
320 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
321 | struct fc_host_statistics *fhs = &ln->fch_stats; | ||
322 | struct fw_fcoe_port_stats fcoe_port_stats; | ||
323 | uint64_t seconds; | ||
324 | |||
325 | memset(&fcoe_port_stats, 0, sizeof(struct fw_fcoe_port_stats)); | ||
326 | csio_get_phy_port_stats(hw, ln->portid, &fcoe_port_stats); | ||
327 | |||
328 | fhs->tx_frames += (fcoe_port_stats.tx_bcast_frames + | ||
329 | fcoe_port_stats.tx_mcast_frames + | ||
330 | fcoe_port_stats.tx_ucast_frames + | ||
331 | fcoe_port_stats.tx_offload_frames); | ||
332 | fhs->tx_words += (fcoe_port_stats.tx_bcast_bytes + | ||
333 | fcoe_port_stats.tx_mcast_bytes + | ||
334 | fcoe_port_stats.tx_ucast_bytes + | ||
335 | fcoe_port_stats.tx_offload_bytes) / | ||
336 | CSIO_WORD_TO_BYTE; | ||
337 | fhs->rx_frames += (fcoe_port_stats.rx_bcast_frames + | ||
338 | fcoe_port_stats.rx_mcast_frames + | ||
339 | fcoe_port_stats.rx_ucast_frames); | ||
340 | fhs->rx_words += (fcoe_port_stats.rx_bcast_bytes + | ||
341 | fcoe_port_stats.rx_mcast_bytes + | ||
342 | fcoe_port_stats.rx_ucast_bytes) / | ||
343 | CSIO_WORD_TO_BYTE; | ||
344 | fhs->error_frames += fcoe_port_stats.rx_err_frames; | ||
345 | fhs->fcp_input_requests += ln->stats.n_input_requests; | ||
346 | fhs->fcp_output_requests += ln->stats.n_output_requests; | ||
347 | fhs->fcp_control_requests += ln->stats.n_control_requests; | ||
348 | fhs->fcp_input_megabytes += ln->stats.n_input_bytes >> 20; | ||
349 | fhs->fcp_output_megabytes += ln->stats.n_output_bytes >> 20; | ||
350 | fhs->link_failure_count = ln->stats.n_link_down; | ||
351 | /* Seconds elapsed since the last device reset */ | ||
352 | seconds = jiffies_to_msecs(jiffies) - hw->stats.n_reset_start; | ||
353 | do_div(seconds, 1000); | ||
354 | fhs->seconds_since_last_reset = seconds; | ||
355 | |||
356 | return fhs; | ||
357 | } | ||
358 | |||
359 | /* | ||
360 | * csio_set_rport_loss_tmo - Set the rport dev loss timeout | ||
361 | * @rport: fc rport. | ||
362 | * @timeout: new value for dev loss tmo. | ||
363 | * | ||
364 | * If @timeout is non-zero, set dev_loss_tmo to @timeout; otherwise, | ||
365 | * set dev_loss_tmo to one. | ||
366 | */ | ||
367 | static void | ||
368 | csio_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) | ||
369 | { | ||
370 | if (timeout) | ||
371 | rport->dev_loss_tmo = timeout; | ||
372 | else | ||
373 | rport->dev_loss_tmo = 1; | ||
374 | } | ||
375 | |||
376 | static void | ||
377 | csio_vport_set_state(struct csio_lnode *ln) | ||
378 | { | ||
379 | struct fc_vport *fc_vport = ln->fc_vport; | ||
380 | struct csio_lnode *pln = ln->pln; | ||
381 | char state[16]; | ||
382 | |||
383 | /* Set fc vport state based on physical lnode */ | ||
384 | csio_lnode_state_to_str(pln, state); | ||
385 | if (strcmp(state, "READY")) { | ||
386 | fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); | ||
387 | return; | ||
388 | } | ||
389 | |||
390 | if (!(pln->flags & CSIO_LNF_NPIVSUPP)) { | ||
391 | fc_vport_set_state(fc_vport, FC_VPORT_NO_FABRIC_SUPP); | ||
392 | return; | ||
393 | } | ||
394 | |||
395 | /* Set fc vport state based on virtual lnode */ | ||
396 | csio_lnode_state_to_str(ln, state); | ||
397 | if (strcmp(state, "READY")) { | ||
398 | fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); | ||
399 | return; | ||
400 | } | ||
401 | fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE); | ||
402 | } | ||
403 | |||
404 | static int | ||
405 | csio_fcoe_alloc_vnp(struct csio_hw *hw, struct csio_lnode *ln) | ||
406 | { | ||
407 | struct csio_lnode *pln; | ||
408 | struct csio_mb *mbp; | ||
409 | struct fw_fcoe_vnp_cmd *rsp; | ||
410 | int ret = 0; | ||
411 | int retry = 0; | ||
412 | |||
413 | /* Issue VNP cmd to alloc vport */ | ||
414 | /* Allocate Mbox request */ | ||
415 | spin_lock_irq(&hw->lock); | ||
416 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
417 | if (!mbp) { | ||
418 | CSIO_INC_STATS(hw, n_err_nomem); | ||
419 | ret = -ENOMEM; | ||
420 | goto out; | ||
421 | } | ||
422 | |||
423 | pln = ln->pln; | ||
424 | ln->fcf_flowid = pln->fcf_flowid; | ||
425 | ln->portid = pln->portid; | ||
426 | |||
427 | csio_fcoe_vnp_alloc_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO, | ||
428 | pln->fcf_flowid, pln->vnp_flowid, 0, | ||
429 | csio_ln_wwnn(ln), csio_ln_wwpn(ln), NULL); | ||
430 | |||
431 | for (retry = 0; retry < 3; retry++) { | ||
432 | /* FW is expected to complete the VNP command in immediate | ||
433 | * mode without much delay. | ||
434 | * Otherwise, I/O latency increases, since the HW lock is | ||
435 | * held until the VNP mailbox command completes. | ||
436 | */ | ||
437 | ret = csio_mb_issue(hw, mbp); | ||
438 | if (ret != -EBUSY) | ||
439 | break; | ||
440 | |||
441 | /* Retry if mbox returns busy */ | ||
442 | spin_unlock_irq(&hw->lock); | ||
443 | msleep(2000); | ||
444 | spin_lock_irq(&hw->lock); | ||
445 | } | ||
446 | |||
447 | if (ret) { | ||
448 | csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n"); | ||
449 | goto out_free; | ||
450 | } | ||
451 | |||
452 | /* Process Mbox response of VNP command */ | ||
453 | rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb); | ||
454 | if (FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) { | ||
455 | csio_ln_err(ln, "FCOE VNP ALLOC cmd returned 0x%x!\n", | ||
456 | FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16))); | ||
457 | ret = -EINVAL; | ||
458 | goto out_free; | ||
459 | } | ||
460 | |||
461 | ln->vnp_flowid = FW_FCOE_VNP_CMD_VNPI_GET( | ||
462 | ntohl(rsp->gen_wwn_to_vnpi)); | ||
463 | memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8); | ||
464 | memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8); | ||
465 | |||
466 | csio_ln_dbg(ln, "FCOE VNPI: 0x%x\n", ln->vnp_flowid); | ||
467 | csio_ln_dbg(ln, "\tWWNN: %x%x%x%x%x%x%x%x\n", | ||
468 | ln->ln_sparm.wwnn[0], ln->ln_sparm.wwnn[1], | ||
469 | ln->ln_sparm.wwnn[2], ln->ln_sparm.wwnn[3], | ||
470 | ln->ln_sparm.wwnn[4], ln->ln_sparm.wwnn[5], | ||
471 | ln->ln_sparm.wwnn[6], ln->ln_sparm.wwnn[7]); | ||
472 | csio_ln_dbg(ln, "\tWWPN: %x%x%x%x%x%x%x%x\n", | ||
473 | ln->ln_sparm.wwpn[0], ln->ln_sparm.wwpn[1], | ||
474 | ln->ln_sparm.wwpn[2], ln->ln_sparm.wwpn[3], | ||
475 | ln->ln_sparm.wwpn[4], ln->ln_sparm.wwpn[5], | ||
476 | ln->ln_sparm.wwpn[6], ln->ln_sparm.wwpn[7]); | ||
477 | |||
478 | out_free: | ||
479 | mempool_free(mbp, hw->mb_mempool); | ||
480 | out: | ||
481 | spin_unlock_irq(&hw->lock); | ||
482 | return ret; | ||
483 | } | ||
484 | |||
485 | static int | ||
486 | csio_fcoe_free_vnp(struct csio_hw *hw, struct csio_lnode *ln) | ||
487 | { | ||
488 | struct csio_lnode *pln; | ||
489 | struct csio_mb *mbp; | ||
490 | struct fw_fcoe_vnp_cmd *rsp; | ||
491 | int ret = 0; | ||
492 | int retry = 0; | ||
493 | |||
494 | /* Issue VNP cmd to free vport */ | ||
495 | /* Allocate Mbox request */ | ||
496 | |||
497 | spin_lock_irq(&hw->lock); | ||
498 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
499 | if (!mbp) { | ||
500 | CSIO_INC_STATS(hw, n_err_nomem); | ||
501 | ret = -ENOMEM; | ||
502 | goto out; | ||
503 | } | ||
504 | |||
505 | pln = ln->pln; | ||
506 | |||
507 | csio_fcoe_vnp_free_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO, | ||
508 | ln->fcf_flowid, ln->vnp_flowid, | ||
509 | NULL); | ||
510 | |||
511 | for (retry = 0; retry < 3; retry++) { | ||
512 | ret = csio_mb_issue(hw, mbp); | ||
513 | if (ret != -EBUSY) | ||
514 | break; | ||
515 | |||
516 | /* Retry if mbox returns busy */ | ||
517 | spin_unlock_irq(&hw->lock); | ||
518 | msleep(2000); | ||
519 | spin_lock_irq(&hw->lock); | ||
520 | } | ||
521 | |||
522 | if (ret) { | ||
523 | csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n"); | ||
524 | goto out_free; | ||
525 | } | ||
526 | |||
527 | /* Process Mbox response of VNP command */ | ||
528 | rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb); | ||
529 | if (FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) { | ||
530 | csio_ln_err(ln, "FCOE VNP FREE cmd returned 0x%x!\n", | ||
531 | FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16))); | ||
532 | ret = -EINVAL; | ||
533 | } | ||
534 | |||
535 | out_free: | ||
536 | mempool_free(mbp, hw->mb_mempool); | ||
537 | out: | ||
538 | spin_unlock_irq(&hw->lock); | ||
539 | return ret; | ||
540 | } | ||
541 | |||
542 | static int | ||
543 | csio_vport_create(struct fc_vport *fc_vport, bool disable) | ||
544 | { | ||
545 | struct Scsi_Host *shost = fc_vport->shost; | ||
546 | struct csio_lnode *pln = shost_priv(shost); | ||
547 | struct csio_lnode *ln = NULL; | ||
548 | struct csio_hw *hw = csio_lnode_to_hw(pln); | ||
549 | uint8_t wwn[8]; | ||
550 | int ret = -1; | ||
551 | |||
552 | ln = csio_shost_init(hw, &fc_vport->dev, false, pln); | ||
553 | if (!ln) | ||
554 | goto error; | ||
555 | |||
556 | if (fc_vport->node_name != 0) { | ||
557 | u64_to_wwn(fc_vport->node_name, wwn); | ||
558 | |||
559 | if (!CSIO_VALID_WWN(wwn)) { | ||
560 | csio_ln_err(ln, | ||
561 | "vport create failed. Invalid wwnn\n"); | ||
562 | goto error; | ||
563 | } | ||
564 | memcpy(csio_ln_wwnn(ln), wwn, 8); | ||
565 | } | ||
566 | |||
567 | if (fc_vport->port_name != 0) { | ||
568 | u64_to_wwn(fc_vport->port_name, wwn); | ||
569 | |||
570 | if (!CSIO_VALID_WWN(wwn)) { | ||
571 | csio_ln_err(ln, | ||
572 | "vport create failed. Invalid wwpn\n"); | ||
573 | goto error; | ||
574 | } | ||
575 | |||
576 | if (csio_lnode_lookup_by_wwpn(hw, wwn)) { | ||
577 | csio_ln_err(ln, | ||
578 | "vport create failed. wwpn already exists\n"); | ||
579 | goto error; | ||
580 | } | ||
581 | memcpy(csio_ln_wwpn(ln), wwn, 8); | ||
582 | } | ||
583 | |||
584 | fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING); | ||
585 | |||
586 | if (csio_fcoe_alloc_vnp(hw, ln)) | ||
587 | goto error; | ||
588 | |||
589 | *(struct csio_lnode **)fc_vport->dd_data = ln; | ||
590 | ln->fc_vport = fc_vport; | ||
591 | if (!fc_vport->node_name) | ||
592 | fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln)); | ||
593 | if (!fc_vport->port_name) | ||
594 | fc_vport->port_name = wwn_to_u64(csio_ln_wwpn(ln)); | ||
595 | csio_fchost_attr_init(ln); | ||
596 | return 0; | ||
597 | error: | ||
598 | if (ln) | ||
599 | csio_shost_exit(ln); | ||
600 | |||
601 | return ret; | ||
602 | } | ||
603 | |||
604 | static int | ||
605 | csio_vport_delete(struct fc_vport *fc_vport) | ||
606 | { | ||
607 | struct csio_lnode *ln = *(struct csio_lnode **)fc_vport->dd_data; | ||
608 | struct Scsi_Host *shost = csio_ln_to_shost(ln); | ||
609 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
610 | int rmv; | ||
611 | |||
612 | spin_lock_irq(&hw->lock); | ||
613 | rmv = csio_is_hw_removing(hw); | ||
614 | spin_unlock_irq(&hw->lock); | ||
615 | |||
616 | if (rmv) { | ||
617 | csio_shost_exit(ln); | ||
618 | return 0; | ||
619 | } | ||
620 | |||
621 | /* Quiesce ios and send remove event to lnode */ | ||
622 | scsi_block_requests(shost); | ||
623 | spin_lock_irq(&hw->lock); | ||
624 | csio_scsim_cleanup_io_lnode(csio_hw_to_scsim(hw), ln); | ||
625 | csio_lnode_close(ln); | ||
626 | spin_unlock_irq(&hw->lock); | ||
627 | scsi_unblock_requests(shost); | ||
628 | |||
629 | /* Free vnp */ | ||
630 | if (fc_vport->vport_state != FC_VPORT_DISABLED) | ||
631 | csio_fcoe_free_vnp(hw, ln); | ||
632 | |||
633 | csio_shost_exit(ln); | ||
634 | return 0; | ||
635 | } | ||
636 | |||
637 | static int | ||
638 | csio_vport_disable(struct fc_vport *fc_vport, bool disable) | ||
639 | { | ||
640 | struct csio_lnode *ln = *(struct csio_lnode **)fc_vport->dd_data; | ||
641 | struct Scsi_Host *shost = csio_ln_to_shost(ln); | ||
642 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
643 | |||
644 | /* disable vport */ | ||
645 | if (disable) { | ||
646 | /* Quiesce ios and send stop event to lnode */ | ||
647 | scsi_block_requests(shost); | ||
648 | spin_lock_irq(&hw->lock); | ||
649 | csio_scsim_cleanup_io_lnode(csio_hw_to_scsim(hw), ln); | ||
650 | csio_lnode_stop(ln); | ||
651 | spin_unlock_irq(&hw->lock); | ||
652 | scsi_unblock_requests(shost); | ||
653 | |||
654 | /* Free vnp */ | ||
655 | csio_fcoe_free_vnp(hw, ln); | ||
656 | fc_vport_set_state(fc_vport, FC_VPORT_DISABLED); | ||
657 | csio_ln_err(ln, "vport disabled\n"); | ||
658 | return 0; | ||
659 | } else { | ||
660 | /* enable vport */ | ||
661 | fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING); | ||
662 | if (csio_fcoe_alloc_vnp(hw, ln)) { | ||
663 | csio_ln_err(ln, "vport enabled failed.\n"); | ||
664 | return -1; | ||
665 | } | ||
666 | csio_ln_err(ln, "vport enabled\n"); | ||
667 | return 0; | ||
668 | } | ||
669 | } | ||
670 | |||
671 | static void | ||
672 | csio_dev_loss_tmo_callbk(struct fc_rport *rport) | ||
673 | { | ||
674 | struct csio_rnode *rn; | ||
675 | struct csio_hw *hw; | ||
676 | struct csio_lnode *ln; | ||
677 | |||
678 | rn = *((struct csio_rnode **)rport->dd_data); | ||
679 | ln = csio_rnode_to_lnode(rn); | ||
680 | hw = csio_lnode_to_hw(ln); | ||
681 | |||
682 | spin_lock_irq(&hw->lock); | ||
683 | |||
684 | /* return if driver is being removed or same rnode comes back online */ | ||
685 | if (csio_is_hw_removing(hw) || csio_is_rnode_ready(rn)) | ||
686 | goto out; | ||
687 | |||
688 | csio_ln_dbg(ln, "devloss timeout on rnode:%p portid:x%x flowid:x%x\n", | ||
689 | rn, rn->nport_id, csio_rn_flowid(rn)); | ||
690 | |||
691 | CSIO_INC_STATS(ln, n_dev_loss_tmo); | ||
692 | |||
693 | /* | ||
694 | * enqueue devloss event to event worker thread to serialize all | ||
695 | * rnode events. | ||
696 | */ | ||
697 | if (csio_enqueue_evt(hw, CSIO_EVT_DEV_LOSS, &rn, sizeof(rn))) { | ||
698 | CSIO_INC_STATS(hw, n_evt_drop); | ||
699 | goto out; | ||
700 | } | ||
701 | |||
702 | if (!(hw->flags & CSIO_HWF_FWEVT_PENDING)) { | ||
703 | hw->flags |= CSIO_HWF_FWEVT_PENDING; | ||
704 | spin_unlock_irq(&hw->lock); | ||
705 | schedule_work(&hw->evtq_work); | ||
706 | return; | ||
707 | } | ||
708 | |||
709 | out: | ||
710 | spin_unlock_irq(&hw->lock); | ||
711 | } | ||
712 | |||
713 | /* FC transport functions template - Physical port */ | ||
714 | struct fc_function_template csio_fc_transport_funcs = { | ||
715 | .show_host_node_name = 1, | ||
716 | .show_host_port_name = 1, | ||
717 | .show_host_supported_classes = 1, | ||
718 | .show_host_supported_fc4s = 1, | ||
719 | .show_host_maxframe_size = 1, | ||
720 | |||
721 | .get_host_port_id = csio_get_host_port_id, | ||
722 | .show_host_port_id = 1, | ||
723 | |||
724 | .get_host_port_type = csio_get_host_port_type, | ||
725 | .show_host_port_type = 1, | ||
726 | |||
727 | .get_host_port_state = csio_get_host_port_state, | ||
728 | .show_host_port_state = 1, | ||
729 | |||
730 | .show_host_active_fc4s = 1, | ||
731 | .get_host_speed = csio_get_host_speed, | ||
732 | .show_host_speed = 1, | ||
733 | .get_host_fabric_name = csio_get_host_fabric_name, | ||
734 | .show_host_fabric_name = 1, | ||
735 | |||
736 | .get_fc_host_stats = csio_get_stats, | ||
737 | |||
738 | .dd_fcrport_size = sizeof(struct csio_rnode *), | ||
739 | .show_rport_maxframe_size = 1, | ||
740 | .show_rport_supported_classes = 1, | ||
741 | |||
742 | .set_rport_dev_loss_tmo = csio_set_rport_loss_tmo, | ||
743 | .show_rport_dev_loss_tmo = 1, | ||
744 | |||
745 | .show_starget_port_id = 1, | ||
746 | .show_starget_node_name = 1, | ||
747 | .show_starget_port_name = 1, | ||
748 | |||
749 | .dev_loss_tmo_callbk = csio_dev_loss_tmo_callbk, | ||
750 | .dd_fcvport_size = sizeof(struct csio_lnode *), | ||
751 | |||
752 | .vport_create = csio_vport_create, | ||
753 | .vport_disable = csio_vport_disable, | ||
754 | .vport_delete = csio_vport_delete, | ||
755 | }; | ||
756 | |||
757 | /* FC transport functions template - Virtual port */ | ||
758 | struct fc_function_template csio_fc_transport_vport_funcs = { | ||
759 | .show_host_node_name = 1, | ||
760 | .show_host_port_name = 1, | ||
761 | .show_host_supported_classes = 1, | ||
762 | .show_host_supported_fc4s = 1, | ||
763 | .show_host_maxframe_size = 1, | ||
764 | |||
765 | .get_host_port_id = csio_get_host_port_id, | ||
766 | .show_host_port_id = 1, | ||
767 | |||
768 | .get_host_port_type = csio_get_host_port_type, | ||
769 | .show_host_port_type = 1, | ||
770 | |||
771 | .get_host_port_state = csio_get_host_port_state, | ||
772 | .show_host_port_state = 1, | ||
773 | .show_host_active_fc4s = 1, | ||
774 | |||
775 | .get_host_speed = csio_get_host_speed, | ||
776 | .show_host_speed = 1, | ||
777 | |||
778 | .get_host_fabric_name = csio_get_host_fabric_name, | ||
779 | .show_host_fabric_name = 1, | ||
780 | |||
781 | .get_fc_host_stats = csio_get_stats, | ||
782 | |||
783 | .dd_fcrport_size = sizeof(struct csio_rnode *), | ||
784 | .show_rport_maxframe_size = 1, | ||
785 | .show_rport_supported_classes = 1, | ||
786 | |||
787 | .set_rport_dev_loss_tmo = csio_set_rport_loss_tmo, | ||
788 | .show_rport_dev_loss_tmo = 1, | ||
789 | |||
790 | .show_starget_port_id = 1, | ||
791 | .show_starget_node_name = 1, | ||
792 | .show_starget_port_name = 1, | ||
793 | |||
794 | .dev_loss_tmo_callbk = csio_dev_loss_tmo_callbk, | ||
795 | |||
796 | }; | ||
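These templates are handed to the SCSI FC transport class when the driver's Scsi_Hosts are created; that wiring lives in csio_init.c later in this patch. As a minimal sketch of the attach step — fc_attach_transport()/fc_release_transport() are the standard scsi_transport_fc API, but the helper below and its error handling are illustrative, not code from this patch:

    #include <scsi/scsi_transport_fc.h>

    static struct scsi_transport_template *csio_fcoe_transport;
    static struct scsi_transport_template *csio_fcoe_transport_vport;

    /* Illustrative helper: attach both templates at module init. */
    static int csio_attach_transports(void)
    {
            csio_fcoe_transport = fc_attach_transport(&csio_fc_transport_funcs);
            if (!csio_fcoe_transport)
                    return -ENODEV;

            csio_fcoe_transport_vport =
                    fc_attach_transport(&csio_fc_transport_vport_funcs);
            if (!csio_fcoe_transport_vport) {
                    fc_release_transport(csio_fcoe_transport);
                    return -ENODEV;
            }
            return 0;
    }

The returned scsi_transport_template pointers are what host setup then assigns to shost->transportt for the physical and NPIV hosts respectively.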
diff --git a/drivers/scsi/csiostor/csio_defs.h b/drivers/scsi/csiostor/csio_defs.h
new file mode 100644
index 000000000000..c38017b4af98
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_defs.h
@@ -0,0 +1,121 @@
1 | /* | ||
2 | * This file is part of the Chelsio FCoE driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #ifndef __CSIO_DEFS_H__ | ||
36 | #define __CSIO_DEFS_H__ | ||
37 | |||
38 | #include <linux/kernel.h> | ||
39 | #include <linux/stddef.h> | ||
40 | #include <linux/timer.h> | ||
41 | #include <linux/list.h> | ||
42 | #include <linux/bug.h> | ||
43 | #include <linux/pci.h> | ||
44 | #include <linux/jiffies.h> | ||
45 | |||
46 | #define CSIO_INVALID_IDX 0xFFFFFFFF | ||
47 | #define CSIO_INC_STATS(elem, val) ((elem)->stats.val++) | ||
48 | #define CSIO_DEC_STATS(elem, val) ((elem)->stats.val--) | ||
49 | #define CSIO_VALID_WWN(__n) ((*__n >> 4) == 0x5 ? true : false) | ||
50 | #define CSIO_DID_MASK 0xFFFFFF | ||
51 | #define CSIO_WORD_TO_BYTE 4 | ||
52 | |||
53 | #ifndef readq | ||
54 | static inline u64 readq(void __iomem *addr) | ||
55 | { | ||
56 | return readl(addr) + ((u64)readl(addr + 4) << 32); | ||
57 | } | ||
58 | |||
59 | static inline void writeq(u64 val, void __iomem *addr) | ||
60 | { | ||
61 | writel(val, addr); | ||
62 | writel(val >> 32, addr + 4); | ||
63 | } | ||
64 | #endif | ||
65 | |||
66 | static inline int | ||
67 | csio_list_deleted(struct list_head *list) | ||
68 | { | ||
69 | return ((list->next == list) && (list->prev == list)); | ||
70 | } | ||
71 | |||
72 | #define csio_list_next(elem) (((struct list_head *)(elem))->next) | ||
73 | #define csio_list_prev(elem) (((struct list_head *)(elem))->prev) | ||
74 | |||
75 | /* State machine */ | ||
76 | typedef void (*csio_sm_state_t)(void *, uint32_t); | ||
77 | |||
78 | struct csio_sm { | ||
79 | struct list_head sm_list; | ||
80 | csio_sm_state_t sm_state; | ||
81 | }; | ||
82 | |||
83 | static inline void | ||
84 | csio_set_state(void *smp, void *state) | ||
85 | { | ||
86 | ((struct csio_sm *)smp)->sm_state = (csio_sm_state_t)state; | ||
87 | } | ||
88 | |||
89 | static inline void | ||
90 | csio_init_state(struct csio_sm *smp, void *state) | ||
91 | { | ||
92 | csio_set_state(smp, state); | ||
93 | } | ||
94 | |||
95 | static inline void | ||
96 | csio_post_event(void *smp, uint32_t evt) | ||
97 | { | ||
98 | ((struct csio_sm *)smp)->sm_state(smp, evt); | ||
99 | } | ||
100 | |||
101 | static inline csio_sm_state_t | ||
102 | csio_get_state(void *smp) | ||
103 | { | ||
104 | return ((struct csio_sm *)smp)->sm_state; | ||
105 | } | ||
106 | |||
107 | static inline bool | ||
108 | csio_match_state(void *smp, void *state) | ||
109 | { | ||
110 | return (csio_get_state(smp) == (csio_sm_state_t)state); | ||
111 | } | ||
112 | |||
113 | #define CSIO_ASSERT(cond) BUG_ON(!(cond)) | ||
114 | |||
115 | #ifdef __CSIO_DEBUG__ | ||
116 | #define CSIO_DB_ASSERT(__c) CSIO_ASSERT((__c)) | ||
117 | #else | ||
118 | #define CSIO_DB_ASSERT(__c) | ||
119 | #endif | ||
120 | |||
121 | #endif /* ifndef __CSIO_DEFS_H__ */ | ||
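The csio_sm helpers above are the backbone of the driver's event-driven state machines (HW, lnode, rnode). A minimal sketch of a machine built on them, using purely illustrative state and event names:

    /* Each state is a function that consumes an event and may
     * transition by pointing sm_state at another state function.
     */
    enum demo_ev { DEMO_EV_START = 1, DEMO_EV_STOP };

    static void demo_sm_stopped(void *smp, uint32_t evt);

    static void demo_sm_running(void *smp, uint32_t evt)
    {
            if (evt == DEMO_EV_STOP)
                    csio_set_state(smp, demo_sm_stopped);
    }

    static void demo_sm_stopped(void *smp, uint32_t evt)
    {
            if (evt == DEMO_EV_START)
                    csio_set_state(smp, demo_sm_running);
    }

    /* Usage:
     *      struct csio_sm sm;
     *
     *      csio_init_state(&sm, demo_sm_stopped);
     *      csio_post_event(&sm, DEMO_EV_START);
     *      CSIO_ASSERT(csio_match_state(&sm, demo_sm_running));
     */

csio_hw.c below follows exactly this pattern: csio_hws_uninit(), csio_hws_ready() and friends are the state functions, and csio_is_hw_ready() is just a csio_match_state() check.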
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
new file mode 100644
index 000000000000..963c6c1d68b7
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -0,0 +1,4395 @@
1 | /* | ||
2 | * This file is part of the Chelsio FCoE driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/pci.h> | ||
36 | #include <linux/pci_regs.h> | ||
37 | #include <linux/firmware.h> | ||
38 | #include <linux/stddef.h> | ||
39 | #include <linux/delay.h> | ||
40 | #include <linux/string.h> | ||
41 | #include <linux/compiler.h> | ||
42 | #include <linux/jiffies.h> | ||
43 | #include <linux/kernel.h> | ||
44 | #include <linux/log2.h> | ||
45 | |||
46 | #include "csio_hw.h" | ||
47 | #include "csio_lnode.h" | ||
48 | #include "csio_rnode.h" | ||
49 | |||
50 | int csio_force_master; | ||
51 | int csio_dbg_level = 0xFEFF; | ||
52 | unsigned int csio_port_mask = 0xf; | ||
53 | |||
54 | /* Default FW event queue entries. */ | ||
55 | static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE; | ||
56 | |||
57 | /* Default MSI param level */ | ||
58 | int csio_msi = 2; | ||
59 | |||
60 | /* FCoE function instances */ | ||
61 | static int dev_num; | ||
62 | |||
63 | /* FCoE Adapter types & its description */ | ||
64 | static const struct csio_adap_desc csio_fcoe_adapters[] = { | ||
65 | {"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"}, | ||
66 | {"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"}, | ||
67 | {"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"}, | ||
68 | {"T440-CR 10G", "Chelsio T440-CR 10G [FCoE]"}, | ||
69 | {"T420-BCH 10G", "Chelsio T420-BCH 10G [FCoE]"}, | ||
70 | {"T440-BCH 10G", "Chelsio T440-BCH 10G [FCoE]"}, | ||
71 | {"T440-CH 10G", "Chelsio T440-CH 10G [FCoE]"}, | ||
72 | {"T420-SO 10G", "Chelsio T420-SO 10G [FCoE]"}, | ||
73 | {"T420-CX4 10G", "Chelsio T420-CX4 10G [FCoE]"}, | ||
74 | {"T420-BT 10G", "Chelsio T420-BT 10G [FCoE]"}, | ||
75 | {"T404-BT 1G", "Chelsio T404-BT 1G [FCoE]"}, | ||
76 | {"B420-SR 10G", "Chelsio B420-SR 10G [FCoE]"}, | ||
77 | {"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"}, | ||
78 | {"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"}, | ||
79 | {"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"}, | ||
80 | {"T4 FPGA", "Chelsio T4 FPGA [FCoE]"} | ||
81 | }; | ||
82 | |||
83 | static void csio_mgmtm_cleanup(struct csio_mgmtm *); | ||
84 | static void csio_hw_mbm_cleanup(struct csio_hw *); | ||
85 | |||
86 | /* State machine forward declarations */ | ||
87 | static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev); | ||
88 | static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev); | ||
89 | static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev); | ||
90 | static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev); | ||
91 | static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev); | ||
92 | static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev); | ||
93 | static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev); | ||
94 | static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev); | ||
95 | static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev); | ||
96 | |||
97 | static void csio_hw_initialize(struct csio_hw *hw); | ||
98 | static void csio_evtq_stop(struct csio_hw *hw); | ||
99 | static void csio_evtq_start(struct csio_hw *hw); | ||
100 | |||
101 | int csio_is_hw_ready(struct csio_hw *hw) | ||
102 | { | ||
103 | return csio_match_state(hw, csio_hws_ready); | ||
104 | } | ||
105 | |||
106 | int csio_is_hw_removing(struct csio_hw *hw) | ||
107 | { | ||
108 | return csio_match_state(hw, csio_hws_removing); | ||
109 | } | ||
110 | |||
111 | |||
112 | /* | ||
113 | * csio_hw_wait_op_done_val - wait until an operation is completed | ||
114 | * @hw: the HW module | ||
115 | * @reg: the register to check for completion | ||
116 | * @mask: a single-bit field within @reg that indicates completion | ||
117 | * @polarity: the value of the field when the operation is completed | ||
118 | * @attempts: number of check iterations | ||
119 | * @delay: delay in usecs between iterations | ||
120 | * @valp: where to store the value of the register at completion time | ||
121 | * | ||
122 | * Wait until an operation is completed by checking a bit in a register | ||
123 | * up to @attempts times. If @valp is not NULL the value of the register | ||
124 | * at the time it indicated completion is stored there. Returns 0 if the | ||
125 | * operation completes and -EAGAIN otherwise. | ||
126 | */ | ||
127 | static int | ||
128 | csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask, | ||
129 | int polarity, int attempts, int delay, uint32_t *valp) | ||
130 | { | ||
131 | uint32_t val; | ||
132 | while (1) { | ||
133 | val = csio_rd_reg32(hw, reg); | ||
134 | |||
135 | if (!!(val & mask) == polarity) { | ||
136 | if (valp) | ||
137 | *valp = val; | ||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | if (--attempts == 0) | ||
142 | return -EAGAIN; | ||
143 | if (delay) | ||
144 | udelay(delay); | ||
145 | } | ||
146 | } | ||
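csio_hw_mc_read() just below is a real caller — it starts a BIST command and then waits for START_BIST to clear. In general a call looks like this (DEMO_STATUS/DEMO_BUSY are illustrative names, not registers from this patch):

    uint32_t val;

    /* Poll up to 5 times, 10us apart, for DEMO_BUSY to read as 0;
     * on success 'val' holds the register value seen at completion.
     */
    if (csio_hw_wait_op_done_val(hw, DEMO_STATUS, DEMO_BUSY, 0, 5, 10, &val))
            return -EAGAIN;         /* operation never completed */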
147 | |||
148 | void | ||
149 | csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask, | ||
150 | uint32_t value) | ||
151 | { | ||
152 | uint32_t val = csio_rd_reg32(hw, reg) & ~mask; | ||
153 | |||
154 | csio_wr_reg32(hw, val | value, reg); | ||
155 | /* Flush */ | ||
156 | csio_rd_reg32(hw, reg); | ||
157 | |||
158 | } | ||
159 | |||
160 | /* | ||
161 | * csio_hw_mc_read - read from MC through backdoor accesses | ||
162 | * @hw: the hw module | ||
163 | * @addr: address of first byte requested | ||
164 | * @data: 64 bytes of data containing the requested address | ||
165 | * @ecc: where to store the corresponding 64-bit ECC word | ||
166 | * | ||
167 | * Read 64 bytes of data from MC starting at a 64-byte-aligned address | ||
168 | * that covers the requested address @addr. If @ecc is not %NULL it | ||
169 | * is assigned the 64-bit ECC word for the read data. | ||
170 | */ | ||
171 | int | ||
172 | csio_hw_mc_read(struct csio_hw *hw, uint32_t addr, uint32_t *data, | ||
173 | uint64_t *ecc) | ||
174 | { | ||
175 | int i; | ||
176 | |||
177 | if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST) | ||
178 | return -EBUSY; | ||
179 | csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR); | ||
180 | csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN); | ||
181 | csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN); | ||
182 | csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1), | ||
183 | MC_BIST_CMD); | ||
184 | i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST, | ||
185 | 0, 10, 1, NULL); | ||
186 | if (i) | ||
187 | return i; | ||
188 | |||
189 | #define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i) | ||
190 | |||
191 | for (i = 15; i >= 0; i--) | ||
192 | *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i))); | ||
193 | if (ecc) | ||
194 | *ecc = csio_rd_reg64(hw, MC_DATA(16)); | ||
195 | #undef MC_DATA | ||
196 | return 0; | ||
197 | } | ||
198 | |||
199 | /* | ||
200 | * csio_hw_edc_read - read from EDC through backdoor accesses | ||
201 | * @hw: the hw module | ||
202 | * @idx: which EDC to access | ||
203 | * @addr: address of first byte requested | ||
204 | * @data: 64 bytes of data containing the requested address | ||
205 | * @ecc: where to store the corresponding 64-bit ECC word | ||
206 | * | ||
207 | * Read 64 bytes of data from EDC starting at a 64-byte-aligned address | ||
208 | * that covers the requested address @addr. If @ecc is not %NULL it | ||
209 | * is assigned the 64-bit ECC word for the read data. | ||
210 | */ | ||
211 | int | ||
212 | csio_hw_edc_read(struct csio_hw *hw, int idx, uint32_t addr, uint32_t *data, | ||
213 | uint64_t *ecc) | ||
214 | { | ||
215 | int i; | ||
216 | |||
217 | idx *= EDC_STRIDE; | ||
218 | if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST) | ||
219 | return -EBUSY; | ||
220 | csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx); | ||
221 | csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx); | ||
222 | csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx); | ||
223 | csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST, | ||
224 | EDC_BIST_CMD + idx); | ||
225 | i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST, | ||
226 | 0, 10, 1, NULL); | ||
227 | if (i) | ||
228 | return i; | ||
229 | |||
230 | #define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx) | ||
231 | |||
232 | for (i = 15; i >= 0; i--) | ||
233 | *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i))); | ||
234 | if (ecc) | ||
235 | *ecc = csio_rd_reg64(hw, EDC_DATA(16)); | ||
236 | #undef EDC_DATA | ||
237 | return 0; | ||
238 | } | ||
239 | |||
240 | /* | ||
241 | * csio_mem_win_rw - read/write memory through PCIE memory window | ||
242 | * @hw: the adapter | ||
243 | * @addr: address of first byte requested | ||
244 | * @data: MEMWIN0_APERTURE bytes of data containing the requested address | ||
245 | * @dir: direction of transfer 1 => read, 0 => write | ||
246 | * | ||
247 | * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a | ||
248 | * MEMWIN0_APERTURE-byte-aligned address that covers the requested | ||
249 | * address @addr. | ||
250 | */ | ||
251 | static int | ||
252 | csio_mem_win_rw(struct csio_hw *hw, u32 addr, __be32 *data, int dir) | ||
253 | { | ||
254 | int i; | ||
255 | |||
256 | /* | ||
257 | * Setup offset into PCIE memory window. Address must be a | ||
258 | * MEMWIN0_APERTURE-byte-aligned address. (Read back MA register to | ||
259 | * ensure that changes propagate before we attempt to use the new | ||
260 | * values.) | ||
261 | */ | ||
262 | csio_wr_reg32(hw, addr & ~(MEMWIN0_APERTURE - 1), | ||
263 | PCIE_MEM_ACCESS_OFFSET); | ||
264 | csio_rd_reg32(hw, PCIE_MEM_ACCESS_OFFSET); | ||
265 | |||
266 | /* Collect data 4 bytes at a time, up to MEMWIN0_APERTURE bytes */ | ||
267 | for (i = 0; i < MEMWIN0_APERTURE; i = i + sizeof(__be32)) { | ||
268 | if (dir) | ||
269 | *data++ = csio_rd_reg32(hw, (MEMWIN0_BASE + i)); | ||
270 | else | ||
271 | csio_wr_reg32(hw, *data++, (MEMWIN0_BASE + i)); | ||
272 | } | ||
273 | |||
274 | return 0; | ||
275 | } | ||
276 | |||
277 | /* | ||
278 | * csio_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window | ||
279 | * @hw: the csio_hw | ||
280 | * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC | ||
281 | * @addr: address within indicated memory type | ||
282 | * @len: amount of memory to transfer | ||
283 | * @buf: host memory buffer | ||
284 | * @dir: direction of transfer 1 => read, 0 => write | ||
285 | * | ||
286 | * Reads/writes an [almost] arbitrary memory region in the firmware: the | ||
287 | * firmware memory address, length and host buffer must be aligned on | ||
288 | * 32-bit boundaries. The memory is transferred as a raw byte sequence | ||
289 | * from/to the firmware's memory. If this memory contains data | ||
290 | * structures which contain multi-byte integers, it is the caller's | ||
291 | * responsibility to perform appropriate byte order conversions. | ||
292 | */ | ||
293 | static int | ||
294 | csio_memory_rw(struct csio_hw *hw, int mtype, u32 addr, u32 len, | ||
295 | uint32_t *buf, int dir) | ||
296 | { | ||
297 | uint32_t pos, start, end, offset, memoffset; | ||
298 | int ret; | ||
299 | __be32 *data; | ||
300 | |||
301 | /* | ||
302 | * Argument sanity checks ... | ||
303 | */ | ||
304 | if ((addr & 0x3) || (len & 0x3)) | ||
305 | return -EINVAL; | ||
306 | |||
307 | data = kzalloc(MEMWIN0_APERTURE, GFP_KERNEL); | ||
308 | if (!data) | ||
309 | return -ENOMEM; | ||
310 | |||
311 | /* Offset into the region of memory which is being accessed | ||
312 | * MEM_EDC0 = 0 | ||
313 | * MEM_EDC1 = 1 | ||
314 | * MEM_MC = 2 | ||
315 | */ | ||
316 | memoffset = (mtype * (5 * 1024 * 1024)); | ||
317 | |||
318 | /* Determine the PCIE_MEM_ACCESS_OFFSET */ | ||
319 | addr = addr + memoffset; | ||
320 | |||
321 | /* | ||
322 | * The underlying EDC/MC read routines read MEMWIN0_APERTURE bytes | ||
323 | * at a time so we need to round down the start and round up the end. | ||
324 | * We'll start copying out of the first line at (addr - start) a word | ||
325 | * at a time. | ||
326 | */ | ||
327 | start = addr & ~(MEMWIN0_APERTURE-1); | ||
328 | end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1); | ||
329 | offset = (addr - start)/sizeof(__be32); | ||
330 | |||
331 | for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) { | ||
332 | /* | ||
333 | * If we're writing, copy the data from the caller's memory | ||
334 | * buffer | ||
335 | */ | ||
336 | if (!dir) { | ||
337 | /* | ||
338 | * If we're doing a partial write, then we need to do | ||
339 | * a read-modify-write ... | ||
340 | */ | ||
341 | if (offset || len < MEMWIN0_APERTURE) { | ||
342 | ret = csio_mem_win_rw(hw, pos, data, 1); | ||
343 | if (ret) { | ||
344 | kfree(data); | ||
345 | return ret; | ||
346 | } | ||
347 | } | ||
348 | while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) && | ||
349 | len > 0) { | ||
350 | data[offset++] = *buf++; | ||
351 | len -= sizeof(__be32); | ||
352 | } | ||
353 | } | ||
354 | |||
355 | /* | ||
356 | * Transfer a block of memory and bail if there's an error. | ||
357 | */ | ||
358 | ret = csio_mem_win_rw(hw, pos, data, dir); | ||
359 | if (ret) { | ||
360 | kfree(data); | ||
361 | return ret; | ||
362 | } | ||
363 | |||
364 | /* | ||
365 | * If we're reading, copy the data into the caller's memory | ||
366 | * buffer. | ||
367 | */ | ||
368 | if (dir) | ||
369 | while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) && | ||
370 | len > 0) { | ||
371 | *buf++ = data[offset++]; | ||
372 | len -= sizeof(__be32); | ||
373 | } | ||
374 | } | ||
375 | |||
376 | kfree(data); | ||
377 | |||
378 | return 0; | ||
379 | } | ||
380 | |||
381 | static int | ||
382 | csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, __be32 *buf) | ||
383 | { | ||
384 | return csio_memory_rw(hw, mtype, addr, len, buf, 0); | ||
385 | } | ||
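Because the window moves a raw byte sequence, byte-order handling stays with the caller. A sketch of a hypothetical read (the memory type, address and length are illustrative only):

    u32 buf[16];    /* 64 bytes; address, length and buffer 32-bit aligned */
    int i, ret;

    ret = csio_memory_rw(hw, MEM_EDC0, 0x1000, sizeof(buf), buf, 1 /* read */);
    if (ret)
            return ret;

    /* If the region holds big-endian firmware structures, the caller
     * swaps each word itself.
     */
    for (i = 0; i < ARRAY_SIZE(buf); i++)
            buf[i] = ntohl(buf[i]);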
386 | |||
387 | /* | ||
388 | * EEPROM reads take a few tens of us while writes can take a bit over 5 ms. | ||
389 | */ | ||
390 | #define EEPROM_MAX_RD_POLL 40 | ||
391 | #define EEPROM_MAX_WR_POLL 6 | ||
392 | #define EEPROM_STAT_ADDR 0x7bfc | ||
393 | #define VPD_BASE 0x400 | ||
394 | #define VPD_BASE_OLD 0 | ||
395 | #define VPD_LEN 512 | ||
396 | #define VPD_INFO_FLD_HDR_SIZE 3 | ||
397 | |||
398 | /* | ||
399 | * csio_hw_seeprom_read - read a serial EEPROM location | ||
400 | * @hw: hw to read | ||
401 | * @addr: EEPROM virtual address | ||
402 | * @data: where to store the read data | ||
403 | * | ||
404 | * Read a 32-bit word from a location in serial EEPROM using the card's PCI | ||
405 | * VPD capability. Note that this function must be called with a virtual | ||
406 | * address. | ||
407 | */ | ||
408 | static int | ||
409 | csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data) | ||
410 | { | ||
411 | uint16_t val = 0; | ||
412 | int attempts = EEPROM_MAX_RD_POLL; | ||
413 | uint32_t base = hw->params.pci.vpd_cap_addr; | ||
414 | |||
415 | if (addr >= EEPROMVSIZE || (addr & 3)) | ||
416 | return -EINVAL; | ||
417 | |||
418 | pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr); | ||
419 | |||
420 | do { | ||
421 | udelay(10); | ||
422 | pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val); | ||
423 | } while (!(val & PCI_VPD_ADDR_F) && --attempts); | ||
424 | |||
425 | if (!(val & PCI_VPD_ADDR_F)) { | ||
426 | csio_err(hw, "reading EEPROM address 0x%x failed\n", addr); | ||
427 | return -EINVAL; | ||
428 | } | ||
429 | |||
430 | pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data); | ||
431 | *data = le32_to_cpu(*data); | ||
432 | return 0; | ||
433 | } | ||
434 | |||
435 | /* | ||
436 | * Partial EEPROM Vital Product Data structure. Includes only the ID and | ||
437 | * VPD-R sections. | ||
438 | */ | ||
439 | struct t4_vpd_hdr { | ||
440 | u8 id_tag; | ||
441 | u8 id_len[2]; | ||
442 | u8 id_data[ID_LEN]; | ||
443 | u8 vpdr_tag; | ||
444 | u8 vpdr_len[2]; | ||
445 | }; | ||
446 | |||
447 | /* | ||
448 | * csio_hw_get_vpd_keyword_val - Locates an information field keyword in | ||
449 | * the VPD | ||
450 | * @v: Pointer to buffered vpd data structure | ||
451 | * @kw: The keyword to search for | ||
452 | * | ||
453 | * Returns the value of the information field keyword or | ||
454 | * -EINVAL otherwise. | ||
455 | */ | ||
456 | static int | ||
457 | csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw) | ||
458 | { | ||
459 | int32_t i; | ||
460 | int32_t offset, len; | ||
461 | const uint8_t *buf = &v->id_tag; | ||
462 | const uint8_t *vpdr_len = &v->vpdr_tag; | ||
463 | offset = sizeof(struct t4_vpd_hdr); | ||
464 | len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8); | ||
465 | |||
466 | if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) | ||
467 | return -EINVAL; | ||
468 | |||
469 | for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) { | ||
470 | if (memcmp(buf + i, kw, 2) == 0) { | ||
471 | i += VPD_INFO_FLD_HDR_SIZE; | ||
472 | return i; | ||
473 | } | ||
474 | |||
475 | i += VPD_INFO_FLD_HDR_SIZE + buf[i+2]; | ||
476 | } | ||
477 | |||
478 | return -EINVAL; | ||
479 | } | ||
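For reference, a VPD-R information field is two keyword characters, a one-byte length, then the data — which is what VPD_INFO_FLD_HDR_SIZE (3) accounts for. A 16-byte serial number, for instance, sits in the buffer as:

    /* offset:  +0    +1    +2     +3 .. +18
     * bytes :  'S'   'N'   0x10   <16 bytes of serial-number data>
     *
     * csio_hw_get_vpd_keyword_val(v, "SN") returns the offset of the
     * data (+3 here); csio_hw_get_vpd_params() below reads the length
     * back as vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2].
     */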
480 | |||
481 | static int | ||
482 | csio_pci_capability(struct pci_dev *pdev, int cap, int *pos) | ||
483 | { | ||
484 | *pos = pci_find_capability(pdev, cap); | ||
485 | if (*pos) | ||
486 | return 0; | ||
487 | |||
488 | return -1; | ||
489 | } | ||
490 | |||
491 | /* | ||
492 | * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM | ||
493 | * @hw: HW module | ||
494 | * @p: where to store the parameters | ||
495 | * | ||
496 | * Reads card parameters stored in VPD EEPROM. | ||
497 | */ | ||
498 | static int | ||
499 | csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p) | ||
500 | { | ||
501 | int i, ret, ec, sn, addr; | ||
502 | uint8_t *vpd, csum; | ||
503 | const struct t4_vpd_hdr *v; | ||
504 | /* To get around compilation warning from strstrip */ | ||
505 | char *s; | ||
506 | |||
507 | if (csio_is_valid_vpd(hw)) | ||
508 | return 0; | ||
509 | |||
510 | ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD, | ||
511 | &hw->params.pci.vpd_cap_addr); | ||
512 | if (ret) | ||
513 | return -EINVAL; | ||
514 | |||
515 | vpd = kzalloc(VPD_LEN, GFP_ATOMIC); | ||
516 | if (vpd == NULL) | ||
517 | return -ENOMEM; | ||
518 | |||
519 | /* | ||
520 | * Card information normally starts at VPD_BASE but early cards had | ||
521 | * it at 0. | ||
522 | */ | ||
523 | ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd)); | ||
524 | addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD; | ||
525 | |||
526 | for (i = 0; i < VPD_LEN; i += 4) { | ||
527 | ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i)); | ||
528 | if (ret) { | ||
529 | kfree(vpd); | ||
530 | return ret; | ||
531 | } | ||
532 | } | ||
533 | |||
534 | /* Reset the VPD flag! */ | ||
535 | hw->flags &= (~CSIO_HWF_VPD_VALID); | ||
536 | |||
537 | v = (const struct t4_vpd_hdr *)vpd; | ||
538 | |||
539 | #define FIND_VPD_KW(var, name) do { \ | ||
540 | var = csio_hw_get_vpd_keyword_val(v, name); \ | ||
541 | if (var < 0) { \ | ||
542 | csio_err(hw, "missing VPD keyword " name "\n"); \ | ||
543 | kfree(vpd); \ | ||
544 | return -EINVAL; \ | ||
545 | } \ | ||
546 | } while (0) | ||
547 | |||
548 | FIND_VPD_KW(i, "RV"); | ||
549 | for (csum = 0; i >= 0; i--) | ||
550 | csum += vpd[i]; | ||
551 | |||
552 | if (csum) { | ||
553 | csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum); | ||
554 | kfree(vpd); | ||
555 | return -EINVAL; | ||
556 | } | ||
557 | FIND_VPD_KW(ec, "EC"); | ||
558 | FIND_VPD_KW(sn, "SN"); | ||
559 | #undef FIND_VPD_KW | ||
560 | |||
561 | memcpy(p->id, v->id_data, ID_LEN); | ||
562 | s = strstrip(p->id); | ||
563 | memcpy(p->ec, vpd + ec, EC_LEN); | ||
564 | s = strstrip(p->ec); | ||
565 | i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2]; | ||
566 | memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); | ||
567 | s = strstrip(p->sn); | ||
568 | |||
569 | csio_valid_vpd_copied(hw); | ||
570 | |||
571 | kfree(vpd); | ||
572 | return 0; | ||
573 | } | ||
574 | |||
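The "RV" handling above encodes the PCI VPD checksum rule: the RV field stores a byte chosen so that every byte from offset 0 up to and including it sums to zero mod 256, which is why the loop walks backwards from the RV value offset and demands a zero total. A small self-contained sketch of the same verification (vpd_csum_ok and the forged buffer are illustrative only):

    #include <stdint.h>

    /* Good when bytes 0..rv_off (rv_off = offset of the RV byte) sum to 0. */
    static int vpd_csum_ok(const uint8_t *vpd, int rv_off)
    {
        uint8_t csum = 0;
        int i;

        for (i = rv_off; i >= 0; i--)
            csum += vpd[i];
        return csum == 0;
    }

    int main(void)
    {
        uint8_t vpd[] = { 0x10, 0x20, 0x30, 0x00 };

        vpd[3] = (uint8_t)(0x100 - (0x10 + 0x20 + 0x30)); /* forge RV byte */
        return vpd_csum_ok(vpd, 3) ? 0 : 1;
    }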
575 | /* | ||
576 | * csio_hw_sf1_read - read data from the serial flash | ||
577 | * @hw: the HW module | ||
578 | * @byte_cnt: number of bytes to read | ||
579 | * @cont: whether another operation will be chained | ||
580 | * @lock: whether to lock SF for PL access only | ||
581 | * @valp: where to store the read data | ||
582 | * | ||
583 | * Reads up to 4 bytes of data from the serial flash. The location of | ||
584 | * the read needs to be specified prior to calling this by issuing the | ||
585 | * appropriate commands to the serial flash. | ||
586 | */ | ||
587 | static int | ||
588 | csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont, | ||
589 | int32_t lock, uint32_t *valp) | ||
590 | { | ||
591 | int ret; | ||
592 | |||
593 | if (!byte_cnt || byte_cnt > 4) | ||
594 | return -EINVAL; | ||
595 | if (csio_rd_reg32(hw, SF_OP) & SF_BUSY) | ||
596 | return -EBUSY; | ||
597 | |||
598 | cont = cont ? SF_CONT : 0; | ||
599 | lock = lock ? SF_LOCK : 0; | ||
600 | |||
601 | csio_wr_reg32(hw, lock | cont | BYTECNT(byte_cnt - 1), SF_OP); | ||
602 | ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, | ||
603 | 10, NULL); | ||
604 | if (!ret) | ||
605 | *valp = csio_rd_reg32(hw, SF_DATA); | ||
606 | return ret; | ||
607 | } | ||
608 | |||
609 | /* | ||
610 | * csio_hw_sf1_write - write data to the serial flash | ||
611 | * @hw: the HW module | ||
612 | * @byte_cnt: number of bytes to write | ||
613 | * @cont: whether another operation will be chained | ||
614 | * @lock: whether to lock SF for PL access only | ||
615 | * @val: value to write | ||
616 | * | ||
617 | * Writes up to 4 bytes of data to the serial flash. The location of | ||
618 | * the write needs to be specified prior to calling this by issuing the | ||
619 | * appropriate commands to the serial flash. | ||
620 | */ | ||
621 | static int | ||
622 | csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont, | ||
623 | int32_t lock, uint32_t val) | ||
624 | { | ||
625 | if (!byte_cnt || byte_cnt > 4) | ||
626 | return -EINVAL; | ||
627 | if (csio_rd_reg32(hw, SF_OP) & SF_BUSY) | ||
628 | return -EBUSY; | ||
629 | |||
630 | cont = cont ? SF_CONT : 0; | ||
631 | lock = lock ? SF_LOCK : 0; | ||
632 | |||
633 | csio_wr_reg32(hw, val, SF_DATA); | ||
634 | csio_wr_reg32(hw, cont | BYTECNT(byte_cnt - 1) | OP_WR | lock, SF_OP); | ||
635 | |||
636 | return csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, | ||
637 | 10, NULL); | ||
638 | } | ||
639 | |||
640 | /* | ||
641 | * csio_hw_flash_wait_op - wait for a flash operation to complete | ||
642 | * @hw: the HW module | ||
643 | * @attempts: max number of polls of the status register | ||
644 | * @delay: delay between polls in ms | ||
645 | * | ||
646 | * Wait for a flash operation to complete by polling the status register. | ||
647 | */ | ||
648 | static int | ||
649 | csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay) | ||
650 | { | ||
651 | int ret; | ||
652 | uint32_t status; | ||
653 | |||
654 | while (1) { | ||
655 | ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS); | ||
656 | if (ret != 0) | ||
657 | return ret; | ||
658 | |||
659 | ret = csio_hw_sf1_read(hw, 1, 0, 1, &status); | ||
660 | if (ret != 0) | ||
661 | return ret; | ||
662 | |||
663 | if (!(status & 1)) | ||
664 | return 0; | ||
665 | if (--attempts == 0) | ||
666 | return -EAGAIN; | ||
667 | if (delay) | ||
668 | msleep(delay); | ||
669 | } | ||
670 | } | ||
671 | |||
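csio_hw_flash_wait_op() above is the classic poll-with-budget pattern: write the SF_RD_STATUS opcode, read one status byte back, and loop while the busy bit (bit 0) is set, giving up after the given number of attempts. A hedged userspace sketch of the same control flow, with the two register round trips stubbed out (read_flash_status and sleep_ms are stand-ins, not driver calls):

    #include <stdint.h>

    /* Stand-in for the SF_RD_STATUS write + one-byte status read. */
    static int read_flash_status(uint32_t *status)
    {
        *status = 0; /* pretend the part has gone idle */
        return 0;
    }

    static void sleep_ms(int ms) { (void)ms; /* msleep() stand-in */ }

    static int flash_wait_op(int attempts, int delay_ms)
    {
        uint32_t status;

        while (1) {
            if (read_flash_status(&status) != 0)
                return -1;          /* bus error */
            if (!(status & 1))
                return 0;           /* busy bit clear: done */
            if (--attempts == 0)
                return -2;          /* -EAGAIN in the driver */
            if (delay_ms)
                sleep_ms(delay_ms);
        }
    }

    int main(void) { return flash_wait_op(8, 1); }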
672 | /* | ||
673 | * csio_hw_read_flash - read words from serial flash | ||
674 | * @hw: the HW module | ||
675 | * @addr: the start address for the read | ||
676 | * @nwords: how many 32-bit words to read | ||
677 | * @data: where to store the read data | ||
678 | * @byte_oriented: whether to store data as bytes or as words | ||
679 | * | ||
680 | * Read the specified number of 32-bit words from the serial flash. | ||
681 | * If @byte_oriented is set the read data is stored as a byte array | ||
682 | * (i.e., big-endian), otherwise as 32-bit words in the platform's | ||
683 | * natural endianness. | ||
684 | */ | ||
685 | static int | ||
686 | csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords, | ||
687 | uint32_t *data, int32_t byte_oriented) | ||
688 | { | ||
689 | int ret; | ||
690 | |||
691 | if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3)) | ||
692 | return -EINVAL; | ||
693 | |||
694 | addr = swab32(addr) | SF_RD_DATA_FAST; | ||
695 | |||
696 | ret = csio_hw_sf1_write(hw, 4, 1, 0, addr); | ||
697 | if (ret != 0) | ||
698 | return ret; | ||
699 | |||
700 | ret = csio_hw_sf1_read(hw, 1, 1, 0, data); | ||
701 | if (ret != 0) | ||
702 | return ret; | ||
703 | |||
704 | for ( ; nwords; nwords--, data++) { | ||
705 | ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data); | ||
706 | if (nwords == 1) | ||
707 | csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ | ||
708 | if (ret) | ||
709 | return ret; | ||
710 | if (byte_oriented) | ||
711 | *data = htonl(*data); | ||
712 | } | ||
713 | return 0; | ||
714 | } | ||
715 | |||
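csio_hw_read_flash() packs the whole fast-read command into a single register write: SF_RD_DATA_FAST contributes the opcode in the low byte, and swab32() byte-reverses the 24-bit flash address so that, assuming the SF engine shifts the low byte out first, the device sees the opcode followed by the address most-significant byte first. A sketch of that composition; the 0x0b value is the standard SPI fast-read opcode and is an assumption here, not a quote from the driver headers:

    #include <stdint.h>
    #include <stdio.h>

    #define SF_RD_DATA_FAST 0x0b /* assumed: standard SPI fast-read opcode */

    static uint32_t swab32(uint32_t x)
    {
        return (x >> 24) | ((x >> 8) & 0x0000ff00) |
               ((x << 8) & 0x00ff0000) | (x << 24);
    }

    int main(void)
    {
        uint32_t addr = 0x00abcdef;                    /* 24-bit flash address */
        uint32_t cmd  = swab32(addr) | SF_RD_DATA_FAST;

        printf("cmd word: 0x%08x\n", cmd);             /* prints 0xefcdab0b */
        return 0;
    }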
716 | /* | ||
717 | * csio_hw_write_flash - write up to a page of data to the serial flash | ||
718 | * @hw: the hw | ||
719 | * @addr: the start address to write | ||
720 | * @n: length of data to write in bytes | ||
721 | * @data: the data to write | ||
722 | * | ||
723 | * Writes up to a page of data (256 bytes) to the serial flash starting | ||
724 | * at the given address. All the data must be written to the same page. | ||
725 | */ | ||
726 | static int | ||
727 | csio_hw_write_flash(struct csio_hw *hw, uint32_t addr, | ||
728 | uint32_t n, const uint8_t *data) | ||
729 | { | ||
730 | int ret = -EINVAL; | ||
731 | uint32_t buf[64]; | ||
732 | uint32_t i, c, left, val, offset = addr & 0xff; | ||
733 | |||
734 | if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE) | ||
735 | return -EINVAL; | ||
736 | |||
737 | val = swab32(addr) | SF_PROG_PAGE; | ||
738 | |||
739 | ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE); | ||
740 | if (ret != 0) | ||
741 | goto unlock; | ||
742 | |||
743 | ret = csio_hw_sf1_write(hw, 4, 1, 1, val); | ||
744 | if (ret != 0) | ||
745 | goto unlock; | ||
746 | |||
747 | for (left = n; left; left -= c) { | ||
748 | c = min(left, 4U); | ||
749 | for (val = 0, i = 0; i < c; ++i) | ||
750 | val = (val << 8) + *data++; | ||
751 | |||
752 | ret = csio_hw_sf1_write(hw, c, c != left, 1, val); | ||
753 | if (ret) | ||
754 | goto unlock; | ||
755 | } | ||
756 | ret = csio_hw_flash_wait_op(hw, 8, 1); | ||
757 | if (ret) | ||
758 | goto unlock; | ||
759 | |||
760 | csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ | ||
761 | |||
762 | /* Read the page to verify the write succeeded */ | ||
763 | ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); | ||
764 | if (ret) | ||
765 | return ret; | ||
766 | |||
767 | if (memcmp(data - n, (uint8_t *)buf + offset, n)) { | ||
768 | csio_err(hw, | ||
769 | "failed to correctly write the flash page at %#x\n", | ||
770 | addr); | ||
771 | return -EINVAL; | ||
772 | } | ||
773 | |||
774 | return 0; | ||
775 | |||
776 | unlock: | ||
777 | csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ | ||
778 | return ret; | ||
779 | } | ||
780 | |||
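Because csio_hw_write_flash() rejects any write that would cross a 256-byte flash page, larger images have to be fed to it one page at a time, which is exactly what csio_hw_fw_dload() does further down. A sketch of that chunking discipline against a hypothetical per-page writer (write_page stands in for csio_hw_write_flash):

    #include <stdint.h>
    #include <stddef.h>

    #define SF_PAGE_SIZE 256

    /* Hypothetical per-page writer: all @n bytes must land in one page. */
    static int write_page(uint32_t addr, uint32_t n, const uint8_t *data)
    {
        (void)data;
        return ((addr & 0xff) + n > SF_PAGE_SIZE) ? -1 : 0;
    }

    /* Write @size bytes at page-aligned @addr, one flash page at a time. */
    static int write_image(uint32_t addr, const uint8_t *data, size_t size)
    {
        while (size) {
            uint32_t n = size < SF_PAGE_SIZE ? (uint32_t)size : SF_PAGE_SIZE;

            if (write_page(addr, n, data) != 0)
                return -1;
            addr += n;
            data += n;
            size -= n;
        }
        return 0;
    }

    int main(void)
    {
        uint8_t img[1024] = { 0 };

        return write_image(0x1000, img, sizeof(img));
    }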
781 | /* | ||
782 | * csio_hw_flash_erase_sectors - erase a range of flash sectors | ||
783 | * @hw: the HW module | ||
784 | * @start: the first sector to erase | ||
785 | * @end: the last sector to erase | ||
786 | * | ||
787 | * Erases the sectors in the given inclusive range. | ||
788 | */ | ||
789 | static int | ||
790 | csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end) | ||
791 | { | ||
792 | int ret = 0; | ||
793 | |||
794 | while (start <= end) { | ||
795 | |||
796 | ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE); | ||
797 | if (ret != 0) | ||
798 | goto out; | ||
799 | |||
800 | ret = csio_hw_sf1_write(hw, 4, 0, 1, | ||
801 | SF_ERASE_SECTOR | (start << 8)); | ||
802 | if (ret != 0) | ||
803 | goto out; | ||
804 | |||
805 | ret = csio_hw_flash_wait_op(hw, 14, 500); | ||
806 | if (ret != 0) | ||
807 | goto out; | ||
808 | |||
809 | start++; | ||
810 | } | ||
811 | out: | ||
812 | if (ret) | ||
813 | csio_err(hw, "erase of flash sector %d failed, error %d\n", | ||
814 | start, ret); | ||
815 | csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ | ||
816 | return ret; | ||
817 | } | ||
818 | |||
819 | /* | ||
820 | * csio_hw_flash_cfg_addr - return the address of the flash | ||
821 | * configuration file | ||
822 | * @hw: the HW module | ||
823 | * | ||
824 | * Return the address within the flash where the Firmware Configuration | ||
825 | * File is stored. | ||
826 | */ | ||
827 | static unsigned int | ||
828 | csio_hw_flash_cfg_addr(struct csio_hw *hw) | ||
829 | { | ||
830 | if (hw->params.sf_size == 0x100000) | ||
831 | return FPGA_FLASH_CFG_OFFSET; | ||
832 | else | ||
833 | return FLASH_CFG_OFFSET; | ||
834 | } | ||
835 | |||
836 | static void | ||
837 | csio_hw_print_fw_version(struct csio_hw *hw, char *str) | ||
838 | { | ||
839 | csio_info(hw, "%s: %u.%u.%u.%u\n", str, | ||
840 | FW_HDR_FW_VER_MAJOR_GET(hw->fwrev), | ||
841 | FW_HDR_FW_VER_MINOR_GET(hw->fwrev), | ||
842 | FW_HDR_FW_VER_MICRO_GET(hw->fwrev), | ||
843 | FW_HDR_FW_VER_BUILD_GET(hw->fwrev)); | ||
844 | } | ||
845 | |||
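The FW_HDR_FW_VER_*_GET() accessors used above unpack one byte per field from the 32-bit version word. The byte ordering below (major in the top byte down to build in the bottom byte) is the conventional packing and should be read as an assumption, not a quote of t4fw_api_stor.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed layout: major 31:24, minor 23:16, micro 15:8, build 7:0. */
    #define FW_VER_MAJOR(x) (((x) >> 24) & 0xff)
    #define FW_VER_MINOR(x) (((x) >> 16) & 0xff)
    #define FW_VER_MICRO(x) (((x) >>  8) & 0xff)
    #define FW_VER_BUILD(x) ((x) & 0xff)

    int main(void)
    {
        uint32_t fwrev = 0x01060200; /* would print as 1.6.2.0 */

        printf("%u.%u.%u.%u\n", FW_VER_MAJOR(fwrev), FW_VER_MINOR(fwrev),
               FW_VER_MICRO(fwrev), FW_VER_BUILD(fwrev));
        return 0;
    }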
846 | /* | ||
847 | * csio_hw_get_fw_version - read the firmware version | ||
848 | * @hw: HW module | ||
849 | * @vers: where to place the version | ||
850 | * | ||
851 | * Reads the FW version from flash. | ||
852 | */ | ||
853 | static int | ||
854 | csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers) | ||
855 | { | ||
856 | return csio_hw_read_flash(hw, FW_IMG_START + | ||
857 | offsetof(struct fw_hdr, fw_ver), 1, | ||
858 | vers, 0); | ||
859 | } | ||
860 | |||
861 | /* | ||
862 | * csio_hw_get_tp_version - read the TP microcode version | ||
863 | * @hw: HW module | ||
864 | * @vers: where to place the version | ||
865 | * | ||
866 | * Reads the TP microcode version from flash. | ||
867 | */ | ||
868 | static int | ||
869 | csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers) | ||
870 | { | ||
871 | return csio_hw_read_flash(hw, FLASH_FW_START + | ||
872 | offsetof(struct fw_hdr, tp_microcode_ver), 1, | ||
873 | vers, 0); | ||
874 | } | ||
875 | |||
876 | /* | ||
877 | * csio_hw_check_fw_version - check if the FW is compatible with | ||
878 | * this driver | ||
879 | * @hw: HW module | ||
880 | * | ||
881 | * Checks if an adapter's FW is compatible with the driver. Returns 0 | ||
882 | * if there's an exact match, a negative error if the version could not | ||
883 | * be read or there's a major/minor version mismatch. | ||
884 | */ | ||
885 | static int | ||
886 | csio_hw_check_fw_version(struct csio_hw *hw) | ||
887 | { | ||
888 | int ret, major, minor, micro; | ||
889 | |||
890 | ret = csio_hw_get_fw_version(hw, &hw->fwrev); | ||
891 | if (!ret) | ||
892 | ret = csio_hw_get_tp_version(hw, &hw->tp_vers); | ||
893 | if (ret) | ||
894 | return ret; | ||
895 | |||
896 | major = FW_HDR_FW_VER_MAJOR_GET(hw->fwrev); | ||
897 | minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev); | ||
898 | micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev); | ||
899 | |||
900 | if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */ | ||
901 | csio_err(hw, "card FW has major version %u, driver wants %u\n", | ||
902 | major, FW_VERSION_MAJOR); | ||
903 | return -EINVAL; | ||
904 | } | ||
905 | |||
906 | if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO) | ||
907 | return 0; /* perfect match */ | ||
908 | |||
909 | /* Minor/micro version mismatch */ | ||
910 | return -EINVAL; | ||
911 | } | ||
912 | |||
913 | /* | ||
914 | * csio_hw_fw_dload - download firmware. | ||
915 | * @hw: HW module | ||
916 | * @fw_data: firmware image to write. | ||
917 | * @size: image size | ||
918 | * | ||
919 | * Write the supplied firmware image to the card's serial flash. | ||
920 | */ | ||
921 | static int | ||
922 | csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size) | ||
923 | { | ||
924 | uint32_t csum; | ||
925 | int32_t addr; | ||
926 | int ret; | ||
927 | uint32_t i; | ||
928 | uint8_t first_page[SF_PAGE_SIZE]; | ||
929 | const uint32_t *p = (const uint32_t *)fw_data; | ||
930 | struct fw_hdr *hdr = (struct fw_hdr *)fw_data; | ||
931 | uint32_t sf_sec_size; | ||
932 | |||
933 | if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) { | ||
934 | csio_err(hw, "Serial Flash data invalid\n"); | ||
935 | return -EINVAL; | ||
936 | } | ||
937 | |||
938 | if (!size) { | ||
939 | csio_err(hw, "FW image has no data\n"); | ||
940 | return -EINVAL; | ||
941 | } | ||
942 | |||
943 | if (size & 511) { | ||
944 | csio_err(hw, "FW image size not multiple of 512 bytes\n"); | ||
945 | return -EINVAL; | ||
946 | } | ||
947 | |||
948 | if (ntohs(hdr->len512) * 512 != size) { | ||
949 | csio_err(hw, "FW image size differs from size in FW header\n"); | ||
950 | return -EINVAL; | ||
951 | } | ||
952 | |||
953 | if (size > FW_MAX_SIZE) { | ||
954 | csio_err(hw, "FW image too large, max is %u bytes\n", | ||
955 | FW_MAX_SIZE); | ||
956 | return -EINVAL; | ||
957 | } | ||
958 | |||
959 | for (csum = 0, i = 0; i < size / sizeof(csum); i++) | ||
960 | csum += ntohl(p[i]); | ||
961 | |||
962 | if (csum != 0xffffffff) { | ||
963 | csio_err(hw, "corrupted firmware image, checksum %#x\n", csum); | ||
964 | return -EINVAL; | ||
965 | } | ||
966 | |||
967 | sf_sec_size = hw->params.sf_size / hw->params.sf_nsec; | ||
968 | i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */ | ||
969 | |||
970 | csio_dbg(hw, "Erasing sectors... start:%d end:%d\n", | ||
971 | FW_START_SEC, FW_START_SEC + i - 1); | ||
972 | |||
973 | ret = csio_hw_flash_erase_sectors(hw, FW_START_SEC, | ||
974 | FW_START_SEC + i - 1); | ||
975 | if (ret) { | ||
976 | csio_err(hw, "Flash Erase failed\n"); | ||
977 | goto out; | ||
978 | } | ||
979 | |||
980 | /* | ||
981 | * We write the correct version at the end so the driver can see a bad | ||
982 | * version if the FW write fails. Start by writing a copy of the | ||
983 | * first page with a bad version. | ||
984 | */ | ||
985 | memcpy(first_page, fw_data, SF_PAGE_SIZE); | ||
986 | ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff); | ||
987 | ret = csio_hw_write_flash(hw, FW_IMG_START, SF_PAGE_SIZE, first_page); | ||
988 | if (ret) | ||
989 | goto out; | ||
990 | |||
991 | csio_dbg(hw, "Writing Flash .. start:%d end:%d\n", | ||
992 | FW_IMG_START, FW_IMG_START + size); | ||
993 | |||
994 | addr = FW_IMG_START; | ||
995 | for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { | ||
996 | addr += SF_PAGE_SIZE; | ||
997 | fw_data += SF_PAGE_SIZE; | ||
998 | ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data); | ||
999 | if (ret) | ||
1000 | goto out; | ||
1001 | } | ||
1002 | |||
1003 | ret = csio_hw_write_flash(hw, | ||
1004 | FW_IMG_START + | ||
1005 | offsetof(struct fw_hdr, fw_ver), | ||
1006 | sizeof(hdr->fw_ver), | ||
1007 | (const uint8_t *)&hdr->fw_ver); | ||
1008 | |||
1009 | out: | ||
1010 | if (ret) | ||
1011 | csio_err(hw, "firmware download failed, error %d\n", ret); | ||
1012 | return ret; | ||
1013 | } | ||
1014 | |||
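The integrity rule enforced above: the image's big-endian 32-bit words, summed with 32-bit wraparound, must total 0xffffffff, presumably arranged by a compensating word placed in the image. A standalone sketch of the check, operating on words already converted to host order:

    #include <stdint.h>
    #include <stddef.h>

    static int fw_image_csum_ok(const uint32_t *words, size_t nwords)
    {
        uint32_t csum = 0;
        size_t i;

        for (i = 0; i < nwords; i++)
            csum += words[i]; /* wraps mod 2^32, as in the driver */
        return csum == 0xffffffff;
    }

    int main(void)
    {
        uint32_t img[4] = { 1, 2, 3, 0 };

        img[3] = 0xffffffffu - (1 + 2 + 3); /* forge the checksum word */
        return fw_image_csum_ok(img, 4) ? 0 : 1;
    }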
1015 | static int | ||
1016 | csio_hw_get_flash_params(struct csio_hw *hw) | ||
1017 | { | ||
1018 | int ret; | ||
1019 | uint32_t info = 0; | ||
1020 | |||
1021 | ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID); | ||
1022 | if (!ret) | ||
1023 | ret = csio_hw_sf1_read(hw, 3, 0, 1, &info); | ||
1024 | csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ | ||
1025 | if (ret != 0) | ||
1026 | return ret; | ||
1027 | |||
1028 | if ((info & 0xff) != 0x20) /* not a Numonyx flash */ | ||
1029 | return -EINVAL; | ||
1030 | info >>= 16; /* log2 of size */ | ||
1031 | if (info >= 0x14 && info < 0x18) | ||
1032 | hw->params.sf_nsec = 1 << (info - 16); | ||
1033 | else if (info == 0x18) | ||
1034 | hw->params.sf_nsec = 64; | ||
1035 | else | ||
1036 | return -EINVAL; | ||
1037 | hw->params.sf_size = 1 << info; | ||
1038 | |||
1039 | return 0; | ||
1040 | } | ||
1041 | |||
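csio_hw_get_flash_params() issues SF_RD_ID and decodes the three ID bytes that come back: byte 0 must be 0x20 (a Numonyx part) and the third byte is a density code equal to log2 of the part size in bytes, from which the sector count follows. The same decode as a standalone sketch (decode_flash_id is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static int decode_flash_id(uint32_t info, uint32_t *size, uint32_t *nsec)
    {
        if ((info & 0xff) != 0x20)      /* manufacturer: Numonyx */
            return -1;

        info >>= 16;                    /* density code = log2(size) */
        if (info >= 0x14 && info < 0x18)
            *nsec = 1 << (info - 16);   /* e.g. 4MB part -> 64 sectors */
        else if (info == 0x18)
            *nsec = 64;                 /* 16MB part */
        else
            return -1;
        *size = 1 << info;
        return 0;
    }

    int main(void)
    {
        uint32_t size, nsec;

        /* density 0x16 => a 4MB part in 64 sectors */
        if (decode_flash_id(0x00160020, &size, &nsec) == 0)
            printf("%u bytes in %u sectors\n", size, nsec);
        return 0;
    }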
1042 | static void | ||
1043 | csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range) | ||
1044 | { | ||
1045 | uint16_t val; | ||
1046 | int pcie_cap; | ||
1047 | |||
1048 | if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) { | ||
1049 | pci_read_config_word(hw->pdev, | ||
1050 | pcie_cap + PCI_EXP_DEVCTL2, &val); | ||
1051 | val &= 0xfff0; | ||
1052 | val |= range; | ||
1053 | pci_write_config_word(hw->pdev, | ||
1054 | pcie_cap + PCI_EXP_DEVCTL2, val); | ||
1055 | } | ||
1056 | } | ||
1057 | |||
1058 | |||
1059 | /* | ||
1060 | * Return the specified PCI-E Configuration Space register from our Physical | ||
1061 | * Function. We try first via a Firmware LDST Command since we prefer to let | ||
1062 | * the firmware own all of these registers, but if that fails we go for it | ||
1063 | * directly ourselves. | ||
1064 | */ | ||
1065 | static uint32_t | ||
1066 | csio_read_pcie_cfg4(struct csio_hw *hw, int reg) | ||
1067 | { | ||
1068 | u32 val = 0; | ||
1069 | struct csio_mb *mbp; | ||
1070 | int rv; | ||
1071 | struct fw_ldst_cmd *ldst_cmd; | ||
1072 | |||
1073 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
1074 | if (!mbp) { | ||
1075 | CSIO_INC_STATS(hw, n_err_nomem); | ||
1076 | pci_read_config_dword(hw->pdev, reg, &val); | ||
1077 | return val; | ||
1078 | } | ||
1079 | |||
1080 | csio_mb_ldst(hw, mbp, CSIO_MB_DEFAULT_TMO, reg); | ||
1081 | |||
1082 | rv = csio_mb_issue(hw, mbp); | ||
1083 | |||
1084 | /* | ||
1085 | * If the LDST Command succeeded, extract the returned register | ||
1086 | * value. Otherwise read it directly ourselves. | ||
1087 | */ | ||
1088 | if (rv == 0) { | ||
1089 | ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb); | ||
1090 | val = ntohl(ldst_cmd->u.pcie.data[0]); | ||
1091 | } else | ||
1092 | pci_read_config_dword(hw->pdev, reg, &val); | ||
1093 | |||
1094 | mempool_free(mbp, hw->mb_mempool); | ||
1095 | |||
1096 | return val; | ||
1097 | } /* csio_read_pcie_cfg4 */ | ||
1098 | |||
1099 | static int | ||
1100 | csio_hw_set_mem_win(struct csio_hw *hw) | ||
1101 | { | ||
1102 | u32 bar0; | ||
1103 | |||
1104 | /* | ||
1105 | * Truncation intentional: we only read the bottom 32-bits of the | ||
1106 | * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to | ||
1107 | * read BAR0 instead of using pci_resource_start() because we could be | ||
1108 | * operating from within a Virtual Machine which is trapping our | ||
1109 | * accesses to our Configuration Space and we need to set up the PCI-E | ||
1110 | * Memory Window decoders with the actual addresses which will be | ||
1111 | * coming across the PCI-E link. | ||
1112 | */ | ||
1113 | bar0 = csio_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0); | ||
1114 | bar0 &= PCI_BASE_ADDRESS_MEM_MASK; | ||
1115 | |||
1116 | /* | ||
1117 | * Set up memory window for accessing adapter memory ranges. (Read | ||
1118 | * back MA register to ensure that changes propagate before we attempt | ||
1119 | * to use the new values.) | ||
1120 | */ | ||
1121 | csio_wr_reg32(hw, (bar0 + MEMWIN0_BASE) | BIR(0) | | ||
1122 | WINDOW(ilog2(MEMWIN0_APERTURE) - 10), | ||
1123 | PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0)); | ||
1124 | csio_wr_reg32(hw, (bar0 + MEMWIN1_BASE) | BIR(0) | | ||
1125 | WINDOW(ilog2(MEMWIN1_APERTURE) - 10), | ||
1126 | PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1)); | ||
1127 | csio_wr_reg32(hw, (bar0 + MEMWIN2_BASE) | BIR(0) | | ||
1128 | WINDOW(ilog2(MEMWIN2_APERTURE) - 10), | ||
1129 | PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2)); | ||
1130 | csio_rd_reg32(hw, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2)); | ||
1131 | return 0; | ||
1132 | } /* csio_hw_set_mem_win */ | ||
1133 | |||
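Each memory-window register written above packs a base address together with a size code: the WINDOW() field holds ilog2(aperture) - 10, so a 1KB aperture encodes as 0, 2KB as 1, a 64KB aperture as 6, and so on. The exact bit positions of BIR() and WINDOW() live in the register headers and aren't assumed here; the sketch below only shows the size-code arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* ilog2 for power-of-two apertures */
    static unsigned int ilog2_u32(uint32_t x)
    {
        unsigned int r = 0;

        while (x >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        uint32_t aperture = 64 * 1024; /* 64KB window */

        printf("WINDOW size code: %u\n", ilog2_u32(aperture) - 10); /* 6 */
        return 0;
    }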
1134 | |||
1135 | |||
1136 | /*****************************************************************************/ | ||
1137 | /* HW State machine assists */ | ||
1138 | /*****************************************************************************/ | ||
1139 | |||
1140 | static int | ||
1141 | csio_hw_dev_ready(struct csio_hw *hw) | ||
1142 | { | ||
1143 | uint32_t reg; | ||
1144 | int cnt = 6; | ||
1145 | |||
1146 | while (((reg = csio_rd_reg32(hw, PL_WHOAMI)) == 0xFFFFFFFF) && | ||
1147 | (--cnt != 0)) | ||
1148 | mdelay(100); | ||
1149 | |||
1150 | if ((cnt == 0) && (((int32_t)(SOURCEPF_GET(reg)) < 0) || | ||
1151 | (SOURCEPF_GET(reg) >= CSIO_MAX_PFN))) { | ||
1152 | csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt); | ||
1153 | return -EIO; | ||
1154 | } | ||
1155 | |||
1156 | hw->pfn = SOURCEPF_GET(reg); | ||
1157 | |||
1158 | return 0; | ||
1159 | } | ||
1160 | |||
1161 | /* | ||
1162 | * csio_do_hello - Perform the HELLO FW Mailbox command and process response. | ||
1163 | * @hw: HW module | ||
1164 | * @state: Device state | ||
1165 | * | ||
1166 | * FW_HELLO_CMD has to be polled for completion. | ||
1167 | */ | ||
1168 | static int | ||
1169 | csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state) | ||
1170 | { | ||
1171 | struct csio_mb *mbp; | ||
1172 | int rv = 0; | ||
1173 | enum csio_dev_master master; | ||
1174 | enum fw_retval retval; | ||
1175 | uint8_t mpfn; | ||
1176 | char state_str[16]; | ||
1177 | int retries = FW_CMD_HELLO_RETRIES; | ||
1178 | |||
1179 | memset(state_str, 0, sizeof(state_str)); | ||
1180 | |||
1181 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
1182 | if (!mbp) { | ||
1183 | rv = -ENOMEM; | ||
1184 | CSIO_INC_STATS(hw, n_err_nomem); | ||
1185 | goto out; | ||
1186 | } | ||
1187 | |||
1188 | master = csio_force_master ? CSIO_MASTER_MUST : CSIO_MASTER_MAY; | ||
1189 | |||
1190 | retry: | ||
1191 | csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, | ||
1192 | hw->pfn, master, NULL); | ||
1193 | |||
1194 | rv = csio_mb_issue(hw, mbp); | ||
1195 | if (rv) { | ||
1196 | csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv); | ||
1197 | goto out_free_mb; | ||
1198 | } | ||
1199 | |||
1200 | csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn); | ||
1201 | if (retval != FW_SUCCESS) { | ||
1202 | csio_err(hw, "HELLO cmd failed with ret: %d\n", retval); | ||
1203 | rv = -EINVAL; | ||
1204 | goto out_free_mb; | ||
1205 | } | ||
1206 | |||
1207 | /* Firmware has designated us to be master */ | ||
1208 | if (hw->pfn == mpfn) { | ||
1209 | hw->flags |= CSIO_HWF_MASTER; | ||
1210 | } else if (*state == CSIO_DEV_STATE_UNINIT) { | ||
1211 | /* | ||
1212 | * If we're not the Master PF then we need to wait around for | ||
1213 | * the Master PF Driver to finish setting up the adapter. | ||
1214 | * | ||
1215 | * Note that we also do this wait if we're a non-Master-capable | ||
1216 | * PF and there is no current Master PF; a Master PF may show up | ||
1217 | * momentarily and we wouldn't want to fail pointlessly. (This | ||
1218 | * can happen when an OS loads lots of different drivers rapidly | ||
1219 | * at the same time). In this case, the Master PF returned by | ||
1220 | * the firmware will be PCIE_FW_MASTER_MASK so the test below | ||
1221 | * will work ... | ||
1222 | */ | ||
1223 | |||
1224 | int waiting = FW_CMD_HELLO_TIMEOUT; | ||
1225 | |||
1226 | /* | ||
1227 | * Wait for the firmware to either indicate an error or | ||
1228 | * initialized state. If we see either of these we bail out | ||
1229 | * and report the issue to the caller. If we exhaust the | ||
1230 | * "hello timeout" and we haven't exhausted our retries, try | ||
1231 | * again. Otherwise bail with a timeout error. | ||
1232 | */ | ||
1233 | for (;;) { | ||
1234 | uint32_t pcie_fw; | ||
1235 | |||
1236 | msleep(50); | ||
1237 | waiting -= 50; | ||
1238 | |||
1239 | /* | ||
1240 | * If neither Error nor Initialized is indicated | ||
1241 | * by the firmware, keep waiting till we exhaust our | ||
1242 | * timeout ... and then retry if we haven't exhausted | ||
1243 | * our retries ... | ||
1244 | */ | ||
1245 | pcie_fw = csio_rd_reg32(hw, PCIE_FW); | ||
1246 | if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) { | ||
1247 | if (waiting <= 0) { | ||
1248 | if (retries-- > 0) | ||
1249 | goto retry; | ||
1250 | |||
1251 | rv = -ETIMEDOUT; | ||
1252 | break; | ||
1253 | } | ||
1254 | continue; | ||
1255 | } | ||
1256 | |||
1257 | /* | ||
1258 | * We either have an Error or Initialized condition; | ||
1259 | * report errors preferentially. | ||
1260 | */ | ||
1261 | if (state) { | ||
1262 | if (pcie_fw & PCIE_FW_ERR) { | ||
1263 | *state = CSIO_DEV_STATE_ERR; | ||
1264 | rv = -ETIMEDOUT; | ||
1265 | } else if (pcie_fw & PCIE_FW_INIT) | ||
1266 | *state = CSIO_DEV_STATE_INIT; | ||
1267 | } | ||
1268 | |||
1269 | /* | ||
1270 | * If we arrived before a Master PF was selected and | ||
1271 | * there's now a valid Master PF, grab its identity | ||
1272 | * for our caller. | ||
1273 | */ | ||
1274 | if (mpfn == PCIE_FW_MASTER_MASK && | ||
1275 | (pcie_fw & PCIE_FW_MASTER_VLD)) | ||
1276 | mpfn = PCIE_FW_MASTER_GET(pcie_fw); | ||
1277 | break; | ||
1278 | } | ||
1279 | hw->flags &= ~CSIO_HWF_MASTER; | ||
1280 | } | ||
1281 | |||
1282 | switch (*state) { | ||
1283 | case CSIO_DEV_STATE_UNINIT: | ||
1284 | strcpy(state_str, "Initializing"); | ||
1285 | break; | ||
1286 | case CSIO_DEV_STATE_INIT: | ||
1287 | strcpy(state_str, "Initialized"); | ||
1288 | break; | ||
1289 | case CSIO_DEV_STATE_ERR: | ||
1290 | strcpy(state_str, "Error"); | ||
1291 | break; | ||
1292 | default: | ||
1293 | strcpy(state_str, "Unknown"); | ||
1294 | break; | ||
1295 | } | ||
1296 | |||
1297 | if (hw->pfn == mpfn) | ||
1298 | csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n", | ||
1299 | hw->pfn, state_str); | ||
1300 | else | ||
1301 | csio_info(hw, | ||
1302 | "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n", | ||
1303 | hw->pfn, mpfn, state_str); | ||
1304 | |||
1305 | out_free_mb: | ||
1306 | mempool_free(mbp, hw->mb_mempool); | ||
1307 | out: | ||
1308 | return rv; | ||
1309 | } | ||
1310 | |||
1311 | /* | ||
1312 | * csio_do_bye - Perform the BYE FW Mailbox command and process response. | ||
1313 | * @hw: HW module | ||
1314 | * | ||
1315 | */ | ||
1316 | static int | ||
1317 | csio_do_bye(struct csio_hw *hw) | ||
1318 | { | ||
1319 | struct csio_mb *mbp; | ||
1320 | enum fw_retval retval; | ||
1321 | |||
1322 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
1323 | if (!mbp) { | ||
1324 | CSIO_INC_STATS(hw, n_err_nomem); | ||
1325 | return -ENOMEM; | ||
1326 | } | ||
1327 | |||
1328 | csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); | ||
1329 | |||
1330 | if (csio_mb_issue(hw, mbp)) { | ||
1331 | csio_err(hw, "Issue of BYE command failed\n"); | ||
1332 | mempool_free(mbp, hw->mb_mempool); | ||
1333 | return -EINVAL; | ||
1334 | } | ||
1335 | |||
1336 | retval = csio_mb_fw_retval(mbp); | ||
1337 | if (retval != FW_SUCCESS) { | ||
1338 | mempool_free(mbp, hw->mb_mempool); | ||
1339 | return -EINVAL; | ||
1340 | } | ||
1341 | |||
1342 | mempool_free(mbp, hw->mb_mempool); | ||
1343 | |||
1344 | return 0; | ||
1345 | } | ||
1346 | |||
1347 | /* | ||
1348 | * csio_do_reset- Perform the device reset. | ||
1349 | * @hw: HW module | ||
1350 | * @fw_rst: FW reset | ||
1351 | * | ||
1352 | * If fw_rst is set, issues FW reset mbox cmd otherwise | ||
1353 | * does PIO reset. | ||
1354 | * Performs reset of the function. | ||
1355 | */ | ||
1356 | static int | ||
1357 | csio_do_reset(struct csio_hw *hw, bool fw_rst) | ||
1358 | { | ||
1359 | struct csio_mb *mbp; | ||
1360 | enum fw_retval retval; | ||
1361 | |||
1362 | if (!fw_rst) { | ||
1363 | /* PIO reset */ | ||
1364 | csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST); | ||
1365 | mdelay(2000); | ||
1366 | return 0; | ||
1367 | } | ||
1368 | |||
1369 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
1370 | if (!mbp) { | ||
1371 | CSIO_INC_STATS(hw, n_err_nomem); | ||
1372 | return -ENOMEM; | ||
1373 | } | ||
1374 | |||
1375 | csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO, | ||
1376 | PIORSTMODE | PIORST, 0, NULL); | ||
1377 | |||
1378 | if (csio_mb_issue(hw, mbp)) { | ||
1379 | csio_err(hw, "Issue of RESET command failed.\n"); | ||
1380 | mempool_free(mbp, hw->mb_mempool); | ||
1381 | return -EINVAL; | ||
1382 | } | ||
1383 | |||
1384 | retval = csio_mb_fw_retval(mbp); | ||
1385 | if (retval != FW_SUCCESS) { | ||
1386 | csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval); | ||
1387 | mempool_free(mbp, hw->mb_mempool); | ||
1388 | return -EINVAL; | ||
1389 | } | ||
1390 | |||
1391 | mempool_free(mbp, hw->mb_mempool); | ||
1392 | |||
1393 | return 0; | ||
1394 | } | ||
1395 | |||
1396 | static int | ||
1397 | csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp) | ||
1398 | { | ||
1399 | struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb; | ||
1400 | uint16_t caps; | ||
1401 | |||
1402 | caps = ntohs(rsp->fcoecaps); | ||
1403 | |||
1404 | if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) { | ||
1405 | csio_err(hw, "No FCoE Initiator capability in the firmware.\n"); | ||
1406 | return -EINVAL; | ||
1407 | } | ||
1408 | |||
1409 | if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) { | ||
1410 | csio_err(hw, "No FCoE Control Offload capability\n"); | ||
1411 | return -EINVAL; | ||
1412 | } | ||
1413 | |||
1414 | return 0; | ||
1415 | } | ||
1416 | |||
1417 | /* | ||
1418 | * csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET | ||
1419 | * @hw: the HW module | ||
1420 | * @mbox: mailbox to use for the FW RESET command (if desired) | ||
1421 | * @force: force uP into RESET even if FW RESET command fails | ||
1422 | * | ||
1423 | * Issues a RESET command to firmware (if desired) with a HALT indication | ||
1424 | * and then puts the microprocessor into RESET state. The RESET command | ||
1425 | * will only be issued if a legitimate mailbox is provided (mbox <= | ||
1426 | * PCIE_FW_MASTER_MASK). | ||
1427 | * | ||
1428 | * This is generally used in order for the host to safely manipulate the | ||
1429 | * adapter without fear of conflicting with whatever the firmware might | ||
1430 | * be doing. The only way out of this state is to RESTART the firmware | ||
1431 | * ... | ||
1432 | */ | ||
1433 | static int | ||
1434 | csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force) | ||
1435 | { | ||
1436 | enum fw_retval retval = 0; | ||
1437 | |||
1438 | /* | ||
1439 | * If a legitimate mailbox is provided, issue a RESET command | ||
1440 | * with a HALT indication. | ||
1441 | */ | ||
1442 | if (mbox <= PCIE_FW_MASTER_MASK) { | ||
1443 | struct csio_mb *mbp; | ||
1444 | |||
1445 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
1446 | if (!mbp) { | ||
1447 | CSIO_INC_STATS(hw, n_err_nomem); | ||
1448 | return -ENOMEM; | ||
1449 | } | ||
1450 | |||
1451 | csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO, | ||
1452 | PIORSTMODE | PIORST, FW_RESET_CMD_HALT(1), | ||
1453 | NULL); | ||
1454 | |||
1455 | if (csio_mb_issue(hw, mbp)) { | ||
1456 | csio_err(hw, "Issue of RESET command failed!\n"); | ||
1457 | mempool_free(mbp, hw->mb_mempool); | ||
1458 | return -EINVAL; | ||
1459 | } | ||
1460 | |||
1461 | retval = csio_mb_fw_retval(mbp); | ||
1462 | mempool_free(mbp, hw->mb_mempool); | ||
1463 | } | ||
1464 | |||
1465 | /* | ||
1466 | * Normally we won't complete the operation if the firmware RESET | ||
1467 | * command fails but if our caller insists we'll go ahead and put the | ||
1468 | * uP into RESET. This can be useful if the firmware is hung or even | ||
1469 | * missing ... We'll have to take the risk of putting the uP into | ||
1470 | * RESET without the cooperation of firmware in that case. | ||
1471 | * | ||
1472 | * We also force the firmware's HALT flag to be on in case we bypassed | ||
1473 | * the firmware RESET command above or we're dealing with old firmware | ||
1474 | * which doesn't have the HALT capability. This will serve as a flag | ||
1475 | * for the incoming firmware to know that it's coming out of a HALT | ||
1476 | * rather than a RESET ... if it's new enough to understand that ... | ||
1477 | */ | ||
1478 | if (retval == 0 || force) { | ||
1479 | csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, UPCRST); | ||
1480 | csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, PCIE_FW_HALT); | ||
1481 | } | ||
1482 | |||
1483 | /* | ||
1484 | * And we always return the result of the firmware RESET command | ||
1485 | * even when we force the uP into RESET ... | ||
1486 | */ | ||
1487 | return retval ? -EINVAL : 0; | ||
1488 | } | ||
1489 | |||
1490 | /* | ||
1491 | * csio_hw_fw_restart - restart the firmware by taking the uP out of RESET | ||
1492 | * @hw: the HW module | ||
1493 | * @reset: if we want to do a RESET to restart things | ||
1494 | * | ||
1495 | * Restart firmware previously halted by csio_hw_fw_halt(). On successful | ||
1496 | * return the previous PF Master remains as the new PF Master and there | ||
1497 | * is no need to issue a new HELLO command, etc. | ||
1498 | * | ||
1499 | * We do this in two ways: | ||
1500 | * | ||
1501 | * 1. If we're dealing with newer firmware we'll simply want to take | ||
1502 | * the chip's microprocessor out of RESET. This will cause the | ||
1503 | * firmware to start up from its start vector. And then we'll loop | ||
1504 | * until the firmware indicates it's started again (PCIE_FW.HALT | ||
1505 | * reset to 0) or we timeout. | ||
1506 | * | ||
1507 | * 2. If we're dealing with older firmware then we'll need to RESET | ||
1508 | * the chip since older firmware won't recognize the PCIE_FW.HALT | ||
1509 | * flag and automatically RESET itself on startup. | ||
1510 | */ | ||
1511 | static int | ||
1512 | csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset) | ||
1513 | { | ||
1514 | if (reset) { | ||
1515 | /* | ||
1516 | * Since we're directing the RESET instead of the firmware | ||
1517 | * doing it automatically, we need to clear the PCIE_FW.HALT | ||
1518 | * bit. | ||
1519 | */ | ||
1520 | csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, 0); | ||
1521 | |||
1522 | /* | ||
1523 | * If we've been given a valid mailbox, first try to get the | ||
1524 | * firmware to do the RESET. If that works, great and we can | ||
1525 | * return success. Otherwise, if we haven't been given a | ||
1526 | * valid mailbox or the RESET command failed, fall back to | ||
1527 | * hitting the chip with a hammer. | ||
1528 | */ | ||
1529 | if (mbox <= PCIE_FW_MASTER_MASK) { | ||
1530 | csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0); | ||
1531 | msleep(100); | ||
1532 | if (csio_do_reset(hw, true) == 0) | ||
1533 | return 0; | ||
1534 | } | ||
1535 | |||
1536 | csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST); | ||
1537 | msleep(2000); | ||
1538 | } else { | ||
1539 | int ms; | ||
1540 | |||
1541 | csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0); | ||
1542 | for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { | ||
1543 | if (!(csio_rd_reg32(hw, PCIE_FW) & PCIE_FW_HALT)) | ||
1544 | return 0; | ||
1545 | msleep(100); | ||
1546 | ms += 100; | ||
1547 | } | ||
1548 | return -ETIMEDOUT; | ||
1549 | } | ||
1550 | return 0; | ||
1551 | } | ||
1552 | |||
1553 | /* | ||
1554 | * csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW | ||
1555 | * @hw: the HW module | ||
1556 | * @mbox: mailbox to use for the FW RESET command (if desired) | ||
1557 | * @fw_data: the firmware image to write | ||
1558 | * @size: image size | ||
1559 | * @force: force upgrade even if firmware doesn't cooperate | ||
1560 | * | ||
1561 | * Perform all of the steps necessary for upgrading an adapter's | ||
1562 | * firmware image. Normally this requires the cooperation of the | ||
1563 | * existing firmware in order to halt all existing activities | ||
1564 | * but if an invalid mailbox token is passed in we skip that step | ||
1565 | * (though we'll still put the adapter microprocessor into RESET in | ||
1566 | * that case). | ||
1567 | * | ||
1568 | * On successful return the new firmware will have been loaded and | ||
1569 | * the adapter will have been fully RESET losing all previous setup | ||
1570 | * state. On unsuccessful return the adapter may be completely hosed ... | ||
1571 | * positive errno indicates that the adapter is ~probably~ intact, a | ||
1572 | * negative errno indicates that things are looking bad ... | ||
1573 | */ | ||
1574 | static int | ||
1575 | csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox, | ||
1576 | const u8 *fw_data, uint32_t size, int32_t force) | ||
1577 | { | ||
1578 | const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; | ||
1579 | int reset, ret; | ||
1580 | |||
1581 | ret = csio_hw_fw_halt(hw, mbox, force); | ||
1582 | if (ret != 0 && !force) | ||
1583 | return ret; | ||
1584 | |||
1585 | ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size); | ||
1586 | if (ret != 0) | ||
1587 | return ret; | ||
1588 | |||
1589 | /* | ||
1590 | * Older versions of the firmware don't understand the new | ||
1591 | * PCIE_FW.HALT flag and so won't know to perform a RESET when they | ||
1592 | * restart. So for newly loaded older firmware we'll have to do the | ||
1593 | * RESET for it so it starts up on a clean slate. We can tell if | ||
1594 | * the newly loaded firmware will handle this right by checking | ||
1595 | * its header flags to see if it advertises the capability. | ||
1596 | */ | ||
1597 | reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0); | ||
1598 | return csio_hw_fw_restart(hw, mbox, reset); | ||
1599 | } | ||
1600 | |||
1601 | |||
1602 | /* | ||
1603 | * csio_hw_fw_config_file - setup an adapter via a Configuration File | ||
1604 | * @hw: the HW module | ||
1606 | * @mtype: the memory type where the Configuration File is located | ||
1607 | * @maddr: the memory address where the Configuration File is located | ||
1608 | * @finiver: return value for CF [fini] version | ||
1609 | * @finicsum: return value for CF [fini] checksum | ||
1610 | * @cfcsum: return value for CF computed checksum | ||
1611 | * | ||
1612 | * Issue a command to get the firmware to process the Configuration | ||
1613 | * File located at the specified mtype/maddress. If the Configuration | ||
1614 | * File is processed successfully and return value pointers are | ||
1615 | * provided, the Configuration File [fini] section version and | ||
1616 | * checksum values will be returned along with the computed checksum. | ||
1617 | * It's up to the caller to decide how it wants to respond to the | ||
1618 | * checksums not matching, but it is recommended that a prominent warning | ||
1619 | * be emitted in order to help people rapidly identify changed or | ||
1620 | * corrupted Configuration Files. | ||
1621 | * | ||
1622 | * Also note that it's possible to modify things like "niccaps", | ||
1623 | * "toecaps",etc. between processing the Configuration File and telling | ||
1624 | * the firmware to use the new configuration. Callers which want to | ||
1625 | * do this will need to "hand-roll" their own CAPS_CONFIGS commands for | ||
1626 | * Configuration Files if they want to do this. | ||
1627 | */ | ||
1628 | static int | ||
1629 | csio_hw_fw_config_file(struct csio_hw *hw, | ||
1630 | unsigned int mtype, unsigned int maddr, | ||
1631 | uint32_t *finiver, uint32_t *finicsum, uint32_t *cfcsum) | ||
1632 | { | ||
1633 | struct csio_mb *mbp; | ||
1634 | struct fw_caps_config_cmd *caps_cmd; | ||
1635 | int rv = -EINVAL; | ||
1636 | enum fw_retval ret; | ||
1637 | |||
1638 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
1639 | if (!mbp) { | ||
1640 | CSIO_INC_STATS(hw, n_err_nomem); | ||
1641 | return -ENOMEM; | ||
1642 | } | ||
1643 | /* | ||
1644 | * Tell the firmware to process the indicated Configuration File. | ||
1645 | * If there are no errors and the caller has provided return value | ||
1646 | * pointers for the [fini] section version, checksum and computed | ||
1647 | * checksum, pass those back to the caller. | ||
1648 | */ | ||
1649 | caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb); | ||
1650 | CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1); | ||
1651 | caps_cmd->op_to_write = | ||
1652 | htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | | ||
1653 | FW_CMD_REQUEST | | ||
1654 | FW_CMD_READ); | ||
1655 | caps_cmd->cfvalid_to_len16 = | ||
1656 | htonl(FW_CAPS_CONFIG_CMD_CFVALID | | ||
1657 | FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | | ||
1658 | FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | | ||
1659 | FW_LEN16(*caps_cmd)); | ||
1660 | |||
1661 | if (csio_mb_issue(hw, mbp)) { | ||
1662 | csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n"); | ||
1663 | goto out; | ||
1664 | } | ||
1665 | |||
1666 | ret = csio_mb_fw_retval(mbp); | ||
1667 | if (ret != FW_SUCCESS) { | ||
1668 | csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", ret); | ||
1669 | goto out; | ||
1670 | } | ||
1671 | |||
1672 | if (finiver) | ||
1673 | *finiver = ntohl(caps_cmd->finiver); | ||
1674 | if (finicsum) | ||
1675 | *finicsum = ntohl(caps_cmd->finicsum); | ||
1676 | if (cfcsum) | ||
1677 | *cfcsum = ntohl(caps_cmd->cfcsum); | ||
1678 | |||
1679 | /* Validate device capabilities */ | ||
1680 | if (csio_hw_validate_caps(hw, mbp)) { | ||
1681 | rv = -ENOENT; | ||
1682 | goto out; | ||
1683 | } | ||
1684 | |||
1685 | /* | ||
1686 | * And now tell the firmware to use the configuration we just loaded. | ||
1687 | */ | ||
1688 | caps_cmd->op_to_write = | ||
1689 | htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | | ||
1690 | FW_CMD_REQUEST | | ||
1691 | FW_CMD_WRITE); | ||
1692 | caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd)); | ||
1693 | |||
1694 | if (csio_mb_issue(hw, mbp)) { | ||
1695 | csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n"); | ||
1696 | goto out; | ||
1697 | } | ||
1698 | |||
1699 | ret = csio_mb_fw_retval(mbp); | ||
1700 | if (ret != FW_SUCCESS) { | ||
1701 | csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", ret); | ||
1702 | goto out; | ||
1703 | } | ||
1704 | |||
1705 | rv = 0; | ||
1706 | out: | ||
1707 | mempool_free(mbp, hw->mb_mempool); | ||
1708 | return rv; | ||
1709 | } | ||
1710 | |||
1711 | /* | ||
1712 | * csio_get_device_params - Get device parameters. | ||
1713 | * @hw: HW module | ||
1714 | * | ||
1715 | */ | ||
1716 | static int | ||
1717 | csio_get_device_params(struct csio_hw *hw) | ||
1718 | { | ||
1719 | struct csio_wrm *wrm = csio_hw_to_wrm(hw); | ||
1720 | struct csio_mb *mbp; | ||
1721 | enum fw_retval retval; | ||
1722 | u32 param[6]; | ||
1723 | int i, j = 0; | ||
1724 | |||
1725 | /* Initialize portids to -1 */ | ||
1726 | for (i = 0; i < CSIO_MAX_PPORTS; i++) | ||
1727 | hw->pport[i].portid = -1; | ||
1728 | |||
1729 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
1730 | if (!mbp) { | ||
1731 | CSIO_INC_STATS(hw, n_err_nomem); | ||
1732 | return -ENOMEM; | ||
1733 | } | ||
1734 | |||
1735 | /* Get port vec information. */ | ||
1736 | param[0] = FW_PARAM_DEV(PORTVEC); | ||
1737 | |||
1738 | /* Get Core clock. */ | ||
1739 | param[1] = FW_PARAM_DEV(CCLK); | ||
1740 | |||
1741 | /* Get EQ id start and end. */ | ||
1742 | param[2] = FW_PARAM_PFVF(EQ_START); | ||
1743 | param[3] = FW_PARAM_PFVF(EQ_END); | ||
1744 | |||
1745 | /* Get IQ id start and end. */ | ||
1746 | param[4] = FW_PARAM_PFVF(IQFLINT_START); | ||
1747 | param[5] = FW_PARAM_PFVF(IQFLINT_END); | ||
1748 | |||
1749 | csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, | ||
1750 | ARRAY_SIZE(param), param, NULL, false, NULL); | ||
1751 | if (csio_mb_issue(hw, mbp)) { | ||
1752 | csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n"); | ||
1753 | mempool_free(mbp, hw->mb_mempool); | ||
1754 | return -EINVAL; | ||
1755 | } | ||
1756 | |||
1757 | csio_mb_process_read_params_rsp(hw, mbp, &retval, | ||
1758 | ARRAY_SIZE(param), param); | ||
1759 | if (retval != FW_SUCCESS) { | ||
1760 | csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n", | ||
1761 | retval); | ||
1762 | mempool_free(mbp, hw->mb_mempool); | ||
1763 | return -EINVAL; | ||
1764 | } | ||
1765 | |||
1766 | /* cache the information. */ | ||
1767 | hw->port_vec = param[0]; | ||
1768 | hw->vpd.cclk = param[1]; | ||
1769 | wrm->fw_eq_start = param[2]; | ||
1770 | wrm->fw_iq_start = param[4]; | ||
1771 | |||
1772 | /* Using FW configured max iqs & eqs */ | ||
1773 | if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) || | ||
1774 | !csio_is_hw_master(hw)) { | ||
1775 | hw->cfg_niq = param[5] - param[4] + 1; | ||
1776 | hw->cfg_neq = param[3] - param[2] + 1; | ||
1777 | csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n", | ||
1778 | hw->cfg_niq, hw->cfg_neq); | ||
1779 | } | ||
1780 | |||
1781 | hw->port_vec &= csio_port_mask; | ||
1782 | |||
1783 | hw->num_pports = hweight32(hw->port_vec); | ||
1784 | |||
1785 | csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n", | ||
1786 | hw->port_vec, hw->num_pports); | ||
1787 | |||
1788 | for (i = 0; i < hw->num_pports; i++) { | ||
1789 | while ((hw->port_vec & (1 << j)) == 0) | ||
1790 | j++; | ||
1791 | hw->pport[i].portid = j++; | ||
1792 | csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid); | ||
1793 | } | ||
1794 | mempool_free(mbp, hw->mb_mempool); | ||
1795 | |||
1796 | return 0; | ||
1797 | } | ||
1798 | |||
1799 | |||
1800 | /* | ||
1801 | * csio_config_device_caps - Get and set device capabilities. | ||
1802 | * @hw: HW module | ||
1803 | * | ||
1804 | */ | ||
1805 | static int | ||
1806 | csio_config_device_caps(struct csio_hw *hw) | ||
1807 | { | ||
1808 | struct csio_mb *mbp; | ||
1809 | enum fw_retval retval; | ||
1810 | int rv = -EINVAL; | ||
1811 | |||
1812 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
1813 | if (!mbp) { | ||
1814 | CSIO_INC_STATS(hw, n_err_nomem); | ||
1815 | return -ENOMEM; | ||
1816 | } | ||
1817 | |||
1818 | /* Get device capabilities */ | ||
1819 | csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL); | ||
1820 | |||
1821 | if (csio_mb_issue(hw, mbp)) { | ||
1822 | csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n"); | ||
1823 | goto out; | ||
1824 | } | ||
1825 | |||
1826 | retval = csio_mb_fw_retval(mbp); | ||
1827 | if (retval != FW_SUCCESS) { | ||
1828 | csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval); | ||
1829 | goto out; | ||
1830 | } | ||
1831 | |||
1832 | /* Validate device capabilities */ | ||
1833 | if (csio_hw_validate_caps(hw, mbp)) | ||
1834 | goto out; | ||
1835 | |||
1836 | /* Don't config device capabilities if already configured */ | ||
1837 | if (hw->fw_state == CSIO_DEV_STATE_INIT) { | ||
1838 | rv = 0; | ||
1839 | goto out; | ||
1840 | } | ||
1841 | |||
1842 | /* Write back desired device capabilities */ | ||
1843 | csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true, | ||
1844 | false, true, NULL); | ||
1845 | |||
1846 | if (csio_mb_issue(hw, mbp)) { | ||
1847 | csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n"); | ||
1848 | goto out; | ||
1849 | } | ||
1850 | |||
1851 | retval = csio_mb_fw_retval(mbp); | ||
1852 | if (retval != FW_SUCCESS) { | ||
1853 | csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval); | ||
1854 | goto out; | ||
1855 | } | ||
1856 | |||
1857 | rv = 0; | ||
1858 | out: | ||
1859 | mempool_free(mbp, hw->mb_mempool); | ||
1860 | return rv; | ||
1861 | } | ||
1862 | |||
1863 | static int | ||
1864 | csio_config_global_rss(struct csio_hw *hw) | ||
1865 | { | ||
1866 | struct csio_mb *mbp; | ||
1867 | enum fw_retval retval; | ||
1868 | |||
1869 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
1870 | if (!mbp) { | ||
1871 | CSIO_INC_STATS(hw, n_err_nomem); | ||
1872 | return -ENOMEM; | ||
1873 | } | ||
1874 | |||
1875 | csio_rss_glb_config(hw, mbp, CSIO_MB_DEFAULT_TMO, | ||
1876 | FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, | ||
1877 | FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | | ||
1878 | FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ | | ||
1879 | FW_RSS_GLB_CONFIG_CMD_TNLALLLKP, | ||
1880 | NULL); | ||
1881 | |||
1882 | if (csio_mb_issue(hw, mbp)) { | ||
1883 | csio_err(hw, "Issue of FW_RSS_GLB_CONFIG_CMD failed!\n"); | ||
1884 | mempool_free(mbp, hw->mb_mempool); | ||
1885 | return -EINVAL; | ||
1886 | } | ||
1887 | |||
1888 | retval = csio_mb_fw_retval(mbp); | ||
1889 | if (retval != FW_SUCCESS) { | ||
1890 | csio_err(hw, "FW_RSS_GLB_CONFIG_CMD returned 0x%x!\n", retval); | ||
1891 | mempool_free(mbp, hw->mb_mempool); | ||
1892 | return -EINVAL; | ||
1893 | } | ||
1894 | |||
1895 | mempool_free(mbp, hw->mb_mempool); | ||
1896 | |||
1897 | return 0; | ||
1898 | } | ||
1899 | |||
1900 | /* | ||
1901 | * csio_config_pfvf - Configure Physical/Virtual functions settings. | ||
1902 | * @hw: HW module | ||
1903 | * | ||
1904 | */ | ||
1905 | static int | ||
1906 | csio_config_pfvf(struct csio_hw *hw) | ||
1907 | { | ||
1908 | struct csio_mb *mbp; | ||
1909 | enum fw_retval retval; | ||
1910 | |||
1911 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
1912 | if (!mbp) { | ||
1913 | CSIO_INC_STATS(hw, n_err_nomem); | ||
1914 | return -ENOMEM; | ||
1915 | } | ||
1916 | |||
1917 | /* | ||
1918 | * For now, allow all PFs access to all ports using a pmask | ||
1919 | * value of 0xF (M_FW_PFVF_CMD_PMASK). Once we have VFs, we will | ||
1920 | * need to provide access based on some rule. | ||
1921 | */ | ||
1922 | csio_mb_pfvf(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, CSIO_NEQ, | ||
1923 | CSIO_NETH_CTRL, CSIO_NIQ_FLINT, 0, 0, CSIO_NVI, CSIO_CMASK, | ||
1924 | CSIO_PMASK, CSIO_NEXACTF, CSIO_R_CAPS, CSIO_WX_CAPS, NULL); | ||
1925 | |||
1926 | if (csio_mb_issue(hw, mbp)) { | ||
1927 | csio_err(hw, "Issue of FW_PFVF_CMD failed!\n"); | ||
1928 | mempool_free(mbp, hw->mb_mempool); | ||
1929 | return -EINVAL; | ||
1930 | } | ||
1931 | |||
1932 | retval = csio_mb_fw_retval(mbp); | ||
1933 | if (retval != FW_SUCCESS) { | ||
1934 | csio_err(hw, "FW_PFVF_CMD returned 0x%x!\n", retval); | ||
1935 | mempool_free(mbp, hw->mb_mempool); | ||
1936 | return -EINVAL; | ||
1937 | } | ||
1938 | |||
1939 | mempool_free(mbp, hw->mb_mempool); | ||
1940 | |||
1941 | return 0; | ||
1942 | } | ||
1943 | |||
1944 | /* | ||
1945 | * csio_enable_ports - Bring up all available ports. | ||
1946 | * @hw: HW module. | ||
1947 | * | ||
1948 | */ | ||
1949 | static int | ||
1950 | csio_enable_ports(struct csio_hw *hw) | ||
1951 | { | ||
1952 | struct csio_mb *mbp; | ||
1953 | enum fw_retval retval; | ||
1954 | uint8_t portid; | ||
1955 | int i; | ||
1956 | |||
1957 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
1958 | if (!mbp) { | ||
1959 | CSIO_INC_STATS(hw, n_err_nomem); | ||
1960 | return -ENOMEM; | ||
1961 | } | ||
1962 | |||
1963 | for (i = 0; i < hw->num_pports; i++) { | ||
1964 | portid = hw->pport[i].portid; | ||
1965 | |||
1966 | /* Read PORT information */ | ||
1967 | csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, | ||
1968 | false, 0, 0, NULL); | ||
1969 | |||
1970 | if (csio_mb_issue(hw, mbp)) { | ||
1971 | csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n", | ||
1972 | portid); | ||
1973 | mempool_free(mbp, hw->mb_mempool); | ||
1974 | return -EINVAL; | ||
1975 | } | ||
1976 | |||
1977 | csio_mb_process_read_port_rsp(hw, mbp, &retval, | ||
1978 | &hw->pport[i].pcap); | ||
1979 | if (retval != FW_SUCCESS) { | ||
1980 | csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n", | ||
1981 | portid, retval); | ||
1982 | mempool_free(mbp, hw->mb_mempool); | ||
1983 | return -EINVAL; | ||
1984 | } | ||
1985 | |||
1986 | /* Write back PORT information */ | ||
1987 | csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true, | ||
1988 | (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL); | ||
1989 | |||
1990 | if (csio_mb_issue(hw, mbp)) { | ||
1991 | csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n", | ||
1992 | portid); | ||
1993 | mempool_free(mbp, hw->mb_mempool); | ||
1994 | return -EINVAL; | ||
1995 | } | ||
1996 | |||
1997 | retval = csio_mb_fw_retval(mbp); | ||
1998 | if (retval != FW_SUCCESS) { | ||
1999 | csio_err(hw, "FW_PORT_CMD(w) port:%d failed: 0x%x\n", | ||
2000 | portid, retval); | ||
2001 | mempool_free(mbp, hw->mb_mempool); | ||
2002 | return -EINVAL; | ||
2003 | } | ||
2004 | |||
2005 | } /* For all ports */ | ||
2006 | |||
2007 | mempool_free(mbp, hw->mb_mempool); | ||
2008 | |||
2009 | return 0; | ||
2010 | } | ||
2011 | |||
2012 | /* | ||
2013 | * csio_get_fcoe_resinfo - Read fcoe fw resource info. | ||
2014 | * @hw: HW module | ||
2015 | * Issued with lock held. | ||
2016 | */ | ||
2017 | static int | ||
2018 | csio_get_fcoe_resinfo(struct csio_hw *hw) | ||
2019 | { | ||
2020 | struct csio_fcoe_res_info *res_info = &hw->fres_info; | ||
2021 | struct fw_fcoe_res_info_cmd *rsp; | ||
2022 | struct csio_mb *mbp; | ||
2023 | enum fw_retval retval; | ||
2024 | |||
2025 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
2026 | if (!mbp) { | ||
2027 | CSIO_INC_STATS(hw, n_err_nomem); | ||
2028 | return -ENOMEM; | ||
2029 | } | ||
2030 | |||
2031 | /* Get FCoE FW resource information */ | ||
2032 | csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); | ||
2033 | |||
2034 | if (csio_mb_issue(hw, mbp)) { | ||
2035 | csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n"); | ||
2036 | mempool_free(mbp, hw->mb_mempool); | ||
2037 | return -EINVAL; | ||
2038 | } | ||
2039 | |||
2040 | rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb); | ||
2041 | retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16)); | ||
2042 | if (retval != FW_SUCCESS) { | ||
2043 | csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret: 0x%x\n", | ||
2044 | retval); | ||
2045 | mempool_free(mbp, hw->mb_mempool); | ||
2046 | return -EINVAL; | ||
2047 | } | ||
2048 | |||
2049 | res_info->e_d_tov = ntohs(rsp->e_d_tov); | ||
2050 | res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq); | ||
2051 | res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els); | ||
2052 | res_info->r_r_tov = ntohs(rsp->r_r_tov); | ||
2053 | res_info->max_xchgs = ntohl(rsp->max_xchgs); | ||
2054 | res_info->max_ssns = ntohl(rsp->max_ssns); | ||
2055 | res_info->used_xchgs = ntohl(rsp->used_xchgs); | ||
2056 | res_info->used_ssns = ntohl(rsp->used_ssns); | ||
2057 | res_info->max_fcfs = ntohl(rsp->max_fcfs); | ||
2058 | res_info->max_vnps = ntohl(rsp->max_vnps); | ||
2059 | res_info->used_fcfs = ntohl(rsp->used_fcfs); | ||
2060 | res_info->used_vnps = ntohl(rsp->used_vnps); | ||
2061 | |||
2062 | csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns, | ||
2063 | res_info->max_xchgs); | ||
2064 | mempool_free(mbp, hw->mb_mempool); | ||
2065 | |||
2066 | return 0; | ||
2067 | } | ||
2068 | |||
2069 | static int | ||
2070 | csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param) | ||
2071 | { | ||
2072 | struct csio_mb *mbp; | ||
2073 | enum fw_retval retval; | ||
2074 | u32 _param[1]; | ||
2075 | |||
2076 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
2077 | if (!mbp) { | ||
2078 | CSIO_INC_STATS(hw, n_err_nomem); | ||
2079 | return -ENOMEM; | ||
2080 | } | ||
2081 | |||
2082 | /* | ||
2083 | * Find out whether we're dealing with a version of | ||
2084 | * the firmware which has configuration file support. | ||
2085 | */ | ||
2086 | _param[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | | ||
2087 | FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF)); | ||
2088 | |||
2089 | csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, | ||
2090 | ARRAY_SIZE(_param), _param, NULL, false, NULL); | ||
2091 | if (csio_mb_issue(hw, mbp)) { | ||
2092 | csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n"); | ||
2093 | mempool_free(mbp, hw->mb_mempool); | ||
2094 | return -EINVAL; | ||
2095 | } | ||
2096 | |||
2097 | csio_mb_process_read_params_rsp(hw, mbp, &retval, | ||
2098 | ARRAY_SIZE(_param), _param); | ||
2099 | if (retval != FW_SUCCESS) { | ||
2100 | csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n", | ||
2101 | retval); | ||
2102 | mempool_free(mbp, hw->mb_mempool); | ||
2103 | return -EINVAL; | ||
2104 | } | ||
2105 | |||
2106 | mempool_free(mbp, hw->mb_mempool); | ||
2107 | *param = _param[0]; | ||
2108 | |||
2109 | return 0; | ||
2110 | } | ||
2111 | |||
2112 | static int | ||
2113 | csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path) | ||
2114 | { | ||
2115 | int ret = 0; | ||
2116 | const struct firmware *cf; | ||
2117 | struct pci_dev *pci_dev = hw->pdev; | ||
2118 | struct device *dev = &pci_dev->dev; | ||
2119 | |||
2120 | unsigned int mtype = 0, maddr = 0; | ||
2121 | uint32_t *cfg_data; | ||
2122 | int value_to_add = 0; | ||
2123 | |||
2124 | ret = request_firmware(&cf, CSIO_CF_FNAME, dev); | ||
2125 | if (ret < 0) { | ||
2126 | csio_err(hw, "could not find config file " CSIO_CF_FNAME ", err: %d\n", ret); | ||
2127 | return -ENOENT; | ||
2128 | } | ||
2129 | |||
2130 | if (cf->size % 4 != 0) | ||
2131 | value_to_add = 4 - (cf->size % 4); | ||
2132 | cfg_data = kzalloc(cf->size + value_to_add, GFP_KERNEL); | ||
2133 | if (cfg_data == NULL) { | ||
2134 | ret = -ENOMEM; | ||
2135 | goto leave; | ||
2136 | } | ||
2137 | memcpy((void *)cfg_data, (const void *)cf->data, cf->size); | ||
2138 | if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) { | ||
2139 | ret = -EINVAL; | ||
2140 | goto leave; | ||
2141 | } | ||
2142 | |||
2143 | mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param); | ||
2144 | maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16; | ||
2145 | ret = csio_memory_write(hw, mtype, maddr, | ||
2146 | cf->size + value_to_add, cfg_data); | ||
2147 | if (ret == 0) { | ||
2148 | csio_info(hw, "config file upgraded to " CSIO_CF_FNAME "\n"); | ||
2149 | strncpy(path, "/lib/firmware/" CSIO_CF_FNAME, 64); | ||
2150 | } | ||
2151 | |||
2152 | leave: | ||
2153 | kfree(cfg_data); | ||
2154 | release_firmware(cf); | ||
2155 | return ret; | ||
2156 | } | ||
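
The value_to_add arithmetic above simply rounds the config file up to the next 4-byte multiple before csio_memory_write(); because the buffer comes from kzalloc(), the pad bytes are guaranteed zero. Equivalently, as a sketch (assuming the kernel's ALIGN() helper is visible in this context):

	size_t padded_len = ALIGN(cf->size, 4);	/* == cf->size + value_to_add */
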
2157 | |||
2158 | /* | ||
2159 | * HW initialization: contact FW, obtain config, perform basic init. | ||
2160 | * | ||
2161 | * If the firmware we're dealing with has Configuration File support, then | ||
2162 | * we use that to perform all configuration -- either using the configuration | ||
2163 | * file stored in flash on the adapter or using a filesystem-local file | ||
2164 | * if available. | ||
2165 | * | ||
2166 | * If we don't have configuration file support in the firmware, then we'll | ||
2167 | * have to set things up the old-fashioned way with hard-coded register | ||
2168 | * writes and firmware commands ... | ||
2169 | */ | ||
2170 | |||
2171 | /* | ||
2172 | * Attempt to initialize the HW via a Firmware Configuration File. | ||
2173 | */ | ||
2174 | static int | ||
2175 | csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) | ||
2176 | { | ||
2177 | unsigned int mtype, maddr; | ||
2178 | int rv; | ||
2179 | uint32_t finiver, finicsum, cfcsum; | ||
2180 | int using_flash; | ||
2181 | char path[64]; | ||
2182 | |||
2183 | /* | ||
2184 | * Reset device if necessary | ||
2185 | */ | ||
2186 | if (reset) { | ||
2187 | rv = csio_do_reset(hw, true); | ||
2188 | if (rv != 0) | ||
2189 | goto bye; | ||
2190 | } | ||
2191 | |||
2192 | /* | ||
2193 | * If we have a configuration file on the host, use that. | ||
2194 | * Otherwise, fall back to the configuration file stored | ||
2195 | * in the HW flash ... | ||
2196 | */ | ||
2197 | spin_unlock_irq(&hw->lock); | ||
2198 | rv = csio_hw_flash_config(hw, fw_cfg_param, path); | ||
2199 | spin_lock_irq(&hw->lock); | ||
2200 | if (rv != 0) { | ||
2201 | if (rv == -ENOENT) { | ||
2202 | /* | ||
2203 | * config file was not found. Use default | ||
2204 | * config file from flash. | ||
2205 | */ | ||
2206 | mtype = FW_MEMTYPE_CF_FLASH; | ||
2207 | maddr = csio_hw_flash_cfg_addr(hw); | ||
2208 | using_flash = 1; | ||
2209 | } else { | ||
2210 | /* | ||
2211 | * Flashing the config file failed; bail out and let | ||
2212 | * the caller fall back to the hard-wired config. | ||
2213 | */ | ||
2214 | goto bye; | ||
2215 | } | ||
2216 | } else { | ||
2217 | mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param); | ||
2218 | maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16; | ||
2219 | using_flash = 0; | ||
2220 | } | ||
2221 | |||
2222 | hw->cfg_store = (uint8_t)mtype; | ||
2223 | |||
2224 | /* | ||
2225 | * Issue a Capability Configuration command to the firmware to get it | ||
2226 | * to parse the Configuration File. | ||
2227 | */ | ||
2228 | rv = csio_hw_fw_config_file(hw, mtype, maddr, &finiver, | ||
2229 | &finicsum, &cfcsum); | ||
2230 | if (rv != 0) | ||
2231 | goto bye; | ||
2232 | |||
2233 | hw->cfg_finiver = finiver; | ||
2234 | hw->cfg_finicsum = finicsum; | ||
2235 | hw->cfg_cfcsum = cfcsum; | ||
2236 | hw->cfg_csum_status = true; | ||
2237 | |||
2238 | if (finicsum != cfcsum) { | ||
2239 | csio_warn(hw, | ||
2240 | "Config File checksum mismatch: csum=%#x, computed=%#x\n", | ||
2241 | finicsum, cfcsum); | ||
2242 | |||
2243 | hw->cfg_csum_status = false; | ||
2244 | } | ||
2245 | |||
2246 | /* | ||
2247 | * Note that we're now operating with parameters supplied by the | ||
2248 | * Configuration File, rather than hard-wired initialization | ||
2249 | * constants buried in the driver. | ||
2250 | */ | ||
2251 | hw->flags |= CSIO_HWF_USING_SOFT_PARAMS; | ||
2252 | |||
2253 | /* device parameters */ | ||
2254 | rv = csio_get_device_params(hw); | ||
2255 | if (rv != 0) | ||
2256 | goto bye; | ||
2257 | |||
2258 | /* Configure SGE */ | ||
2259 | csio_wr_sge_init(hw); | ||
2260 | |||
2261 | /* | ||
2262 | * And finally tell the firmware to initialize itself using the | ||
2263 | * parameters from the Configuration File. | ||
2264 | */ | ||
2265 | /* Post event to notify completion of configuration */ | ||
2266 | csio_post_event(&hw->sm, CSIO_HWE_INIT); | ||
2267 | |||
2268 | csio_info(hw, | ||
2269 | "Firmware Configuration File %s, version %#x, computed checksum %#x\n", | ||
2270 | (using_flash ? "in device FLASH" : path), finiver, cfcsum); | ||
2271 | |||
2272 | return 0; | ||
2273 | |||
2274 | /* | ||
2275 | * Something bad happened. Return the error ... | ||
2276 | */ | ||
2277 | bye: | ||
2278 | hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS; | ||
2279 | csio_dbg(hw, "Configuration file error %d\n", rv); | ||
2280 | return rv; | ||
2281 | } | ||
2282 | |||
2283 | /* | ||
2284 | * Attempt to initialize the adapter via hard-coded, driver supplied | ||
2285 | * parameters ... | ||
2286 | */ | ||
2287 | static int | ||
2288 | csio_hw_no_fwconfig(struct csio_hw *hw, int reset) | ||
2289 | { | ||
2290 | int rv; | ||
2291 | /* | ||
2292 | * Reset device if necessary | ||
2293 | */ | ||
2294 | if (reset) { | ||
2295 | rv = csio_do_reset(hw, true); | ||
2296 | if (rv != 0) | ||
2297 | goto out; | ||
2298 | } | ||
2299 | |||
2300 | /* Get and set device capabilities */ | ||
2301 | rv = csio_config_device_caps(hw); | ||
2302 | if (rv != 0) | ||
2303 | goto out; | ||
2304 | |||
2305 | /* Config Global RSS command */ | ||
2306 | rv = csio_config_global_rss(hw); | ||
2307 | if (rv != 0) | ||
2308 | goto out; | ||
2309 | |||
2310 | /* Configure PF/VF capabilities of device */ | ||
2311 | rv = csio_config_pfvf(hw); | ||
2312 | if (rv != 0) | ||
2313 | goto out; | ||
2314 | |||
2315 | /* device parameters */ | ||
2316 | rv = csio_get_device_params(hw); | ||
2317 | if (rv != 0) | ||
2318 | goto out; | ||
2319 | |||
2320 | /* Configure SGE */ | ||
2321 | csio_wr_sge_init(hw); | ||
2322 | |||
2323 | /* Post event to notify completion of configuration */ | ||
2324 | csio_post_event(&hw->sm, CSIO_HWE_INIT); | ||
2325 | |||
2326 | out: | ||
2327 | return rv; | ||
2328 | } | ||
2329 | |||
2330 | /* | ||
2331 | * Returns 0 if the firmware was flashed successfully, -ECANCELED if | ||
2332 | * flashing was not attempted because the card already had the latest | ||
2333 | * firmware, and a negative errno on any failure. | ||
2334 | */ | ||
2336 | static int | ||
2337 | csio_hw_flash_fw(struct csio_hw *hw) | ||
2338 | { | ||
2339 | int ret = -ECANCELED; | ||
2340 | const struct firmware *fw; | ||
2341 | const struct fw_hdr *hdr; | ||
2342 | u32 fw_ver; | ||
2343 | struct pci_dev *pci_dev = hw->pdev; | ||
2344 | struct device *dev = &pci_dev->dev; | ||
2345 | |||
2346 | ret = request_firmware(&fw, CSIO_FW_FNAME, dev); | ||
2347 | if (ret < 0) { | ||
2348 | csio_err(hw, "could not find firmware image " CSIO_FW_FNAME ", err: %d\n", ret); | ||
2349 | return -EINVAL; | ||
2350 | } | ||
2351 | |||
2352 | hdr = (const struct fw_hdr *)fw->data; | ||
2353 | fw_ver = ntohl(hdr->fw_ver); | ||
2354 | if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR) { | ||
2355 | ret = -EINVAL; /* wrong major version, won't do */ | ||
2356 | } else if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR || | ||
2357 | fw_ver > hw->fwrev) { | ||
2358 | /* | ||
2359 | * The flash FW is unusable or we found something newer: | ||
2360 | * load it. | ||
2361 | */ | ||
2362 | ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size, | ||
2363 | /*force=*/false); | ||
2364 | if (!ret) | ||
2365 | csio_info(hw, "firmware upgraded to version %pI4 from " | ||
2366 | CSIO_FW_FNAME "\n", &hdr->fw_ver); | ||
2367 | else | ||
2368 | csio_err(hw, "firmware upgrade failed! err=%d\n", ret); | ||
2369 | } | ||
2370 | |||
2371 | release_firmware(fw); | ||
2372 | |||
2373 | return ret; | ||
2374 | } | ||
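
FW_HDR_FW_VER_MAJOR_GET() above extracts only the major field of the 32-bit version word; the same one-byte-per-component packing is what lets the success message abuse the %pI4 (dotted-quad) format specifier to print the version as "a.b.c.d". A sketch of the assumed layout (verify against the FW_HDR_FW_VER_*_GET accessors in the firmware headers):

	u32 ver = ntohl(hdr->fw_ver);
	u8 major = (ver >> 24) & 0xff;	/* FW_HDR_FW_VER_MAJOR_GET() */
	u8 minor = (ver >> 16) & 0xff;
	u8 micro = (ver >> 8) & 0xff;
	u8 build = ver & 0xff;
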
2375 | |||
2376 | |||
2377 | /* | ||
2378 | * csio_hw_configure - Configure HW | ||
2379 | * @hw - HW module | ||
2380 | * | ||
2381 | */ | ||
2382 | static void | ||
2383 | csio_hw_configure(struct csio_hw *hw) | ||
2384 | { | ||
2385 | int reset = 1; | ||
2386 | int rv; | ||
2387 | u32 param[1]; | ||
2388 | |||
2389 | rv = csio_hw_dev_ready(hw); | ||
2390 | if (rv != 0) { | ||
2391 | CSIO_INC_STATS(hw, n_err_fatal); | ||
2392 | csio_post_event(&hw->sm, CSIO_HWE_FATAL); | ||
2393 | goto out; | ||
2394 | } | ||
2395 | |||
2396 | /* HW version */ | ||
2397 | hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV); | ||
2398 | |||
2399 | /* Needed for FW download */ | ||
2400 | rv = csio_hw_get_flash_params(hw); | ||
2401 | if (rv != 0) { | ||
2402 | csio_err(hw, "Failed to get serial flash params rv:%d\n", rv); | ||
2403 | csio_post_event(&hw->sm, CSIO_HWE_FATAL); | ||
2404 | goto out; | ||
2405 | } | ||
2406 | |||
2407 | /* Set pci completion timeout value to 4 seconds. */ | ||
2408 | csio_set_pcie_completion_timeout(hw, 0xd); | ||
2409 | |||
2410 | csio_hw_set_mem_win(hw); | ||
2411 | |||
2412 | rv = csio_hw_get_fw_version(hw, &hw->fwrev); | ||
2413 | if (rv != 0) | ||
2414 | goto out; | ||
2415 | |||
2416 | csio_hw_print_fw_version(hw, "Firmware revision"); | ||
2417 | |||
2418 | rv = csio_do_hello(hw, &hw->fw_state); | ||
2419 | if (rv != 0) { | ||
2420 | CSIO_INC_STATS(hw, n_err_fatal); | ||
2421 | csio_post_event(&hw->sm, CSIO_HWE_FATAL); | ||
2422 | goto out; | ||
2423 | } | ||
2424 | |||
2425 | /* Read vpd */ | ||
2426 | rv = csio_hw_get_vpd_params(hw, &hw->vpd); | ||
2427 | if (rv != 0) | ||
2428 | goto out; | ||
2429 | |||
2430 | if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { | ||
2431 | rv = csio_hw_check_fw_version(hw); | ||
2432 | if (rv == -EINVAL) { | ||
2433 | |||
2434 | /* Do firmware update */ | ||
2435 | spin_unlock_irq(&hw->lock); | ||
2436 | rv = csio_hw_flash_fw(hw); | ||
2437 | spin_lock_irq(&hw->lock); | ||
2438 | |||
2439 | if (rv == 0) { | ||
2440 | reset = 0; | ||
2441 | /* | ||
2442 | * Note that the chip was reset as part of the | ||
2443 | * firmware upgrade so we don't reset it again | ||
2444 | * below and grab the new firmware version. | ||
2445 | */ | ||
2446 | rv = csio_hw_check_fw_version(hw); | ||
2447 | } | ||
2448 | } | ||
2449 | /* | ||
2450 | * If the firmware doesn't support Configuration | ||
2451 | * Files, use the old Driver-based, hard-wired | ||
2452 | * initialization. Otherwise, try using the | ||
2453 | * Configuration File support and fall back to the | ||
2454 | * Driver-based initialization if there's no | ||
2455 | * Configuration File found. | ||
2456 | */ | ||
2457 | if (csio_hw_check_fwconfig(hw, param) == 0) { | ||
2458 | rv = csio_hw_use_fwconfig(hw, reset, param); | ||
2459 | if (rv == -ENOENT) | ||
2460 | goto out; | ||
2461 | if (rv != 0) { | ||
2462 | csio_info(hw, | ||
2463 | "No Configuration File present " | ||
2464 | "on adapter. Using hard-wired " | ||
2465 | "configuration parameters.\n"); | ||
2466 | rv = csio_hw_no_fwconfig(hw, reset); | ||
2467 | } | ||
2468 | } else { | ||
2469 | rv = csio_hw_no_fwconfig(hw, reset); | ||
2470 | } | ||
2471 | |||
2472 | if (rv != 0) | ||
2473 | goto out; | ||
2474 | |||
2475 | } else { | ||
2476 | if (hw->fw_state == CSIO_DEV_STATE_INIT) { | ||
2477 | |||
2478 | /* device parameters */ | ||
2479 | rv = csio_get_device_params(hw); | ||
2480 | if (rv != 0) | ||
2481 | goto out; | ||
2482 | |||
2483 | /* Get device capabilities */ | ||
2484 | rv = csio_config_device_caps(hw); | ||
2485 | if (rv != 0) | ||
2486 | goto out; | ||
2487 | |||
2488 | /* Configure SGE */ | ||
2489 | csio_wr_sge_init(hw); | ||
2490 | |||
2491 | /* Post event to notify completion of configuration */ | ||
2492 | csio_post_event(&hw->sm, CSIO_HWE_INIT); | ||
2493 | goto out; | ||
2494 | } | ||
2495 | } /* if not master or fw_state == INIT */ | ||
2496 | |||
2497 | out: | ||
2498 | return; | ||
2499 | } | ||
2500 | |||
2501 | /* | ||
2502 | * csio_hw_initialize - Initialize HW | ||
2503 | * @hw - HW module | ||
2504 | * | ||
2505 | */ | ||
2506 | static void | ||
2507 | csio_hw_initialize(struct csio_hw *hw) | ||
2508 | { | ||
2509 | struct csio_mb *mbp; | ||
2510 | enum fw_retval retval; | ||
2511 | int rv; | ||
2512 | int i; | ||
2513 | |||
2514 | if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { | ||
2515 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
2516 | if (!mbp) | ||
2517 | goto out; | ||
2518 | |||
2519 | csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); | ||
2520 | |||
2521 | if (csio_mb_issue(hw, mbp)) { | ||
2522 | csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n"); | ||
2523 | goto free_and_out; | ||
2524 | } | ||
2525 | |||
2526 | retval = csio_mb_fw_retval(mbp); | ||
2527 | if (retval != FW_SUCCESS) { | ||
2528 | csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n", | ||
2529 | retval); | ||
2530 | goto free_and_out; | ||
2531 | } | ||
2532 | |||
2533 | mempool_free(mbp, hw->mb_mempool); | ||
2534 | } | ||
2535 | |||
2536 | rv = csio_get_fcoe_resinfo(hw); | ||
2537 | if (rv != 0) { | ||
2538 | csio_err(hw, "Failed to read fcoe resource info: %d\n", rv); | ||
2539 | goto out; | ||
2540 | } | ||
2541 | |||
2542 | spin_unlock_irq(&hw->lock); | ||
2543 | rv = csio_config_queues(hw); | ||
2544 | spin_lock_irq(&hw->lock); | ||
2545 | |||
2546 | if (rv != 0) { | ||
2547 | csio_err(hw, "Config of queues failed!: %d\n", rv); | ||
2548 | goto out; | ||
2549 | } | ||
2550 | |||
2551 | for (i = 0; i < hw->num_pports; i++) | ||
2552 | hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA; | ||
2553 | |||
2554 | if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { | ||
2555 | rv = csio_enable_ports(hw); | ||
2556 | if (rv != 0) { | ||
2557 | csio_err(hw, "Failed to enable ports: %d\n", rv); | ||
2558 | goto out; | ||
2559 | } | ||
2560 | } | ||
2561 | |||
2562 | csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE); | ||
2563 | return; | ||
2564 | |||
2565 | free_and_out: | ||
2566 | mempool_free(mbp, hw->mb_mempool); | ||
2567 | out: | ||
2568 | return; | ||
2569 | } | ||
2570 | |||
2571 | #define PF_INTR_MASK (PFSW | PFCIM) | ||
2572 | |||
2573 | /* | ||
2574 | * csio_hw_intr_enable - Enable HW interrupts | ||
2575 | * @hw: Pointer to HW module. | ||
2576 | * | ||
2577 | * Enable interrupts in HW registers. | ||
2578 | */ | ||
2579 | static void | ||
2580 | csio_hw_intr_enable(struct csio_hw *hw) | ||
2581 | { | ||
2582 | uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw)); | ||
2583 | uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI)); | ||
2584 | uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE); | ||
2585 | |||
2586 | /* | ||
2587 | * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up | ||
2588 | * by FW, so do nothing for INTX. | ||
2589 | */ | ||
2590 | if (hw->intr_mode == CSIO_IM_MSIX) | ||
2591 | csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG), | ||
2592 | AIVEC(AIVEC_MASK), vec); | ||
2593 | else if (hw->intr_mode == CSIO_IM_MSI) | ||
2594 | csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG), | ||
2595 | AIVEC(AIVEC_MASK), 0); | ||
2596 | |||
2597 | csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE)); | ||
2598 | |||
2599 | /* Turn on MB interrupts - this will internally flush PIO as well */ | ||
2600 | csio_mb_intr_enable(hw); | ||
2601 | |||
2602 | /* These are common registers - only a master can modify them */ | ||
2603 | if (csio_is_hw_master(hw)) { | ||
2604 | /* | ||
2605 | * Disable the Serial FLASH interrupt, if enabled! | ||
2606 | */ | ||
2607 | pl &= (~SF); | ||
2608 | csio_wr_reg32(hw, pl, PL_INT_ENABLE); | ||
2609 | |||
2610 | csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE | | ||
2611 | EGRESS_SIZE_ERR | ERR_INVALID_CIDX_INC | | ||
2612 | ERR_CPL_OPCODE_0 | ERR_DROPPED_DB | | ||
2613 | ERR_DATA_CPL_ON_HIGH_QID1 | | ||
2614 | ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 | | ||
2615 | ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | | ||
2616 | ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | | ||
2617 | ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR, | ||
2618 | SGE_INT_ENABLE3); | ||
2619 | csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf); | ||
2620 | } | ||
2621 | |||
2622 | hw->flags |= CSIO_HWF_HW_INTR_ENABLED; | ||
2623 | |||
2624 | } | ||
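
csio_set_reg_field(hw, reg, mask, val) is used throughout this function as a read-modify-write helper. A sketch of its assumed semantics (the real inline is in csio_hw.h; this is an illustration, not its verbatim body):

	static inline void
	csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
			   uint32_t value)
	{
		uint32_t val = csio_rd_reg32(hw, reg) & ~mask;

		csio_wr_reg32(hw, val | value, reg);
		csio_rd_reg32(hw, reg);		/* flush the posted write */
	}
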
2625 | |||
2626 | /* | ||
2627 | * csio_hw_intr_disable - Disable HW interrupts | ||
2628 | * @hw: Pointer to HW module. | ||
2629 | * | ||
2630 | * Turn off Mailbox and PCI_PF_CFG interrupts. | ||
2631 | */ | ||
2632 | void | ||
2633 | csio_hw_intr_disable(struct csio_hw *hw) | ||
2634 | { | ||
2635 | uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI)); | ||
2636 | |||
2637 | if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED)) | ||
2638 | return; | ||
2639 | |||
2640 | hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED; | ||
2641 | |||
2642 | csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE)); | ||
2643 | if (csio_is_hw_master(hw)) | ||
2644 | csio_set_reg_field(hw, PL_INT_MAP0, 1 << pf, 0); | ||
2645 | |||
2646 | /* Turn off MB interrupts */ | ||
2647 | csio_mb_intr_disable(hw); | ||
2648 | |||
2649 | } | ||
2650 | |||
2651 | static void | ||
2652 | csio_hw_fatal_err(struct csio_hw *hw) | ||
2653 | { | ||
2654 | csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0); | ||
2655 | csio_hw_intr_disable(hw); | ||
2656 | |||
2657 | /* Do not reset HW, we may need FW state for debugging */ | ||
2658 | csio_fatal(hw, "HW Fatal error encountered!\n"); | ||
2659 | } | ||
2660 | |||
2661 | /*****************************************************************************/ | ||
2662 | /* START: HW SM */ | ||
2663 | /*****************************************************************************/ | ||
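
Every csio_hws_*() handler below follows one template: record prev/cur event, bump the per-event counter, then switch on the event, with csio_set_state() swapping in the function that will receive the next event. The plumbing this assumes is simply a function pointer per state machine; a hedged sketch (the real definitions live in csio_defs.h):

	typedef void (*csio_sm_state_t)(void *smp, uint32_t evt);

	struct csio_sm {
		csio_sm_state_t	sm_state;	/* the current state *is* the handler */
		/* ... list linkage etc. elided ... */
	};

	static inline void csio_set_state(void *smp, void *state)
	{
		((struct csio_sm *)smp)->sm_state = (csio_sm_state_t)state;
	}

	static inline void csio_post_event(void *smp, uint32_t evt)
	{
		((struct csio_sm *)smp)->sm_state(smp, evt);
	}
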
2664 | /* | ||
2665 | * csio_hws_uninit - Uninit state | ||
2666 | * @hw - HW module | ||
2667 | * @evt - Event | ||
2668 | * | ||
2669 | */ | ||
2670 | static void | ||
2671 | csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt) | ||
2672 | { | ||
2673 | hw->prev_evt = hw->cur_evt; | ||
2674 | hw->cur_evt = evt; | ||
2675 | CSIO_INC_STATS(hw, n_evt_sm[evt]); | ||
2676 | |||
2677 | switch (evt) { | ||
2678 | case CSIO_HWE_CFG: | ||
2679 | csio_set_state(&hw->sm, csio_hws_configuring); | ||
2680 | csio_hw_configure(hw); | ||
2681 | break; | ||
2682 | |||
2683 | default: | ||
2684 | CSIO_INC_STATS(hw, n_evt_unexp); | ||
2685 | break; | ||
2686 | } | ||
2687 | } | ||
2688 | |||
2689 | /* | ||
2690 | * csio_hws_configuring - Configuring state | ||
2691 | * @hw - HW module | ||
2692 | * @evt - Event | ||
2693 | * | ||
2694 | */ | ||
2695 | static void | ||
2696 | csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt) | ||
2697 | { | ||
2698 | hw->prev_evt = hw->cur_evt; | ||
2699 | hw->cur_evt = evt; | ||
2700 | CSIO_INC_STATS(hw, n_evt_sm[evt]); | ||
2701 | |||
2702 | switch (evt) { | ||
2703 | case CSIO_HWE_INIT: | ||
2704 | csio_set_state(&hw->sm, csio_hws_initializing); | ||
2705 | csio_hw_initialize(hw); | ||
2706 | break; | ||
2707 | |||
2708 | case CSIO_HWE_INIT_DONE: | ||
2709 | csio_set_state(&hw->sm, csio_hws_ready); | ||
2710 | /* Fan out event to all lnode SMs */ | ||
2711 | csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY); | ||
2712 | break; | ||
2713 | |||
2714 | case CSIO_HWE_FATAL: | ||
2715 | csio_set_state(&hw->sm, csio_hws_uninit); | ||
2716 | break; | ||
2717 | |||
2718 | case CSIO_HWE_PCI_REMOVE: | ||
2719 | csio_do_bye(hw); | ||
2720 | break; | ||
2721 | default: | ||
2722 | CSIO_INC_STATS(hw, n_evt_unexp); | ||
2723 | break; | ||
2724 | } | ||
2725 | } | ||
2726 | |||
2727 | /* | ||
2728 | * csio_hws_initializing - Initializing state | ||
2729 | * @hw - HW module | ||
2730 | * @evt - Event | ||
2731 | * | ||
2732 | */ | ||
2733 | static void | ||
2734 | csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt) | ||
2735 | { | ||
2736 | hw->prev_evt = hw->cur_evt; | ||
2737 | hw->cur_evt = evt; | ||
2738 | CSIO_INC_STATS(hw, n_evt_sm[evt]); | ||
2739 | |||
2740 | switch (evt) { | ||
2741 | case CSIO_HWE_INIT_DONE: | ||
2742 | csio_set_state(&hw->sm, csio_hws_ready); | ||
2743 | |||
2744 | /* Fan out event to all lnode SMs */ | ||
2745 | csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY); | ||
2746 | |||
2747 | /* Enable interrupts */ | ||
2748 | csio_hw_intr_enable(hw); | ||
2749 | break; | ||
2750 | |||
2751 | case CSIO_HWE_FATAL: | ||
2752 | csio_set_state(&hw->sm, csio_hws_uninit); | ||
2753 | break; | ||
2754 | |||
2755 | case CSIO_HWE_PCI_REMOVE: | ||
2756 | csio_do_bye(hw); | ||
2757 | break; | ||
2758 | |||
2759 | default: | ||
2760 | CSIO_INC_STATS(hw, n_evt_unexp); | ||
2761 | break; | ||
2762 | } | ||
2763 | } | ||
2764 | |||
2765 | /* | ||
2766 | * csio_hws_ready - Ready state | ||
2767 | * @hw - HW module | ||
2768 | * @evt - Event | ||
2769 | * | ||
2770 | */ | ||
2771 | static void | ||
2772 | csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt) | ||
2773 | { | ||
2774 | /* Remember the event */ | ||
2775 | hw->evtflag = evt; | ||
2776 | |||
2777 | hw->prev_evt = hw->cur_evt; | ||
2778 | hw->cur_evt = evt; | ||
2779 | CSIO_INC_STATS(hw, n_evt_sm[evt]); | ||
2780 | |||
2781 | switch (evt) { | ||
2782 | case CSIO_HWE_HBA_RESET: | ||
2783 | case CSIO_HWE_FW_DLOAD: | ||
2784 | case CSIO_HWE_SUSPEND: | ||
2785 | case CSIO_HWE_PCI_REMOVE: | ||
2786 | case CSIO_HWE_PCIERR_DETECTED: | ||
2787 | csio_set_state(&hw->sm, csio_hws_quiescing); | ||
2788 | /* cleanup all outstanding cmds */ | ||
2789 | if (evt == CSIO_HWE_HBA_RESET || | ||
2790 | evt == CSIO_HWE_PCIERR_DETECTED) | ||
2791 | csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false); | ||
2792 | else | ||
2793 | csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true); | ||
2794 | |||
2795 | csio_hw_intr_disable(hw); | ||
2796 | csio_hw_mbm_cleanup(hw); | ||
2797 | csio_evtq_stop(hw); | ||
2798 | csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP); | ||
2799 | csio_evtq_flush(hw); | ||
2800 | csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw)); | ||
2801 | csio_post_event(&hw->sm, CSIO_HWE_QUIESCED); | ||
2802 | break; | ||
2803 | |||
2804 | case CSIO_HWE_FATAL: | ||
2805 | csio_set_state(&hw->sm, csio_hws_uninit); | ||
2806 | break; | ||
2807 | |||
2808 | default: | ||
2809 | CSIO_INC_STATS(hw, n_evt_unexp); | ||
2810 | break; | ||
2811 | } | ||
2812 | } | ||
2813 | |||
2814 | /* | ||
2815 | * csio_hws_quiescing - Quiescing state | ||
2816 | * @hw - HW module | ||
2817 | * @evt - Event | ||
2818 | * | ||
2819 | */ | ||
2820 | static void | ||
2821 | csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt) | ||
2822 | { | ||
2823 | hw->prev_evt = hw->cur_evt; | ||
2824 | hw->cur_evt = evt; | ||
2825 | CSIO_INC_STATS(hw, n_evt_sm[evt]); | ||
2826 | |||
2827 | switch (evt) { | ||
2828 | case CSIO_HWE_QUIESCED: | ||
2829 | switch (hw->evtflag) { | ||
2830 | case CSIO_HWE_FW_DLOAD: | ||
2831 | csio_set_state(&hw->sm, csio_hws_resetting); | ||
2832 | /* Download firmware */ | ||
2833 | /* Fall through */ | ||
2834 | |||
2835 | case CSIO_HWE_HBA_RESET: | ||
2836 | csio_set_state(&hw->sm, csio_hws_resetting); | ||
2837 | /* Start reset of the HBA */ | ||
2838 | csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET); | ||
2839 | csio_wr_destroy_queues(hw, false); | ||
2840 | csio_do_reset(hw, false); | ||
2841 | csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE); | ||
2842 | break; | ||
2843 | |||
2844 | case CSIO_HWE_PCI_REMOVE: | ||
2845 | csio_set_state(&hw->sm, csio_hws_removing); | ||
2846 | csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE); | ||
2847 | csio_wr_destroy_queues(hw, true); | ||
2848 | /* Now send the bye command */ | ||
2849 | csio_do_bye(hw); | ||
2850 | break; | ||
2851 | |||
2852 | case CSIO_HWE_SUSPEND: | ||
2853 | csio_set_state(&hw->sm, csio_hws_quiesced); | ||
2854 | break; | ||
2855 | |||
2856 | case CSIO_HWE_PCIERR_DETECTED: | ||
2857 | csio_set_state(&hw->sm, csio_hws_pcierr); | ||
2858 | csio_wr_destroy_queues(hw, false); | ||
2859 | break; | ||
2860 | |||
2861 | default: | ||
2862 | CSIO_INC_STATS(hw, n_evt_unexp); | ||
2863 | break; | ||
2864 | |||
2865 | } | ||
2866 | break; | ||
2867 | |||
2868 | default: | ||
2869 | CSIO_INC_STATS(hw, n_evt_unexp); | ||
2870 | break; | ||
2871 | } | ||
2872 | } | ||
2873 | |||
2874 | /* | ||
2875 | * csio_hws_quiesced - Quiesced state | ||
2876 | * @hw - HW module | ||
2877 | * @evt - Event | ||
2878 | * | ||
2879 | */ | ||
2880 | static void | ||
2881 | csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt) | ||
2882 | { | ||
2883 | hw->prev_evt = hw->cur_evt; | ||
2884 | hw->cur_evt = evt; | ||
2885 | CSIO_INC_STATS(hw, n_evt_sm[evt]); | ||
2886 | |||
2887 | switch (evt) { | ||
2888 | case CSIO_HWE_RESUME: | ||
2889 | csio_set_state(&hw->sm, csio_hws_configuring); | ||
2890 | csio_hw_configure(hw); | ||
2891 | break; | ||
2892 | |||
2893 | default: | ||
2894 | CSIO_INC_STATS(hw, n_evt_unexp); | ||
2895 | break; | ||
2896 | } | ||
2897 | } | ||
2898 | |||
2899 | /* | ||
2900 | * csio_hws_resetting - HW Resetting state | ||
2901 | * @hw - HW module | ||
2902 | * @evt - Event | ||
2903 | * | ||
2904 | */ | ||
2905 | static void | ||
2906 | csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt) | ||
2907 | { | ||
2908 | hw->prev_evt = hw->cur_evt; | ||
2909 | hw->cur_evt = evt; | ||
2910 | CSIO_INC_STATS(hw, n_evt_sm[evt]); | ||
2911 | |||
2912 | switch (evt) { | ||
2913 | case CSIO_HWE_HBA_RESET_DONE: | ||
2914 | csio_evtq_start(hw); | ||
2915 | csio_set_state(&hw->sm, csio_hws_configuring); | ||
2916 | csio_hw_configure(hw); | ||
2917 | break; | ||
2918 | |||
2919 | default: | ||
2920 | CSIO_INC_STATS(hw, n_evt_unexp); | ||
2921 | break; | ||
2922 | } | ||
2923 | } | ||
2924 | |||
2925 | /* | ||
2926 | * csio_hws_removing - PCI Hotplug removing state | ||
2927 | * @hw - HW module | ||
2928 | * @evt - Event | ||
2929 | * | ||
2930 | */ | ||
2931 | static void | ||
2932 | csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt) | ||
2933 | { | ||
2934 | hw->prev_evt = hw->cur_evt; | ||
2935 | hw->cur_evt = evt; | ||
2936 | CSIO_INC_STATS(hw, n_evt_sm[evt]); | ||
2937 | |||
2938 | switch (evt) { | ||
2939 | case CSIO_HWE_HBA_RESET: | ||
2940 | if (!csio_is_hw_master(hw)) | ||
2941 | break; | ||
2942 | /* | ||
2943 | * The BYE should have already been issued, so we can't | ||
2944 | * use the mailbox interface. Hence we use the PL_RST | ||
2945 | * register directly. | ||
2946 | */ | ||
2947 | csio_err(hw, "Resetting HW and waiting 2 seconds...\n"); | ||
2948 | csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST); | ||
2949 | mdelay(2000); | ||
2950 | break; | ||
2951 | |||
2952 | /* Should never receive any new events */ | ||
2953 | default: | ||
2954 | CSIO_INC_STATS(hw, n_evt_unexp); | ||
2955 | break; | ||
2956 | |||
2957 | } | ||
2958 | } | ||
2959 | |||
2960 | /* | ||
2961 | * csio_hws_pcierr - PCI Error state | ||
2962 | * @hw - HW module | ||
2963 | * @evt - Event | ||
2964 | * | ||
2965 | */ | ||
2966 | static void | ||
2967 | csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt) | ||
2968 | { | ||
2969 | hw->prev_evt = hw->cur_evt; | ||
2970 | hw->cur_evt = evt; | ||
2971 | CSIO_INC_STATS(hw, n_evt_sm[evt]); | ||
2972 | |||
2973 | switch (evt) { | ||
2974 | case CSIO_HWE_PCIERR_SLOT_RESET: | ||
2975 | csio_evtq_start(hw); | ||
2976 | csio_set_state(&hw->sm, csio_hws_configuring); | ||
2977 | csio_hw_configure(hw); | ||
2978 | break; | ||
2979 | |||
2980 | default: | ||
2981 | CSIO_INC_STATS(hw, n_evt_unexp); | ||
2982 | break; | ||
2983 | } | ||
2984 | } | ||
2985 | |||
2986 | /*****************************************************************************/ | ||
2987 | /* END: HW SM */ | ||
2988 | /*****************************************************************************/ | ||
2989 | |||
2990 | /* Slow path handlers */ | ||
2991 | struct intr_info { | ||
2992 | unsigned int mask; /* bits to check in interrupt status */ | ||
2993 | const char *msg; /* message to print or NULL */ | ||
2994 | short stat_idx; /* stat counter to increment or -1 */ | ||
2995 | unsigned short fatal; /* whether the condition reported is fatal */ | ||
2996 | }; | ||
2997 | |||
2998 | /* | ||
2999 | * csio_handle_intr_status - table driven interrupt handler | ||
3000 | * @hw: HW instance | ||
3001 | * @reg: the interrupt status register to process | ||
3002 | * @acts: table of interrupt actions | ||
3003 | * | ||
3004 | * A table driven interrupt handler that applies a set of masks to an | ||
3005 | * interrupt status word and performs the corresponding actions if the | ||
3006 | * interrupts described by the mask have occurred. The actions include | ||
3007 | * optionally emitting a warning or alert message. The table is terminated | ||
3008 | * by an entry specifying mask 0. Returns the number of fatal interrupt | ||
3009 | * conditions. | ||
3010 | */ | ||
3011 | static int | ||
3012 | csio_handle_intr_status(struct csio_hw *hw, unsigned int reg, | ||
3013 | const struct intr_info *acts) | ||
3014 | { | ||
3015 | int fatal = 0; | ||
3016 | unsigned int mask = 0; | ||
3017 | unsigned int status = csio_rd_reg32(hw, reg); | ||
3018 | |||
3019 | for ( ; acts->mask; ++acts) { | ||
3020 | if (!(status & acts->mask)) | ||
3021 | continue; | ||
3022 | if (acts->fatal) { | ||
3023 | fatal++; | ||
3024 | csio_fatal(hw, "Fatal %s (0x%x)\n", | ||
3025 | acts->msg, status & acts->mask); | ||
3026 | } else if (acts->msg) | ||
3027 | csio_info(hw, "%s (0x%x)\n", | ||
3028 | acts->msg, status & acts->mask); | ||
3029 | mask |= acts->mask; | ||
3030 | } | ||
3031 | status &= mask; | ||
3032 | if (status) /* clear processed interrupts */ | ||
3033 | csio_wr_reg32(hw, status, reg); | ||
3034 | return fatal; | ||
3035 | } | ||
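
Each hardware-block handler below is then just a table plus one call. A minimal illustrative instantiation (the register and bit names here are hypothetical):

	static struct intr_info example_intr_info[] = {
		{ 0x00000001, "example FIFO parity error", -1, 1 },	/* fatal */
		{ 0x00000002, "example recoverable error", -1, 0 },	/* info  */
		{ 0, NULL, 0, 0 }	/* mask 0 terminates the table */
	};

	if (csio_handle_intr_status(hw, EXAMPLE_INT_CAUSE, example_intr_info))
		csio_hw_fatal_err(hw);
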
3036 | |||
3037 | /* | ||
3038 | * Interrupt handler for the PCIE module. | ||
3039 | */ | ||
3040 | static void | ||
3041 | csio_pcie_intr_handler(struct csio_hw *hw) | ||
3042 | { | ||
3043 | static struct intr_info sysbus_intr_info[] = { | ||
3044 | { RNPP, "RXNP array parity error", -1, 1 }, | ||
3045 | { RPCP, "RXPC array parity error", -1, 1 }, | ||
3046 | { RCIP, "RXCIF array parity error", -1, 1 }, | ||
3047 | { RCCP, "Rx completions control array parity error", -1, 1 }, | ||
3048 | { RFTP, "RXFT array parity error", -1, 1 }, | ||
3049 | { 0, NULL, 0, 0 } | ||
3050 | }; | ||
3051 | static struct intr_info pcie_port_intr_info[] = { | ||
3052 | { TPCP, "TXPC array parity error", -1, 1 }, | ||
3053 | { TNPP, "TXNP array parity error", -1, 1 }, | ||
3054 | { TFTP, "TXFT array parity error", -1, 1 }, | ||
3055 | { TCAP, "TXCA array parity error", -1, 1 }, | ||
3056 | { TCIP, "TXCIF array parity error", -1, 1 }, | ||
3057 | { RCAP, "RXCA array parity error", -1, 1 }, | ||
3058 | { OTDD, "outbound request TLP discarded", -1, 1 }, | ||
3059 | { RDPE, "Rx data parity error", -1, 1 }, | ||
3060 | { TDUE, "Tx uncorrectable data error", -1, 1 }, | ||
3061 | { 0, NULL, 0, 0 } | ||
3062 | }; | ||
3063 | static struct intr_info pcie_intr_info[] = { | ||
3064 | { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, | ||
3065 | { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, | ||
3066 | { MSIDATAPERR, "MSI data parity error", -1, 1 }, | ||
3067 | { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, | ||
3068 | { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, | ||
3069 | { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, | ||
3070 | { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, | ||
3071 | { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 }, | ||
3072 | { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 }, | ||
3073 | { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, | ||
3074 | { CCNTPERR, "PCI CMD channel count parity error", -1, 1 }, | ||
3075 | { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, | ||
3076 | { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, | ||
3077 | { DCNTPERR, "PCI DMA channel count parity error", -1, 1 }, | ||
3078 | { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, | ||
3079 | { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, | ||
3080 | { HCNTPERR, "PCI HMA channel count parity error", -1, 1 }, | ||
3081 | { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, | ||
3082 | { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, | ||
3083 | { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, | ||
3084 | { FIDPERR, "PCI FID parity error", -1, 1 }, | ||
3085 | { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 }, | ||
3086 | { MATAGPERR, "PCI MA tag parity error", -1, 1 }, | ||
3087 | { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, | ||
3088 | { RXCPLPERR, "PCI Rx completion parity error", -1, 1 }, | ||
3089 | { RXWRPERR, "PCI Rx write parity error", -1, 1 }, | ||
3090 | { RPLPERR, "PCI replay buffer parity error", -1, 1 }, | ||
3091 | { PCIESINT, "PCI core secondary fault", -1, 1 }, | ||
3092 | { PCIEPINT, "PCI core primary fault", -1, 1 }, | ||
3093 | { UNXSPLCPLERR, "PCI unexpected split completion error", -1, | ||
3094 | 0 }, | ||
3095 | { 0, NULL, 0, 0 } | ||
3096 | }; | ||
3097 | |||
3098 | int fat; | ||
3099 | |||
3100 | fat = csio_handle_intr_status(hw, | ||
3101 | PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, | ||
3102 | sysbus_intr_info) + | ||
3103 | csio_handle_intr_status(hw, | ||
3104 | PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, | ||
3105 | pcie_port_intr_info) + | ||
3106 | csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info); | ||
3107 | if (fat) | ||
3108 | csio_hw_fatal_err(hw); | ||
3109 | } | ||
3110 | |||
3111 | /* | ||
3112 | * TP interrupt handler. | ||
3113 | */ | ||
3114 | static void csio_tp_intr_handler(struct csio_hw *hw) | ||
3115 | { | ||
3116 | static struct intr_info tp_intr_info[] = { | ||
3117 | { 0x3fffffff, "TP parity error", -1, 1 }, | ||
3118 | { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, | ||
3119 | { 0, NULL, 0, 0 } | ||
3120 | }; | ||
3121 | |||
3122 | if (csio_handle_intr_status(hw, TP_INT_CAUSE, tp_intr_info)) | ||
3123 | csio_hw_fatal_err(hw); | ||
3124 | } | ||
3125 | |||
3126 | /* | ||
3127 | * SGE interrupt handler. | ||
3128 | */ | ||
3129 | static void csio_sge_intr_handler(struct csio_hw *hw) | ||
3130 | { | ||
3131 | uint64_t v; | ||
3132 | |||
3133 | static struct intr_info sge_intr_info[] = { | ||
3134 | { ERR_CPL_EXCEED_IQE_SIZE, | ||
3135 | "SGE received CPL exceeding IQE size", -1, 1 }, | ||
3136 | { ERR_INVALID_CIDX_INC, | ||
3137 | "SGE GTS CIDX increment too large", -1, 0 }, | ||
3138 | { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, | ||
3139 | { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 }, | ||
3140 | { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, | ||
3141 | "SGE IQID > 1023 received CPL for FL", -1, 0 }, | ||
3142 | { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, | ||
3143 | 0 }, | ||
3144 | { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, | ||
3145 | 0 }, | ||
3146 | { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, | ||
3147 | 0 }, | ||
3148 | { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, | ||
3149 | 0 }, | ||
3150 | { ERR_ING_CTXT_PRIO, | ||
3151 | "SGE too many priority ingress contexts", -1, 0 }, | ||
3152 | { ERR_EGR_CTXT_PRIO, | ||
3153 | "SGE too many priority egress contexts", -1, 0 }, | ||
3154 | { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, | ||
3155 | { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, | ||
3156 | { 0, NULL, 0, 0 } | ||
3157 | }; | ||
3158 | |||
3159 | v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1) | | ||
3160 | ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2) << 32); | ||
3161 | if (v) { | ||
3162 | csio_fatal(hw, "SGE parity error (%#llx)\n", | ||
3163 | (unsigned long long)v); | ||
3164 | csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF), | ||
3165 | SGE_INT_CAUSE1); | ||
3166 | csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2); | ||
3167 | } | ||
3168 | |||
3169 | v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info); | ||
3170 | |||
3171 | if (v != 0) | ||
3172 | csio_hw_fatal_err(hw); | ||
3174 | } | ||
3175 | |||
3176 | #define CIM_OBQ_INTR (OBQULP0PARERR | OBQULP1PARERR | OBQULP2PARERR |\ | ||
3177 | OBQULP3PARERR | OBQSGEPARERR | OBQNCSIPARERR) | ||
3178 | #define CIM_IBQ_INTR (IBQTP0PARERR | IBQTP1PARERR | IBQULPPARERR |\ | ||
3179 | IBQSGEHIPARERR | IBQSGELOPARERR | IBQNCSIPARERR) | ||
3180 | |||
3181 | /* | ||
3182 | * CIM interrupt handler. | ||
3183 | */ | ||
3184 | static void csio_cim_intr_handler(struct csio_hw *hw) | ||
3185 | { | ||
3186 | static struct intr_info cim_intr_info[] = { | ||
3187 | { PREFDROPINT, "CIM control register prefetch drop", -1, 1 }, | ||
3188 | { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 }, | ||
3189 | { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 }, | ||
3190 | { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 }, | ||
3191 | { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 }, | ||
3192 | { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 }, | ||
3193 | { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 }, | ||
3194 | { 0, NULL, 0, 0 } | ||
3195 | }; | ||
3196 | static struct intr_info cim_upintr_info[] = { | ||
3197 | { RSVDSPACEINT, "CIM reserved space access", -1, 1 }, | ||
3198 | { ILLTRANSINT, "CIM illegal transaction", -1, 1 }, | ||
3199 | { ILLWRINT, "CIM illegal write", -1, 1 }, | ||
3200 | { ILLRDINT, "CIM illegal read", -1, 1 }, | ||
3201 | { ILLRDBEINT, "CIM illegal read BE", -1, 1 }, | ||
3202 | { ILLWRBEINT, "CIM illegal write BE", -1, 1 }, | ||
3203 | { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 }, | ||
3204 | { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 }, | ||
3205 | { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 }, | ||
3206 | { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 }, | ||
3207 | { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 }, | ||
3208 | { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 }, | ||
3209 | { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 }, | ||
3210 | { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 }, | ||
3211 | { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 }, | ||
3212 | { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 }, | ||
3213 | { SGLRDCTLINT, "CIM single read from CTL space", -1, 1 }, | ||
3214 | { SGLWRCTLINT, "CIM single write to CTL space", -1, 1 }, | ||
3215 | { BLKRDCTLINT, "CIM block read from CTL space", -1, 1 }, | ||
3216 | { BLKWRCTLINT, "CIM block write to CTL space", -1, 1 }, | ||
3217 | { SGLRDPLINT, "CIM single read from PL space", -1, 1 }, | ||
3218 | { SGLWRPLINT, "CIM single write to PL space", -1, 1 }, | ||
3219 | { BLKRDPLINT, "CIM block read from PL space", -1, 1 }, | ||
3220 | { BLKWRPLINT, "CIM block write to PL space", -1, 1 }, | ||
3221 | { REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 }, | ||
3222 | { RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 }, | ||
3223 | { TIMEOUTINT, "CIM PIF timeout", -1, 1 }, | ||
3224 | { TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 }, | ||
3225 | { 0, NULL, 0, 0 } | ||
3226 | }; | ||
3227 | |||
3228 | int fat; | ||
3229 | |||
3230 | fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE, | ||
3231 | cim_intr_info) + | ||
3232 | csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE, | ||
3233 | cim_upintr_info); | ||
3234 | if (fat) | ||
3235 | csio_hw_fatal_err(hw); | ||
3236 | } | ||
3237 | |||
3238 | /* | ||
3239 | * ULP RX interrupt handler. | ||
3240 | */ | ||
3241 | static void csio_ulprx_intr_handler(struct csio_hw *hw) | ||
3242 | { | ||
3243 | static struct intr_info ulprx_intr_info[] = { | ||
3244 | { 0x1800000, "ULPRX context error", -1, 1 }, | ||
3245 | { 0x7fffff, "ULPRX parity error", -1, 1 }, | ||
3246 | { 0, NULL, 0, 0 } | ||
3247 | }; | ||
3248 | |||
3249 | if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE, ulprx_intr_info)) | ||
3250 | csio_hw_fatal_err(hw); | ||
3251 | } | ||
3252 | |||
3253 | /* | ||
3254 | * ULP TX interrupt handler. | ||
3255 | */ | ||
3256 | static void csio_ulptx_intr_handler(struct csio_hw *hw) | ||
3257 | { | ||
3258 | static struct intr_info ulptx_intr_info[] = { | ||
3259 | { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, | ||
3260 | 0 }, | ||
3261 | { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, | ||
3262 | 0 }, | ||
3263 | { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1, | ||
3264 | 0 }, | ||
3265 | { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1, | ||
3266 | 0 }, | ||
3267 | { 0xfffffff, "ULPTX parity error", -1, 1 }, | ||
3268 | { 0, NULL, 0, 0 } | ||
3269 | }; | ||
3270 | |||
3271 | if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE, ulptx_intr_info)) | ||
3272 | csio_hw_fatal_err(hw); | ||
3273 | } | ||
3274 | |||
3275 | /* | ||
3276 | * PM TX interrupt handler. | ||
3277 | */ | ||
3278 | static void csio_pmtx_intr_handler(struct csio_hw *hw) | ||
3279 | { | ||
3280 | static struct intr_info pmtx_intr_info[] = { | ||
3281 | { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, | ||
3282 | { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, | ||
3283 | { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, | ||
3284 | { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, | ||
3285 | { 0xffffff0, "PMTX framing error", -1, 1 }, | ||
3286 | { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, | ||
3287 | { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, | ||
3288 | 1 }, | ||
3289 | { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, | ||
3290 | { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1}, | ||
3291 | { 0, NULL, 0, 0 } | ||
3292 | }; | ||
3293 | |||
3294 | if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE, pmtx_intr_info)) | ||
3295 | csio_hw_fatal_err(hw); | ||
3296 | } | ||
3297 | |||
3298 | /* | ||
3299 | * PM RX interrupt handler. | ||
3300 | */ | ||
3301 | static void csio_pmrx_intr_handler(struct csio_hw *hw) | ||
3302 | { | ||
3303 | static struct intr_info pmrx_intr_info[] = { | ||
3304 | { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, | ||
3305 | { 0x3ffff0, "PMRX framing error", -1, 1 }, | ||
3306 | { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, | ||
3307 | { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, | ||
3308 | 1 }, | ||
3309 | { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, | ||
3310 | { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1}, | ||
3311 | { 0, NULL, 0, 0 } | ||
3312 | }; | ||
3313 | |||
3314 | if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE, pmrx_intr_info)) | ||
3315 | csio_hw_fatal_err(hw); | ||
3316 | } | ||
3317 | |||
3318 | /* | ||
3319 | * CPL switch interrupt handler. | ||
3320 | */ | ||
3321 | static void csio_cplsw_intr_handler(struct csio_hw *hw) | ||
3322 | { | ||
3323 | static struct intr_info cplsw_intr_info[] = { | ||
3324 | { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, | ||
3325 | { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, | ||
3326 | { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, | ||
3327 | { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, | ||
3328 | { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, | ||
3329 | { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, | ||
3330 | { 0, NULL, 0, 0 } | ||
3331 | }; | ||
3332 | |||
3333 | if (csio_handle_intr_status(hw, CPL_INTR_CAUSE, cplsw_intr_info)) | ||
3334 | csio_hw_fatal_err(hw); | ||
3335 | } | ||
3336 | |||
3337 | /* | ||
3338 | * LE interrupt handler. | ||
3339 | */ | ||
3340 | static void csio_le_intr_handler(struct csio_hw *hw) | ||
3341 | { | ||
3342 | static struct intr_info le_intr_info[] = { | ||
3343 | { LIPMISS, "LE LIP miss", -1, 0 }, | ||
3344 | { LIP0, "LE 0 LIP error", -1, 0 }, | ||
3345 | { PARITYERR, "LE parity error", -1, 1 }, | ||
3346 | { UNKNOWNCMD, "LE unknown command", -1, 1 }, | ||
3347 | { REQQPARERR, "LE request queue parity error", -1, 1 }, | ||
3348 | { 0, NULL, 0, 0 } | ||
3349 | }; | ||
3350 | |||
3351 | if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE, le_intr_info)) | ||
3352 | csio_hw_fatal_err(hw); | ||
3353 | } | ||
3354 | |||
3355 | /* | ||
3356 | * MPS interrupt handler. | ||
3357 | */ | ||
3358 | static void csio_mps_intr_handler(struct csio_hw *hw) | ||
3359 | { | ||
3360 | static struct intr_info mps_rx_intr_info[] = { | ||
3361 | { 0xffffff, "MPS Rx parity error", -1, 1 }, | ||
3362 | { 0, NULL, 0, 0 } | ||
3363 | }; | ||
3364 | static struct intr_info mps_tx_intr_info[] = { | ||
3365 | { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 }, | ||
3366 | { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, | ||
3367 | { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 }, | ||
3368 | { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 }, | ||
3369 | { BUBBLE, "MPS Tx underflow", -1, 1 }, | ||
3370 | { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 }, | ||
3371 | { FRMERR, "MPS Tx framing error", -1, 1 }, | ||
3372 | { 0, NULL, 0, 0 } | ||
3373 | }; | ||
3374 | static struct intr_info mps_trc_intr_info[] = { | ||
3375 | { FILTMEM, "MPS TRC filter parity error", -1, 1 }, | ||
3376 | { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 }, | ||
3377 | { MISCPERR, "MPS TRC misc parity error", -1, 1 }, | ||
3378 | { 0, NULL, 0, 0 } | ||
3379 | }; | ||
3380 | static struct intr_info mps_stat_sram_intr_info[] = { | ||
3381 | { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, | ||
3382 | { 0, NULL, 0, 0 } | ||
3383 | }; | ||
3384 | static struct intr_info mps_stat_tx_intr_info[] = { | ||
3385 | { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, | ||
3386 | { 0, NULL, 0, 0 } | ||
3387 | }; | ||
3388 | static struct intr_info mps_stat_rx_intr_info[] = { | ||
3389 | { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, | ||
3390 | { 0, NULL, 0, 0 } | ||
3391 | }; | ||
3392 | static struct intr_info mps_cls_intr_info[] = { | ||
3393 | { MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, | ||
3394 | { MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, | ||
3395 | { HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, | ||
3396 | { 0, NULL, 0, 0 } | ||
3397 | }; | ||
3398 | |||
3399 | int fat; | ||
3400 | |||
3401 | fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE, | ||
3402 | mps_rx_intr_info) + | ||
3403 | csio_handle_intr_status(hw, MPS_TX_INT_CAUSE, | ||
3404 | mps_tx_intr_info) + | ||
3405 | csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE, | ||
3406 | mps_trc_intr_info) + | ||
3407 | csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM, | ||
3408 | mps_stat_sram_intr_info) + | ||
3409 | csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO, | ||
3410 | mps_stat_tx_intr_info) + | ||
3411 | csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO, | ||
3412 | mps_stat_rx_intr_info) + | ||
3413 | csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE, | ||
3414 | mps_cls_intr_info); | ||
3415 | |||
3416 | csio_wr_reg32(hw, 0, MPS_INT_CAUSE); | ||
3417 | csio_rd_reg32(hw, MPS_INT_CAUSE); /* flush */ | ||
3418 | if (fat) | ||
3419 | csio_hw_fatal_err(hw); | ||
3420 | } | ||
3421 | |||
3422 | #define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE) | ||
3423 | |||
3424 | /* | ||
3425 | * EDC/MC interrupt handler. | ||
3426 | */ | ||
3427 | static void csio_mem_intr_handler(struct csio_hw *hw, int idx) | ||
3428 | { | ||
3429 | static const char name[3][5] = { "EDC0", "EDC1", "MC" }; | ||
3430 | |||
3431 | unsigned int addr, cnt_addr, v; | ||
3432 | |||
3433 | if (idx <= MEM_EDC1) { | ||
3434 | addr = EDC_REG(EDC_INT_CAUSE, idx); | ||
3435 | cnt_addr = EDC_REG(EDC_ECC_STATUS, idx); | ||
3436 | } else { | ||
3437 | addr = MC_INT_CAUSE; | ||
3438 | cnt_addr = MC_ECC_STATUS; | ||
3439 | } | ||
3440 | |||
3441 | v = csio_rd_reg32(hw, addr) & MEM_INT_MASK; | ||
3442 | if (v & PERR_INT_CAUSE) | ||
3443 | csio_fatal(hw, "%s FIFO parity error\n", name[idx]); | ||
3444 | if (v & ECC_CE_INT_CAUSE) { | ||
3445 | uint32_t cnt = ECC_CECNT_GET(csio_rd_reg32(hw, cnt_addr)); | ||
3446 | |||
3447 | csio_wr_reg32(hw, ECC_CECNT_MASK, cnt_addr); | ||
3448 | csio_warn(hw, "%u %s correctable ECC data error%s\n", | ||
3449 | cnt, name[idx], cnt > 1 ? "s" : ""); | ||
3450 | } | ||
3451 | if (v & ECC_UE_INT_CAUSE) | ||
3452 | csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]); | ||
3453 | |||
3454 | csio_wr_reg32(hw, v, addr); | ||
3455 | if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE)) | ||
3456 | csio_hw_fatal_err(hw); | ||
3457 | } | ||
3458 | |||
3459 | /* | ||
3460 | * MA interrupt handler. | ||
3461 | */ | ||
3462 | static void csio_ma_intr_handler(struct csio_hw *hw) | ||
3463 | { | ||
3464 | uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE); | ||
3465 | |||
3466 | if (status & MEM_PERR_INT_CAUSE) | ||
3467 | csio_fatal(hw, "MA parity error, parity status %#x\n", | ||
3468 | csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS)); | ||
3469 | if (status & MEM_WRAP_INT_CAUSE) { | ||
3470 | v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS); | ||
3471 | csio_fatal(hw, | ||
3472 | "MA address wrap-around error by client %u to address %#x\n", | ||
3473 | MEM_WRAP_CLIENT_NUM_GET(v), MEM_WRAP_ADDRESS_GET(v) << 4); | ||
3474 | } | ||
3475 | csio_wr_reg32(hw, status, MA_INT_CAUSE); | ||
3476 | csio_hw_fatal_err(hw); | ||
3477 | } | ||
3478 | |||
3479 | /* | ||
3480 | * SMB interrupt handler. | ||
3481 | */ | ||
3482 | static void csio_smb_intr_handler(struct csio_hw *hw) | ||
3483 | { | ||
3484 | static struct intr_info smb_intr_info[] = { | ||
3485 | { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, | ||
3486 | { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, | ||
3487 | { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, | ||
3488 | { 0, NULL, 0, 0 } | ||
3489 | }; | ||
3490 | |||
3491 | if (csio_handle_intr_status(hw, SMB_INT_CAUSE, smb_intr_info)) | ||
3492 | csio_hw_fatal_err(hw); | ||
3493 | } | ||
3494 | |||
3495 | /* | ||
3496 | * NC-SI interrupt handler. | ||
3497 | */ | ||
3498 | static void csio_ncsi_intr_handler(struct csio_hw *hw) | ||
3499 | { | ||
3500 | static struct intr_info ncsi_intr_info[] = { | ||
3501 | { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, | ||
3502 | { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, | ||
3503 | { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, | ||
3504 | { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, | ||
3505 | { 0, NULL, 0, 0 } | ||
3506 | }; | ||
3507 | |||
3508 | if (csio_handle_intr_status(hw, NCSI_INT_CAUSE, ncsi_intr_info)) | ||
3509 | csio_hw_fatal_err(hw); | ||
3510 | } | ||
3511 | |||
3512 | /* | ||
3513 | * XGMAC interrupt handler. | ||
3514 | */ | ||
3515 | static void csio_xgmac_intr_handler(struct csio_hw *hw, int port) | ||
3516 | { | ||
3517 | uint32_t v = csio_rd_reg32(hw, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); | ||
3518 | |||
3519 | v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; | ||
3520 | if (!v) | ||
3521 | return; | ||
3522 | |||
3523 | if (v & TXFIFO_PRTY_ERR) | ||
3524 | csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port); | ||
3525 | if (v & RXFIFO_PRTY_ERR) | ||
3526 | csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port); | ||
3527 | csio_wr_reg32(hw, v, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); | ||
3528 | csio_hw_fatal_err(hw); | ||
3529 | } | ||
3530 | |||
3531 | /* | ||
3532 | * PL interrupt handler. | ||
3533 | */ | ||
3534 | static void csio_pl_intr_handler(struct csio_hw *hw) | ||
3535 | { | ||
3536 | static struct intr_info pl_intr_info[] = { | ||
3537 | { FATALPERR, "T4 fatal parity error", -1, 1 }, | ||
3538 | { PERRVFID, "PL VFID_MAP parity error", -1, 1 }, | ||
3539 | { 0, NULL, 0, 0 } | ||
3540 | }; | ||
3541 | |||
3542 | if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE, pl_intr_info)) | ||
3543 | csio_hw_fatal_err(hw); | ||
3544 | } | ||
3545 | |||
3546 | /* | ||
3547 | * csio_hw_slow_intr_handler - control path interrupt handler | ||
3548 | * @hw: HW module | ||
3549 | * | ||
3550 | * Interrupt handler for non-data global interrupt events, e.g., errors. | ||
3551 | * The designation 'slow' is because it involves register reads, while | ||
3552 | * data interrupts typically don't involve any MMIOs. | ||
3553 | */ | ||
3554 | int | ||
3555 | csio_hw_slow_intr_handler(struct csio_hw *hw) | ||
3556 | { | ||
3557 | uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE); | ||
3558 | |||
3559 | if (!(cause & CSIO_GLBL_INTR_MASK)) { | ||
3560 | CSIO_INC_STATS(hw, n_plint_unexp); | ||
3561 | return 0; | ||
3562 | } | ||
3563 | |||
3564 | csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause); | ||
3565 | |||
3566 | CSIO_INC_STATS(hw, n_plint_cnt); | ||
3567 | |||
3568 | if (cause & CIM) | ||
3569 | csio_cim_intr_handler(hw); | ||
3570 | |||
3571 | if (cause & MPS) | ||
3572 | csio_mps_intr_handler(hw); | ||
3573 | |||
3574 | if (cause & NCSI) | ||
3575 | csio_ncsi_intr_handler(hw); | ||
3576 | |||
3577 | if (cause & PL) | ||
3578 | csio_pl_intr_handler(hw); | ||
3579 | |||
3580 | if (cause & SMB) | ||
3581 | csio_smb_intr_handler(hw); | ||
3582 | |||
3583 | if (cause & XGMAC0) | ||
3584 | csio_xgmac_intr_handler(hw, 0); | ||
3585 | |||
3586 | if (cause & XGMAC1) | ||
3587 | csio_xgmac_intr_handler(hw, 1); | ||
3588 | |||
3589 | if (cause & XGMAC_KR0) | ||
3590 | csio_xgmac_intr_handler(hw, 2); | ||
3591 | |||
3592 | if (cause & XGMAC_KR1) | ||
3593 | csio_xgmac_intr_handler(hw, 3); | ||
3594 | |||
3595 | if (cause & PCIE) | ||
3596 | csio_pcie_intr_handler(hw); | ||
3597 | |||
3598 | if (cause & MC) | ||
3599 | csio_mem_intr_handler(hw, MEM_MC); | ||
3600 | |||
3601 | if (cause & EDC0) | ||
3602 | csio_mem_intr_handler(hw, MEM_EDC0); | ||
3603 | |||
3604 | if (cause & EDC1) | ||
3605 | csio_mem_intr_handler(hw, MEM_EDC1); | ||
3606 | |||
3607 | if (cause & LE) | ||
3608 | csio_le_intr_handler(hw); | ||
3609 | |||
3610 | if (cause & TP) | ||
3611 | csio_tp_intr_handler(hw); | ||
3612 | |||
3613 | if (cause & MA) | ||
3614 | csio_ma_intr_handler(hw); | ||
3615 | |||
3616 | if (cause & PM_TX) | ||
3617 | csio_pmtx_intr_handler(hw); | ||
3618 | |||
3619 | if (cause & PM_RX) | ||
3620 | csio_pmrx_intr_handler(hw); | ||
3621 | |||
3622 | if (cause & ULP_RX) | ||
3623 | csio_ulprx_intr_handler(hw); | ||
3624 | |||
3625 | if (cause & CPL_SWITCH) | ||
3626 | csio_cplsw_intr_handler(hw); | ||
3627 | |||
3628 | if (cause & SGE) | ||
3629 | csio_sge_intr_handler(hw); | ||
3630 | |||
3631 | if (cause & ULP_TX) | ||
3632 | csio_ulptx_intr_handler(hw); | ||
3633 | |||
3634 | /* Clear the interrupts just processed for which we are the master. */ | ||
3635 | csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE); | ||
3636 | csio_rd_reg32(hw, PL_INT_CAUSE); /* flush */ | ||
3637 | |||
3638 | return 1; | ||
3639 | } | ||
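
The 0/1 return tells the caller whether any of the causes belonged to this function. A hedged sketch of a typical consumer, e.g. a non-data ISR (the real call sites are in csio_isr.c; the function below is illustrative only):

	static irqreturn_t example_nondata_isr(int irq, void *dev_id)
	{
		struct csio_hw *hw = dev_id;
		unsigned long flags;
		irqreturn_t ret = IRQ_NONE;

		spin_lock_irqsave(&hw->lock, flags);
		if (csio_hw_slow_intr_handler(hw))
			ret = IRQ_HANDLED;
		spin_unlock_irqrestore(&hw->lock, flags);

		return ret;
	}
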
3640 | |||
3641 | /***************************************************************************** | ||
3642 | * HW <--> mailbox interfacing routines. | ||
3643 | ****************************************************************************/ | ||
3644 | /* | ||
3645 | * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions | ||
3646 | * | ||
3647 | * @data: Private data pointer. | ||
3648 | * | ||
3649 | * Called from worker thread context. | ||
3650 | */ | ||
3651 | static void | ||
3652 | csio_mberr_worker(void *data) | ||
3653 | { | ||
3654 | struct csio_hw *hw = (struct csio_hw *)data; | ||
3655 | struct csio_mbm *mbm = &hw->mbm; | ||
3656 | LIST_HEAD(cbfn_q); | ||
3657 | struct csio_mb *mbp_next; | ||
3658 | int rv; | ||
3659 | |||
3660 | del_timer_sync(&mbm->timer); | ||
3661 | |||
3662 | spin_lock_irq(&hw->lock); | ||
3663 | if (list_empty(&mbm->cbfn_q)) { | ||
3664 | spin_unlock_irq(&hw->lock); | ||
3665 | return; | ||
3666 | } | ||
3667 | |||
3668 | list_splice_tail_init(&mbm->cbfn_q, &cbfn_q); | ||
3669 | mbm->stats.n_cbfnq = 0; | ||
3670 | |||
3671 | /* Try to start waiting mailboxes */ | ||
3672 | if (!list_empty(&mbm->req_q)) { | ||
3673 | mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list); | ||
3674 | list_del_init(&mbp_next->list); | ||
3675 | |||
3676 | rv = csio_mb_issue(hw, mbp_next); | ||
3677 | if (rv != 0) | ||
3678 | list_add_tail(&mbp_next->list, &mbm->req_q); | ||
3679 | else | ||
3680 | CSIO_DEC_STATS(mbm, n_activeq); | ||
3681 | } | ||
3682 | spin_unlock_irq(&hw->lock); | ||
3683 | |||
3684 | /* Now callback completions */ | ||
3685 | csio_mb_completions(hw, &cbfn_q); | ||
3686 | } | ||
3687 | |||
3688 | /* | ||
3689 | * csio_hw_mb_timer - Top-level Mailbox timeout handler. | ||
3690 | * | ||
3691 | * @data: private data pointer | ||
3692 | * | ||
3693 | */ | ||
3694 | static void | ||
3695 | csio_hw_mb_timer(uintptr_t data) | ||
3696 | { | ||
3697 | struct csio_hw *hw = (struct csio_hw *)data; | ||
3698 | struct csio_mb *mbp = NULL; | ||
3699 | |||
3700 | spin_lock_irq(&hw->lock); | ||
3701 | mbp = csio_mb_tmo_handler(hw); | ||
3702 | spin_unlock_irq(&hw->lock); | ||
3703 | |||
3704 | /* Call back the function for the timed-out Mailbox */ | ||
3705 | if (mbp) | ||
3706 | mbp->mb_cbfn(hw, mbp); | ||
3707 | |||
3708 | } | ||
3709 | |||
3710 | /* | ||
3711 | * csio_hw_mbm_cleanup - Cleanup Mailbox module. | ||
3712 | * @hw: HW module | ||
3713 | * | ||
3714 | * Called with lock held, should exit with lock held. | ||
3715 | * Cancels outstanding mailboxes (waiting, in-flight) and gathers them | ||
3716 | * into a local queue. Drops the lock, calls the completions, then | ||
3717 | * reacquires the lock before returning. | ||
3718 | */ | ||
3719 | static void | ||
3720 | csio_hw_mbm_cleanup(struct csio_hw *hw) | ||
3721 | { | ||
3722 | LIST_HEAD(cbfn_q); | ||
3723 | |||
3724 | csio_mb_cancel_all(hw, &cbfn_q); | ||
3725 | |||
3726 | spin_unlock_irq(&hw->lock); | ||
3727 | csio_mb_completions(hw, &cbfn_q); | ||
3728 | spin_lock_irq(&hw->lock); | ||
3729 | } | ||
3730 | |||
3731 | /***************************************************************************** | ||
3732 | * Event handling | ||
3733 | ****************************************************************************/ | ||
3734 | int | ||
3735 | csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg, | ||
3736 | uint16_t len) | ||
3737 | { | ||
3738 | struct csio_evt_msg *evt_entry = NULL; | ||
3739 | |||
3740 | if (type >= CSIO_EVT_MAX) | ||
3741 | return -EINVAL; | ||
3742 | |||
3743 | if (len > CSIO_EVT_MSG_SIZE) | ||
3744 | return -EINVAL; | ||
3745 | |||
3746 | if (hw->flags & CSIO_HWF_FWEVT_STOP) | ||
3747 | return -EINVAL; | ||
3748 | |||
3749 | if (list_empty(&hw->evt_free_q)) { | ||
3750 | csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n", | ||
3751 | type, len); | ||
3752 | return -ENOMEM; | ||
3753 | } | ||
3754 | |||
3755 | evt_entry = list_first_entry(&hw->evt_free_q, | ||
3756 | struct csio_evt_msg, list); | ||
3757 | list_del_init(&evt_entry->list); | ||
3758 | |||
3759 | /* copy event msg and queue the event */ | ||
3760 | evt_entry->type = type; | ||
3761 | memcpy((void *)evt_entry->data, evt_msg, len); | ||
3762 | list_add_tail(&evt_entry->list, &hw->evt_active_q); | ||
3763 | |||
3764 | CSIO_DEC_STATS(hw, n_evt_freeq); | ||
3765 | CSIO_INC_STATS(hw, n_evt_activeq); | ||
3766 | |||
3767 | return 0; | ||
3768 | } | ||
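A minimal usage sketch, assuming the caller already holds hw->lock (unlike csio_enqueue_evt_lock() below, this variant does not take the lock itself). The pointer-sized payload matches what csio_evtq_worker() later recovers for CSIO_EVT_DEV_LOSS; the helper name and the schedule_work() call are illustrative assumptions, not the driver's actual call site:

	/* Hypothetical helper: post a device-loss event for rnode @rn.
	 * Caller must hold hw->lock. Only the pointer value is copied into
	 * the event message; csio_evtq_worker() copies it back out with
	 * memcpy(&rn, evt_msg->data, sizeof(rn)).
	 */
	static void example_post_dev_loss(struct csio_hw *hw, struct csio_rnode *rn)
	{
		if (csio_enqueue_evt(hw, CSIO_EVT_DEV_LOSS, &rn, sizeof(rn)) != 0)
			CSIO_INC_STATS(hw, n_evt_drop);
		else
			schedule_work(&hw->evtq_work);	/* kick the event worker */
	}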
3769 | |||
3770 | static int | ||
3771 | csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg, | ||
3772 | uint16_t len, bool msg_sg) | ||
3773 | { | ||
3774 | struct csio_evt_msg *evt_entry = NULL; | ||
3775 | struct csio_fl_dma_buf *fl_sg; | ||
3776 | uint32_t off = 0; | ||
3777 | unsigned long flags; | ||
3778 | int n, ret = 0; | ||
3779 | |||
3780 | if (type >= CSIO_EVT_MAX) | ||
3781 | return -EINVAL; | ||
3782 | |||
3783 | if (len > CSIO_EVT_MSG_SIZE) | ||
3784 | return -EINVAL; | ||
3785 | |||
3786 | spin_lock_irqsave(&hw->lock, flags); | ||
3787 | if (hw->flags & CSIO_HWF_FWEVT_STOP) { | ||
3788 | ret = -EINVAL; | ||
3789 | goto out; | ||
3790 | } | ||
3791 | |||
3792 | if (list_empty(&hw->evt_free_q)) { | ||
3793 | csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n", | ||
3794 | type, len); | ||
3795 | ret = -ENOMEM; | ||
3796 | goto out; | ||
3797 | } | ||
3798 | |||
3799 | evt_entry = list_first_entry(&hw->evt_free_q, | ||
3800 | struct csio_evt_msg, list); | ||
3801 | list_del_init(&evt_entry->list); | ||
3802 | |||
3803 | /* copy event msg and queue the event */ | ||
3804 | evt_entry->type = type; | ||
3805 | |||
3806 | /* If Payload in SG list*/ | ||
3807 | if (msg_sg) { | ||
3808 | fl_sg = (struct csio_fl_dma_buf *) evt_msg; | ||
3809 | for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) { | ||
3810 | memcpy((void *)((uintptr_t)evt_entry->data + off), | ||
3811 | fl_sg->flbufs[n].vaddr, | ||
3812 | fl_sg->flbufs[n].len); | ||
3813 | off += fl_sg->flbufs[n].len; | ||
3814 | } | ||
3815 | } else | ||
3816 | memcpy((void *)evt_entry->data, evt_msg, len); | ||
3817 | |||
3818 | list_add_tail(&evt_entry->list, &hw->evt_active_q); | ||
3819 | CSIO_DEC_STATS(hw, n_evt_freeq); | ||
3820 | CSIO_INC_STATS(hw, n_evt_activeq); | ||
3821 | out: | ||
3822 | spin_unlock_irqrestore(&hw->lock, flags); | ||
3823 | return ret; | ||
3824 | } | ||
3825 | |||
3826 | static void | ||
3827 | csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry) | ||
3828 | { | ||
3829 | if (evt_entry) { | ||
3830 | spin_lock_irq(&hw->lock); | ||
3831 | list_del_init(&evt_entry->list); | ||
3832 | list_add_tail(&evt_entry->list, &hw->evt_free_q); | ||
3833 | CSIO_DEC_STATS(hw, n_evt_activeq); | ||
3834 | CSIO_INC_STATS(hw, n_evt_freeq); | ||
3835 | spin_unlock_irq(&hw->lock); | ||
3836 | } | ||
3837 | } | ||
3838 | |||
3839 | void | ||
3840 | csio_evtq_flush(struct csio_hw *hw) | ||
3841 | { | ||
3842 | uint32_t count; | ||
3843 | count = 30; | ||
3844 | while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) { | ||
3845 | spin_unlock_irq(&hw->lock); | ||
3846 | msleep(2000); | ||
3847 | spin_lock_irq(&hw->lock); | ||
3848 | } | ||
3849 | |||
3850 | CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING)); | ||
3851 | } | ||
3852 | |||
3853 | static void | ||
3854 | csio_evtq_stop(struct csio_hw *hw) | ||
3855 | { | ||
3856 | hw->flags |= CSIO_HWF_FWEVT_STOP; | ||
3857 | } | ||
3858 | |||
3859 | static void | ||
3860 | csio_evtq_start(struct csio_hw *hw) | ||
3861 | { | ||
3862 | hw->flags &= ~CSIO_HWF_FWEVT_STOP; | ||
3863 | } | ||
3864 | |||
3865 | static void | ||
3866 | csio_evtq_cleanup(struct csio_hw *hw) | ||
3867 | { | ||
3868 | struct list_head *evt_entry, *next_entry; | ||
3869 | |||
3870 | /* Release outstanding events from activeq to freeq*/ | ||
3871 | if (!list_empty(&hw->evt_active_q)) | ||
3872 | list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q); | ||
3873 | |||
3874 | hw->stats.n_evt_activeq = 0; | ||
3875 | hw->flags &= ~CSIO_HWF_FWEVT_PENDING; | ||
3876 | |||
3877 | /* Freeup event entry */ | ||
3878 | list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) { | ||
3879 | kfree(evt_entry); | ||
3880 | CSIO_DEC_STATS(hw, n_evt_freeq); | ||
3881 | } | ||
3882 | |||
3883 | hw->stats.n_evt_freeq = 0; | ||
3884 | } | ||
3885 | |||
3886 | |||
3887 | static void | ||
3888 | csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len, | ||
3889 | struct csio_fl_dma_buf *flb, void *priv) | ||
3890 | { | ||
3891 | __u8 op; | ||
3892 | __be64 *data; | ||
3893 | void *msg = NULL; | ||
3894 | uint32_t msg_len = 0; | ||
3895 | bool msg_sg = 0; | ||
3896 | |||
3897 | op = ((struct rss_header *) wr)->opcode; | ||
3898 | if (op == CPL_FW6_PLD) { | ||
3899 | CSIO_INC_STATS(hw, n_cpl_fw6_pld); | ||
3900 | if (!flb || !flb->totlen) { | ||
3901 | CSIO_INC_STATS(hw, n_cpl_unexp); | ||
3902 | return; | ||
3903 | } | ||
3904 | |||
3905 | msg = (void *) flb; | ||
3906 | msg_len = flb->totlen; | ||
3907 | msg_sg = 1; | ||
3908 | |||
3909 | data = (__be64 *) msg; | ||
3910 | } else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) { | ||
3911 | |||
3912 | CSIO_INC_STATS(hw, n_cpl_fw6_msg); | ||
3913 | /* skip RSS header */ | ||
3914 | msg = (void *)((uintptr_t)wr + sizeof(__be64)); | ||
3915 | msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) : | ||
3916 | sizeof(struct cpl_fw4_msg); | ||
3917 | |||
3918 | data = (__be64 *) msg; | ||
3919 | } else { | ||
3920 | csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op); | ||
3921 | CSIO_INC_STATS(hw, n_cpl_unexp); | ||
3922 | return; | ||
3923 | } | ||
3924 | |||
3925 | /* | ||
3926 | * Enqueue event to EventQ. Events processing happens | ||
3927 | * in Event worker thread context | ||
3928 | */ | ||
3929 | if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg, | ||
3930 | (uint16_t)msg_len, msg_sg)) | ||
3931 | CSIO_INC_STATS(hw, n_evt_drop); | ||
3932 | } | ||
3933 | |||
3934 | void | ||
3935 | csio_evtq_worker(struct work_struct *work) | ||
3936 | { | ||
3937 | struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work); | ||
3938 | struct list_head *evt_entry, *next_entry; | ||
3939 | LIST_HEAD(evt_q); | ||
3940 | struct csio_evt_msg *evt_msg; | ||
3941 | struct cpl_fw6_msg *msg; | ||
3942 | struct csio_rnode *rn; | ||
3943 | int rv = 0; | ||
3944 | uint8_t evtq_stop = 0; | ||
3945 | |||
3946 | csio_dbg(hw, "event worker thread active evts#%d\n", | ||
3947 | hw->stats.n_evt_activeq); | ||
3948 | |||
3949 | spin_lock_irq(&hw->lock); | ||
3950 | while (!list_empty(&hw->evt_active_q)) { | ||
3951 | list_splice_tail_init(&hw->evt_active_q, &evt_q); | ||
3952 | spin_unlock_irq(&hw->lock); | ||
3953 | |||
3954 | list_for_each_safe(evt_entry, next_entry, &evt_q) { | ||
3955 | evt_msg = (struct csio_evt_msg *) evt_entry; | ||
3956 | |||
3957 | /* Drop events if queue is STOPPED */ | ||
3958 | spin_lock_irq(&hw->lock); | ||
3959 | if (hw->flags & CSIO_HWF_FWEVT_STOP) | ||
3960 | evtq_stop = 1; | ||
3961 | spin_unlock_irq(&hw->lock); | ||
3962 | if (evtq_stop) { | ||
3963 | CSIO_INC_STATS(hw, n_evt_drop); | ||
3964 | goto free_evt; | ||
3965 | } | ||
3966 | |||
3967 | switch (evt_msg->type) { | ||
3968 | case CSIO_EVT_FW: | ||
3969 | msg = (struct cpl_fw6_msg *)(evt_msg->data); | ||
3970 | |||
3971 | if ((msg->opcode == CPL_FW6_MSG || | ||
3972 | msg->opcode == CPL_FW4_MSG) && | ||
3973 | !msg->type) { | ||
3974 | rv = csio_mb_fwevt_handler(hw, | ||
3975 | msg->data); | ||
3976 | if (!rv) | ||
3977 | break; | ||
3978 | /* Handle any remaining fw events */ | ||
3979 | csio_fcoe_fwevt_handler(hw, | ||
3980 | msg->opcode, msg->data); | ||
3981 | } else if (msg->opcode == CPL_FW6_PLD) { | ||
3982 | |||
3983 | csio_fcoe_fwevt_handler(hw, | ||
3984 | msg->opcode, msg->data); | ||
3985 | } else { | ||
3986 | csio_warn(hw, | ||
3987 | "Unhandled FW msg op %x type %x\n", | ||
3988 | msg->opcode, msg->type); | ||
3989 | CSIO_INC_STATS(hw, n_evt_drop); | ||
3990 | } | ||
3991 | break; | ||
3992 | |||
3993 | case CSIO_EVT_MBX: | ||
3994 | csio_mberr_worker(hw); | ||
3995 | break; | ||
3996 | |||
3997 | case CSIO_EVT_DEV_LOSS: | ||
3998 | memcpy(&rn, evt_msg->data, sizeof(rn)); | ||
3999 | csio_rnode_devloss_handler(rn); | ||
4000 | break; | ||
4001 | |||
4002 | default: | ||
4003 | csio_warn(hw, "Unhandled event %x on evtq\n", | ||
4004 | evt_msg->type); | ||
4005 | CSIO_INC_STATS(hw, n_evt_unexp); | ||
4006 | break; | ||
4007 | } | ||
4008 | free_evt: | ||
4009 | csio_free_evt(hw, evt_msg); | ||
4010 | } | ||
4011 | |||
4012 | spin_lock_irq(&hw->lock); | ||
4013 | } | ||
4014 | hw->flags &= ~CSIO_HWF_FWEVT_PENDING; | ||
4015 | spin_unlock_irq(&hw->lock); | ||
4016 | } | ||
4017 | |||
4018 | int | ||
4019 | csio_fwevtq_handler(struct csio_hw *hw) | ||
4020 | { | ||
4021 | int rv; | ||
4022 | |||
4023 | if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) { | ||
4024 | CSIO_INC_STATS(hw, n_int_stray); | ||
4025 | return -EINVAL; | ||
4026 | } | ||
4027 | |||
4028 | rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx, | ||
4029 | csio_process_fwevtq_entry, NULL); | ||
4030 | return rv; | ||
4031 | } | ||
4032 | |||
4033 | /**************************************************************************** | ||
4034 | * Entry points | ||
4035 | ****************************************************************************/ | ||
4036 | |||
4037 | /* Management module */ | ||
4038 | /* | ||
4039 | * csio_mgmt_req_lookup - Look up whether the given IO req exists in the active Q. | ||
4040 | * @mgmtm - mgmt module | ||
4041 | * @io_req - io request | ||
4042 | * | ||
4043 | * Return - 0: if the given IO req exists in the active Q. | ||
4044 | * -EINVAL: if the lookup fails. | ||
4045 | */ | ||
4046 | int | ||
4047 | csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req) | ||
4048 | { | ||
4049 | struct list_head *tmp; | ||
4050 | |||
4051 | /* Lookup ioreq in the ACTIVEQ */ | ||
4052 | list_for_each(tmp, &mgmtm->active_q) { | ||
4053 | if (io_req == (struct csio_ioreq *)tmp) | ||
4054 | return 0; | ||
4055 | } | ||
4056 | return -EINVAL; | ||
4057 | } | ||
4058 | |||
4059 | #define ECM_MIN_TMO 1000 /* Minimum timeout value for req */ | ||
4060 | |||
4061 | /* | ||
4062 | * csio_mgmt_tmo_handler - MGMT IO timeout handler. | ||
4063 | * @data - Event data. | ||
4064 | * | ||
4065 | * Return - none. | ||
4066 | */ | ||
4067 | static void | ||
4068 | csio_mgmt_tmo_handler(uintptr_t data) | ||
4069 | { | ||
4070 | struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data; | ||
4071 | struct list_head *tmp; | ||
4072 | struct csio_ioreq *io_req; | ||
4073 | |||
4074 | csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n"); | ||
4075 | |||
4076 | spin_lock_irq(&mgmtm->hw->lock); | ||
4077 | |||
4078 | list_for_each(tmp, &mgmtm->active_q) { | ||
4079 | io_req = (struct csio_ioreq *) tmp; | ||
4080 | io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO); | ||
4081 | |||
4082 | if (!io_req->tmo) { | ||
4083 | /* Dequeue the request from the active Q. */ | ||
4084 | tmp = csio_list_prev(tmp); | ||
4085 | list_del_init(&io_req->sm.sm_list); | ||
4086 | if (io_req->io_cbfn) { | ||
4087 | /* io_req will be freed by completion handler */ | ||
4088 | io_req->wr_status = -ETIMEDOUT; | ||
4089 | io_req->io_cbfn(mgmtm->hw, io_req); | ||
4090 | } else { | ||
4091 | CSIO_DB_ASSERT(0); | ||
4092 | } | ||
4093 | } | ||
4094 | } | ||
4095 | |||
4096 | /* If the active queue is not empty, re-arm the timer */ | ||
4097 | if (!list_empty(&mgmtm->active_q)) | ||
4098 | mod_timer(&mgmtm->mgmt_timer, | ||
4099 | jiffies + msecs_to_jiffies(ECM_MIN_TMO)); | ||
4100 | spin_unlock_irq(&mgmtm->hw->lock); | ||
4101 | } | ||
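A worked example of the decrement scheme above; tmo is in milliseconds, given the msecs_to_jiffies(ECM_MIN_TMO) re-arm:

	/* A request queued with io_req->tmo = 2500:
	 *   timer run 1: tmo -= min(2500, 1000)  ->  1500
	 *   timer run 2: tmo -= min(1500, 1000)  ->   500
	 *   timer run 3: tmo -= min( 500, 1000)  ->     0  => io_cbfn() is
	 *                called with wr_status = -ETIMEDOUT.
	 */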
4102 | |||
4103 | static void | ||
4104 | csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm) | ||
4105 | { | ||
4106 | struct csio_hw *hw = mgmtm->hw; | ||
4107 | struct csio_ioreq *io_req; | ||
4108 | struct list_head *tmp; | ||
4109 | uint32_t count; | ||
4110 | |||
4111 | count = 30; | ||
4112 | /* Wait for all outstanding reqs to complete gracefully */ | ||
4113 | while ((!list_empty(&mgmtm->active_q)) && count--) { | ||
4114 | spin_unlock_irq(&hw->lock); | ||
4115 | msleep(2000); | ||
4116 | spin_lock_irq(&hw->lock); | ||
4117 | } | ||
4118 | |||
4119 | /* Release outstanding reqs from the ACTIVEQ */ | ||
4120 | list_for_each(tmp, &mgmtm->active_q) { | ||
4121 | io_req = (struct csio_ioreq *) tmp; | ||
4122 | tmp = csio_list_prev(tmp); | ||
4123 | list_del_init(&io_req->sm.sm_list); | ||
4124 | mgmtm->stats.n_active--; | ||
4125 | if (io_req->io_cbfn) { | ||
4126 | /* io_req will be freed by completion handler */ | ||
4127 | io_req->wr_status = -ETIMEDOUT; | ||
4128 | io_req->io_cbfn(mgmtm->hw, io_req); | ||
4129 | } | ||
4130 | } | ||
4131 | } | ||
4132 | |||
4133 | /* | ||
4134 | * csio_mgmtm_init - Mgmt module init entry point | ||
4135 | * @mgmtm - mgmt module | ||
4136 | * @hw - HW module | ||
4137 | * | ||
4138 | * Initialize mgmt timer, resource wait queue, active queue, | ||
4139 | * completion q. Allocate Egress and Ingress | ||
4140 | * WR queues and save off the queue index returned by the WR | ||
4141 | * module for future use. Allocate and save off mgmt reqs in the | ||
4142 | * mgmt_req_freelist for future use. Make sure their SM is initialized | ||
4143 | * to uninit state. | ||
4144 | * Returns: 0 - on success | ||
4145 | * -ENOMEM - on error. | ||
4146 | */ | ||
4147 | static int | ||
4148 | csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw) | ||
4149 | { | ||
4150 | struct timer_list *timer = &mgmtm->mgmt_timer; | ||
4151 | |||
4152 | init_timer(timer); | ||
4153 | timer->function = csio_mgmt_tmo_handler; | ||
4154 | timer->data = (unsigned long)mgmtm; | ||
4155 | |||
4156 | INIT_LIST_HEAD(&mgmtm->active_q); | ||
4157 | INIT_LIST_HEAD(&mgmtm->cbfn_q); | ||
4158 | |||
4159 | mgmtm->hw = hw; | ||
4160 | /*mgmtm->iq_idx = hw->fwevt_iq_idx;*/ | ||
4161 | |||
4162 | return 0; | ||
4163 | } | ||
4164 | |||
4165 | /* | ||
4166 | * csio_mgmtm_exit - MGMT module exit entry point | ||
4167 | * @mgmtm - mgmt module | ||
4168 | * | ||
4169 | * This function is called during MGMT module uninit. | ||
4170 | * Stop timers, free ioreqs allocated. | ||
4171 | * Returns: None | ||
4172 | * | ||
4173 | */ | ||
4174 | static void | ||
4175 | csio_mgmtm_exit(struct csio_mgmtm *mgmtm) | ||
4176 | { | ||
4177 | del_timer_sync(&mgmtm->mgmt_timer); | ||
4178 | } | ||
4179 | |||
4180 | |||
4181 | /** | ||
4182 | * csio_hw_start - Kicks off the HW State machine | ||
4183 | * @hw: Pointer to HW module. | ||
4184 | * | ||
4185 | * It is assumed that the initialization is a synchronous operation. | ||
4186 | * So when we return after posting the event, the HW SM should be in | ||
4187 | * the ready state, if there were no errors during init. | ||
4188 | */ | ||
4189 | int | ||
4190 | csio_hw_start(struct csio_hw *hw) | ||
4191 | { | ||
4192 | spin_lock_irq(&hw->lock); | ||
4193 | csio_post_event(&hw->sm, CSIO_HWE_CFG); | ||
4194 | spin_unlock_irq(&hw->lock); | ||
4195 | |||
4196 | if (csio_is_hw_ready(hw)) | ||
4197 | return 0; | ||
4198 | else | ||
4199 | return -EINVAL; | ||
4200 | } | ||
4201 | |||
4202 | int | ||
4203 | csio_hw_stop(struct csio_hw *hw) | ||
4204 | { | ||
4205 | csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE); | ||
4206 | |||
4207 | if (csio_is_hw_removing(hw)) | ||
4208 | return 0; | ||
4209 | else | ||
4210 | return -EINVAL; | ||
4211 | } | ||
4212 | |||
4213 | /* Max reset retries */ | ||
4214 | #define CSIO_MAX_RESET_RETRIES 3 | ||
4215 | |||
4216 | /** | ||
4217 | * csio_hw_reset - Reset the hardware | ||
4218 | * @hw: HW module. | ||
4219 | * | ||
4220 | * Caller should hold lock across this function. | ||
4221 | */ | ||
4222 | int | ||
4223 | csio_hw_reset(struct csio_hw *hw) | ||
4224 | { | ||
4225 | if (!csio_is_hw_master(hw)) | ||
4226 | return -EPERM; | ||
4227 | |||
4228 | if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) { | ||
4229 | csio_dbg(hw, "Max hw reset attempts reached.."); | ||
4230 | return -EINVAL; | ||
4231 | } | ||
4232 | |||
4233 | hw->rst_retries++; | ||
4234 | csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET); | ||
4235 | |||
4236 | if (csio_is_hw_ready(hw)) { | ||
4237 | hw->rst_retries = 0; | ||
4238 | hw->stats.n_reset_start = jiffies_to_msecs(jiffies); | ||
4239 | return 0; | ||
4240 | } else | ||
4241 | return -EINVAL; | ||
4242 | } | ||
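A minimal sketch of the locking contract stated above ("Caller should hold lock across this function"); the wrapper name and recovery context are hypothetical:

	static int example_try_hba_reset(struct csio_hw *hw)
	{
		int rv;

		spin_lock_irq(&hw->lock);	/* hold hw->lock across the reset */
		rv = csio_hw_reset(hw);
		spin_unlock_irq(&hw->lock);

		return rv;
	}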
4243 | |||
4244 | /* | ||
4245 | * csio_hw_get_device_id - Caches the Adapter's vendor & device id. | ||
4246 | * @hw: HW module. | ||
4247 | */ | ||
4248 | static void | ||
4249 | csio_hw_get_device_id(struct csio_hw *hw) | ||
4250 | { | ||
4251 | /* Is the adapter device id cached already? */ | ||
4252 | if (csio_is_dev_id_cached(hw)) | ||
4253 | return; | ||
4254 | |||
4255 | /* Get the PCI vendor & device id */ | ||
4256 | pci_read_config_word(hw->pdev, PCI_VENDOR_ID, | ||
4257 | &hw->params.pci.vendor_id); | ||
4258 | pci_read_config_word(hw->pdev, PCI_DEVICE_ID, | ||
4259 | &hw->params.pci.device_id); | ||
4260 | |||
4261 | csio_dev_id_cached(hw); | ||
4262 | |||
4263 | } /* csio_hw_get_device_id */ | ||
4264 | |||
4265 | /* | ||
4266 | * csio_hw_set_description - Set the model, description of the hw. | ||
4267 | * @hw: HW module. | ||
4268 | * @ven_id: PCI Vendor ID | ||
4269 | * @dev_id: PCI Device ID | ||
4270 | */ | ||
4271 | static void | ||
4272 | csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id) | ||
4273 | { | ||
4274 | uint32_t adap_type, prot_type; | ||
4275 | |||
4276 | if (ven_id == CSIO_VENDOR_ID) { | ||
4277 | prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK); | ||
4278 | adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK); | ||
4279 | |||
4280 | if (prot_type == CSIO_FPGA) { | ||
4281 | memcpy(hw->model_desc, | ||
4282 | csio_fcoe_adapters[13].description, 32); | ||
4283 | } else if (prot_type == CSIO_T4_FCOE_ASIC) { | ||
4284 | memcpy(hw->hw_ver, | ||
4285 | csio_fcoe_adapters[adap_type].model_no, 16); | ||
4286 | memcpy(hw->model_desc, | ||
4287 | csio_fcoe_adapters[adap_type].description, 32); | ||
4288 | } else { | ||
4289 | char tempName[32] = "Chelsio FCoE Controller"; | ||
4290 | memcpy(hw->model_desc, tempName, 32); | ||
4291 | |||
4292 | CSIO_DB_ASSERT(0); | ||
4293 | } | ||
4294 | } | ||
4295 | } /* csio_hw_set_description */ | ||
4296 | |||
4297 | /** | ||
4298 | * csio_hw_init - Initialize HW module. | ||
4299 | * @hw: Pointer to HW module. | ||
4300 | * | ||
4301 | * Initialize the members of the HW module. | ||
4302 | */ | ||
4303 | int | ||
4304 | csio_hw_init(struct csio_hw *hw) | ||
4305 | { | ||
4306 | int rv = -EINVAL; | ||
4307 | uint32_t i; | ||
4308 | uint16_t ven_id, dev_id; | ||
4309 | struct csio_evt_msg *evt_entry; | ||
4310 | |||
4311 | INIT_LIST_HEAD(&hw->sm.sm_list); | ||
4312 | csio_init_state(&hw->sm, csio_hws_uninit); | ||
4313 | spin_lock_init(&hw->lock); | ||
4314 | INIT_LIST_HEAD(&hw->sln_head); | ||
4315 | |||
4316 | /* Get the PCI vendor & device id */ | ||
4317 | csio_hw_get_device_id(hw); | ||
4318 | |||
4319 | strcpy(hw->name, CSIO_HW_NAME); | ||
4320 | |||
4321 | /* Set the model & its description */ | ||
4322 | |||
4323 | ven_id = hw->params.pci.vendor_id; | ||
4324 | dev_id = hw->params.pci.device_id; | ||
4325 | |||
4326 | csio_hw_set_description(hw, ven_id, dev_id); | ||
4327 | |||
4328 | /* Initialize default log level */ | ||
4329 | hw->params.log_level = (uint32_t) csio_dbg_level; | ||
4330 | |||
4331 | csio_set_fwevt_intr_idx(hw, -1); | ||
4332 | csio_set_nondata_intr_idx(hw, -1); | ||
4333 | |||
4334 | /* Init all the modules: Mailbox, WorkRequest and Transport */ | ||
4335 | if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer)) | ||
4336 | goto err; | ||
4337 | |||
4338 | rv = csio_wrm_init(csio_hw_to_wrm(hw), hw); | ||
4339 | if (rv) | ||
4340 | goto err_mbm_exit; | ||
4341 | |||
4342 | rv = csio_scsim_init(csio_hw_to_scsim(hw), hw); | ||
4343 | if (rv) | ||
4344 | goto err_wrm_exit; | ||
4345 | |||
4346 | rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw); | ||
4347 | if (rv) | ||
4348 | goto err_scsim_exit; | ||
4349 | /* Pre-allocate evtq and initialize them */ | ||
4350 | INIT_LIST_HEAD(&hw->evt_active_q); | ||
4351 | INIT_LIST_HEAD(&hw->evt_free_q); | ||
4352 | for (i = 0; i < csio_evtq_sz; i++) { | ||
4353 | |||
4354 | evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL); | ||
4355 | if (!evt_entry) { | ||
4356 | csio_err(hw, "Failed to initialize eventq"); | ||
4357 | goto err_evtq_cleanup; | ||
4358 | } | ||
4359 | |||
4360 | list_add_tail(&evt_entry->list, &hw->evt_free_q); | ||
4361 | CSIO_INC_STATS(hw, n_evt_freeq); | ||
4362 | } | ||
4363 | |||
4364 | hw->dev_num = dev_num; | ||
4365 | dev_num++; | ||
4366 | |||
4367 | return 0; | ||
4368 | |||
4369 | err_evtq_cleanup: | ||
4370 | csio_evtq_cleanup(hw); | ||
4371 | csio_mgmtm_exit(csio_hw_to_mgmtm(hw)); | ||
4372 | err_scsim_exit: | ||
4373 | csio_scsim_exit(csio_hw_to_scsim(hw)); | ||
4374 | err_wrm_exit: | ||
4375 | csio_wrm_exit(csio_hw_to_wrm(hw), hw); | ||
4376 | err_mbm_exit: | ||
4377 | csio_mbm_exit(csio_hw_to_mbm(hw)); | ||
4378 | err: | ||
4379 | return rv; | ||
4380 | } | ||
4381 | |||
4382 | /** | ||
4383 | * csio_hw_exit - Un-initialize HW module. | ||
4384 | * @hw: Pointer to HW module. | ||
4385 | * | ||
4386 | */ | ||
4387 | void | ||
4388 | csio_hw_exit(struct csio_hw *hw) | ||
4389 | { | ||
4390 | csio_evtq_cleanup(hw); | ||
4391 | csio_mgmtm_exit(csio_hw_to_mgmtm(hw)); | ||
4392 | csio_scsim_exit(csio_hw_to_scsim(hw)); | ||
4393 | csio_wrm_exit(csio_hw_to_wrm(hw), hw); | ||
4394 | csio_mbm_exit(csio_hw_to_mbm(hw)); | ||
4395 | } | ||
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h new file mode 100644 index 000000000000..2a9b052a58e4 --- /dev/null +++ b/drivers/scsi/csiostor/csio_hw.h | |||
@@ -0,0 +1,667 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio FCoE driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #ifndef __CSIO_HW_H__ | ||
36 | #define __CSIO_HW_H__ | ||
37 | |||
38 | #include <linux/kernel.h> | ||
39 | #include <linux/pci.h> | ||
40 | #include <linux/device.h> | ||
41 | #include <linux/workqueue.h> | ||
42 | #include <linux/compiler.h> | ||
43 | #include <linux/cdev.h> | ||
44 | #include <linux/list.h> | ||
45 | #include <linux/mempool.h> | ||
46 | #include <linux/io.h> | ||
47 | #include <linux/spinlock_types.h> | ||
48 | #include <scsi/scsi_device.h> | ||
49 | #include <scsi/scsi_transport_fc.h> | ||
50 | |||
51 | #include "csio_wr.h" | ||
52 | #include "csio_mb.h" | ||
53 | #include "csio_scsi.h" | ||
54 | #include "csio_defs.h" | ||
55 | #include "t4_regs.h" | ||
56 | #include "t4_msg.h" | ||
57 | |||
58 | /* | ||
59 | * An error value used by host. Should not clash with FW defined return values. | ||
60 | */ | ||
61 | #define FW_HOSTERROR 255 | ||
62 | |||
63 | #define CSIO_FW_FNAME "cxgb4/t4fw.bin" | ||
64 | #define CSIO_CF_FNAME "cxgb4/t4-config.txt" | ||
65 | |||
66 | #define FW_VERSION_MAJOR 1 | ||
67 | #define FW_VERSION_MINOR 2 | ||
68 | #define FW_VERSION_MICRO 8 | ||
69 | |||
70 | #define CSIO_HW_NAME "Chelsio FCoE Adapter" | ||
71 | #define CSIO_MAX_PFN 8 | ||
72 | #define CSIO_MAX_PPORTS 4 | ||
73 | |||
74 | #define CSIO_MAX_LUN 0xFFFF | ||
75 | #define CSIO_MAX_QUEUE 2048 | ||
76 | #define CSIO_MAX_CMD_PER_LUN 32 | ||
77 | #define CSIO_MAX_DDP_BUF_SIZE (1024 * 1024) | ||
78 | #define CSIO_MAX_SECTOR_SIZE 128 | ||
79 | |||
80 | /* Interrupts */ | ||
81 | #define CSIO_EXTRA_MSI_IQS 2 /* Extra iqs for INTX/MSI mode | ||
82 | * (Forward intr iq + fw iq) */ | ||
83 | #define CSIO_EXTRA_VECS 2 /* non-data + FW evt */ | ||
84 | #define CSIO_MAX_SCSI_CPU 128 | ||
85 | #define CSIO_MAX_SCSI_QSETS (CSIO_MAX_SCSI_CPU * CSIO_MAX_PPORTS) | ||
86 | #define CSIO_MAX_MSIX_VECS (CSIO_MAX_SCSI_QSETS + CSIO_EXTRA_VECS) | ||
87 | |||
88 | /* Queues */ | ||
89 | enum { | ||
90 | CSIO_INTR_WRSIZE = 128, | ||
91 | CSIO_INTR_IQSIZE = ((CSIO_MAX_MSIX_VECS + 1) * CSIO_INTR_WRSIZE), | ||
92 | CSIO_FWEVT_WRSIZE = 128, | ||
93 | CSIO_FWEVT_IQLEN = 128, | ||
94 | CSIO_FWEVT_FLBUFS = 64, | ||
95 | CSIO_FWEVT_IQSIZE = (CSIO_FWEVT_WRSIZE * CSIO_FWEVT_IQLEN), | ||
96 | CSIO_HW_NIQ = 1, | ||
97 | CSIO_HW_NFLQ = 1, | ||
98 | CSIO_HW_NEQ = 1, | ||
99 | CSIO_HW_NINTXQ = 1, | ||
100 | }; | ||
101 | |||
102 | struct csio_msix_entries { | ||
103 | unsigned short vector; /* Vector assigned by pci_enable_msix */ | ||
104 | void *dev_id; /* Priv object associated w/ this msix*/ | ||
105 | char desc[24]; /* Description of this vector */ | ||
106 | }; | ||
107 | |||
108 | struct csio_scsi_qset { | ||
109 | int iq_idx; /* Ingress index */ | ||
110 | int eq_idx; /* Egress index */ | ||
111 | uint32_t intr_idx; /* MSIX Vector index */ | ||
112 | }; | ||
113 | |||
114 | struct csio_scsi_cpu_info { | ||
115 | int16_t max_cpus; | ||
116 | }; | ||
117 | |||
118 | extern int csio_dbg_level; | ||
119 | extern int csio_force_master; | ||
120 | extern unsigned int csio_port_mask; | ||
121 | extern int csio_msi; | ||
122 | |||
123 | #define CSIO_VENDOR_ID 0x1425 | ||
124 | #define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00 | ||
125 | #define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF | ||
126 | #define CSIO_FPGA 0xA000 | ||
127 | #define CSIO_T4_FCOE_ASIC 0x4600 | ||
128 | |||
129 | #define CSIO_GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \ | ||
130 | EDC1 | LE | TP | MA | PM_TX | PM_RX | \ | ||
131 | ULP_RX | CPL_SWITCH | SGE | \ | ||
132 | ULP_TX | SF) | ||
133 | |||
134 | /* | ||
135 | * Hard parameters used to initialize the card in the absence of a | ||
136 | * configuration file. | ||
137 | */ | ||
138 | enum { | ||
139 | /* General */ | ||
140 | CSIO_SGE_DBFIFO_INT_THRESH = 10, | ||
141 | |||
142 | CSIO_SGE_RX_DMA_OFFSET = 2, | ||
143 | |||
144 | CSIO_SGE_FLBUF_SIZE1 = 65536, | ||
145 | CSIO_SGE_FLBUF_SIZE2 = 1536, | ||
146 | CSIO_SGE_FLBUF_SIZE3 = 9024, | ||
147 | CSIO_SGE_FLBUF_SIZE4 = 9216, | ||
148 | CSIO_SGE_FLBUF_SIZE5 = 2048, | ||
149 | CSIO_SGE_FLBUF_SIZE6 = 128, | ||
150 | CSIO_SGE_FLBUF_SIZE7 = 8192, | ||
151 | CSIO_SGE_FLBUF_SIZE8 = 16384, | ||
152 | |||
153 | CSIO_SGE_TIMER_VAL_0 = 5, | ||
154 | CSIO_SGE_TIMER_VAL_1 = 10, | ||
155 | CSIO_SGE_TIMER_VAL_2 = 20, | ||
156 | CSIO_SGE_TIMER_VAL_3 = 50, | ||
157 | CSIO_SGE_TIMER_VAL_4 = 100, | ||
158 | CSIO_SGE_TIMER_VAL_5 = 200, | ||
159 | |||
160 | CSIO_SGE_INT_CNT_VAL_0 = 1, | ||
161 | CSIO_SGE_INT_CNT_VAL_1 = 4, | ||
162 | CSIO_SGE_INT_CNT_VAL_2 = 8, | ||
163 | CSIO_SGE_INT_CNT_VAL_3 = 16, | ||
164 | |||
165 | /* Storage specific - used by FW_PFVF_CMD */ | ||
166 | CSIO_WX_CAPS = FW_CMD_CAP_PF, /* w/x all */ | ||
167 | CSIO_R_CAPS = FW_CMD_CAP_PF, /* r all */ | ||
168 | CSIO_NVI = 4, | ||
169 | CSIO_NIQ_FLINT = 34, | ||
170 | CSIO_NETH_CTRL = 32, | ||
171 | CSIO_NEQ = 66, | ||
172 | CSIO_NEXACTF = 32, | ||
173 | CSIO_CMASK = FW_PFVF_CMD_CMASK_MASK, | ||
174 | CSIO_PMASK = FW_PFVF_CMD_PMASK_MASK, | ||
175 | }; | ||
176 | |||
177 | /* Slowpath events */ | ||
178 | enum csio_evt { | ||
179 | CSIO_EVT_FW = 0, /* FW event */ | ||
180 | CSIO_EVT_MBX, /* MBX event */ | ||
181 | CSIO_EVT_SCN, /* State change notification */ | ||
182 | CSIO_EVT_DEV_LOSS, /* Device loss event */ | ||
183 | CSIO_EVT_MAX, /* Max supported event */ | ||
184 | }; | ||
185 | |||
186 | #define CSIO_EVT_MSG_SIZE 512 | ||
187 | #define CSIO_EVTQ_SIZE 512 | ||
188 | |||
189 | /* Event msg */ | ||
190 | struct csio_evt_msg { | ||
191 | struct list_head list; /* evt queue*/ | ||
192 | enum csio_evt type; | ||
193 | uint8_t data[CSIO_EVT_MSG_SIZE]; | ||
194 | }; | ||
195 | |||
196 | enum { | ||
197 | EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */ | ||
198 | SERNUM_LEN = 16, /* Serial # length */ | ||
199 | EC_LEN = 16, /* E/C length */ | ||
200 | ID_LEN = 16, /* ID length */ | ||
201 | TRACE_LEN = 112, /* length of trace data and mask */ | ||
202 | }; | ||
203 | |||
204 | enum { | ||
205 | SF_PAGE_SIZE = 256, /* serial flash page size */ | ||
206 | SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */ | ||
207 | SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */ | ||
208 | }; | ||
209 | |||
210 | enum { MEM_EDC0, MEM_EDC1, MEM_MC }; | ||
211 | |||
212 | enum { | ||
213 | MEMWIN0_APERTURE = 2048, | ||
214 | MEMWIN0_BASE = 0x1b800, | ||
215 | MEMWIN1_APERTURE = 32768, | ||
216 | MEMWIN1_BASE = 0x28000, | ||
217 | MEMWIN2_APERTURE = 65536, | ||
218 | MEMWIN2_BASE = 0x30000, | ||
219 | }; | ||
220 | |||
221 | /* serial flash and firmware constants */ | ||
222 | enum { | ||
223 | SF_ATTEMPTS = 10, /* max retries for SF operations */ | ||
224 | |||
225 | /* flash command opcodes */ | ||
226 | SF_PROG_PAGE = 2, /* program page */ | ||
227 | SF_WR_DISABLE = 4, /* disable writes */ | ||
228 | SF_RD_STATUS = 5, /* read status register */ | ||
229 | SF_WR_ENABLE = 6, /* enable writes */ | ||
230 | SF_RD_DATA_FAST = 0xb, /* read flash */ | ||
231 | SF_RD_ID = 0x9f, /* read ID */ | ||
232 | SF_ERASE_SECTOR = 0xd8, /* erase sector */ | ||
233 | |||
234 | FW_START_SEC = 8, /* first flash sector for FW */ | ||
235 | FW_END_SEC = 15, /* last flash sector for FW */ | ||
236 | FW_IMG_START = FW_START_SEC * SF_SEC_SIZE, | ||
237 | FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE, | ||
238 | |||
239 | FLASH_CFG_MAX_SIZE = 0x10000 , /* max size of the flash config file*/ | ||
240 | FLASH_CFG_OFFSET = 0x1f0000, | ||
241 | FLASH_CFG_START_SEC = FLASH_CFG_OFFSET / SF_SEC_SIZE, | ||
242 | FPGA_FLASH_CFG_OFFSET = 0xf0000 , /* if FPGA mode, then cfg file is | ||
243 | * at 1MB - 64KB */ | ||
244 | FPGA_FLASH_CFG_START_SEC = FPGA_FLASH_CFG_OFFSET / SF_SEC_SIZE, | ||
245 | }; | ||
246 | |||
247 | /* | ||
248 | * Flash layout. | ||
249 | */ | ||
250 | #define FLASH_START(start) ((start) * SF_SEC_SIZE) | ||
251 | #define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE) | ||
252 | |||
253 | enum { | ||
254 | /* | ||
255 | * Location of firmware image in FLASH. | ||
256 | */ | ||
257 | FLASH_FW_START_SEC = 8, | ||
258 | FLASH_FW_NSECS = 8, | ||
259 | FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC), | ||
260 | FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS), | ||
261 | |||
262 | }; | ||
263 | |||
264 | #undef FLASH_START | ||
265 | #undef FLASH_MAX_SIZE | ||
266 | |||
267 | /* Management module */ | ||
268 | enum { | ||
269 | CSIO_MGMT_EQ_WRSIZE = 512, | ||
270 | CSIO_MGMT_IQ_WRSIZE = 128, | ||
271 | CSIO_MGMT_EQLEN = 64, | ||
272 | CSIO_MGMT_IQLEN = 64, | ||
273 | }; | ||
274 | |||
275 | #define CSIO_MGMT_EQSIZE (CSIO_MGMT_EQLEN * CSIO_MGMT_EQ_WRSIZE) | ||
276 | #define CSIO_MGMT_IQSIZE (CSIO_MGMT_IQLEN * CSIO_MGMT_IQ_WRSIZE) | ||
277 | |||
278 | /* mgmt module stats */ | ||
279 | struct csio_mgmtm_stats { | ||
280 | uint32_t n_abort_req; /* Total abort request */ | ||
281 | uint32_t n_abort_rsp; /* Total abort response */ | ||
282 | uint32_t n_close_req; /* Total close request */ | ||
283 | uint32_t n_close_rsp; /* Total close response */ | ||
284 | uint32_t n_err; /* Total Errors */ | ||
285 | uint32_t n_drop; /* Total request dropped */ | ||
286 | uint32_t n_active; /* Count of active_q */ | ||
287 | uint32_t n_cbfn; /* Count of cbfn_q */ | ||
288 | }; | ||
289 | |||
290 | /* MGMT module */ | ||
291 | struct csio_mgmtm { | ||
292 | struct csio_hw *hw; /* Pointer to HW module */ | ||
293 | int eq_idx; /* Egress queue index */ | ||
294 | int iq_idx; /* Ingress queue index */ | ||
295 | int msi_vec; /* MSI vector */ | ||
296 | struct list_head active_q; /* Outstanding ELS/CT */ | ||
297 | struct list_head abort_q; /* Outstanding abort req */ | ||
298 | struct list_head cbfn_q; /* Completion queue */ | ||
299 | struct list_head mgmt_req_freelist; /* Free pool of reqs */ | ||
300 | /* ELSCT request freelist */ | ||
301 | struct timer_list mgmt_timer; /* MGMT timer */ | ||
302 | struct csio_mgmtm_stats stats; /* ELS/CT stats */ | ||
303 | }; | ||
304 | |||
305 | struct csio_adap_desc { | ||
306 | char model_no[16]; | ||
307 | char description[32]; | ||
308 | }; | ||
309 | |||
310 | struct pci_params { | ||
311 | uint16_t vendor_id; | ||
312 | uint16_t device_id; | ||
313 | uint32_t vpd_cap_addr; | ||
314 | uint16_t speed; | ||
315 | uint8_t width; | ||
316 | }; | ||
317 | |||
318 | /* User configurable hw parameters */ | ||
319 | struct csio_hw_params { | ||
320 | uint32_t sf_size; /* serial flash | ||
321 | * size in bytes | ||
322 | */ | ||
323 | uint32_t sf_nsec; /* # of flash sectors */ | ||
324 | struct pci_params pci; | ||
325 | uint32_t log_level; /* Module-level for | ||
326 | * debug log. | ||
327 | */ | ||
328 | }; | ||
329 | |||
330 | struct csio_vpd { | ||
331 | uint32_t cclk; | ||
332 | uint8_t ec[EC_LEN + 1]; | ||
333 | uint8_t sn[SERNUM_LEN + 1]; | ||
334 | uint8_t id[ID_LEN + 1]; | ||
335 | }; | ||
336 | |||
337 | struct csio_pport { | ||
338 | uint16_t pcap; | ||
339 | uint8_t portid; | ||
340 | uint8_t link_status; | ||
341 | uint16_t link_speed; | ||
342 | uint8_t mac[6]; | ||
343 | uint8_t mod_type; | ||
344 | uint8_t rsvd1; | ||
345 | uint8_t rsvd2; | ||
346 | uint8_t rsvd3; | ||
347 | }; | ||
348 | |||
349 | /* fcoe resource information */ | ||
350 | struct csio_fcoe_res_info { | ||
351 | uint16_t e_d_tov; | ||
352 | uint16_t r_a_tov_seq; | ||
353 | uint16_t r_a_tov_els; | ||
354 | uint16_t r_r_tov; | ||
355 | uint32_t max_xchgs; | ||
356 | uint32_t max_ssns; | ||
357 | uint32_t used_xchgs; | ||
358 | uint32_t used_ssns; | ||
359 | uint32_t max_fcfs; | ||
360 | uint32_t max_vnps; | ||
361 | uint32_t used_fcfs; | ||
362 | uint32_t used_vnps; | ||
363 | }; | ||
364 | |||
365 | /* HW State machine Events */ | ||
366 | enum csio_hw_ev { | ||
367 | CSIO_HWE_CFG = (uint32_t)1, /* Starts off the State machine */ | ||
368 | CSIO_HWE_INIT, /* Config done, start Init */ | ||
369 | CSIO_HWE_INIT_DONE, /* Init Mailboxes sent, HW ready */ | ||
370 | CSIO_HWE_FATAL, /* Fatal error during initialization */ | ||
371 | CSIO_HWE_PCIERR_DETECTED,/* PCI error recovery detected */ | ||
372 | CSIO_HWE_PCIERR_SLOT_RESET, /* Slot reset after PCI recovery */ | ||
373 | CSIO_HWE_PCIERR_RESUME, /* Resume after PCI error recovery */ | ||
374 | CSIO_HWE_QUIESCED, /* HBA quiesced */ | ||
375 | CSIO_HWE_HBA_RESET, /* HBA reset requested */ | ||
376 | CSIO_HWE_HBA_RESET_DONE, /* HBA reset completed */ | ||
377 | CSIO_HWE_FW_DLOAD, /* FW download requested */ | ||
378 | CSIO_HWE_PCI_REMOVE, /* PCI de-instantiation */ | ||
379 | CSIO_HWE_SUSPEND, /* HW suspend for Online(hot) replacement */ | ||
380 | CSIO_HWE_RESUME, /* HW resume for Online(hot) replacement */ | ||
381 | CSIO_HWE_MAX, /* Max HW event */ | ||
382 | }; | ||
383 | |||
384 | /* hw stats */ | ||
385 | struct csio_hw_stats { | ||
386 | uint32_t n_evt_activeq; /* Number of event in active Q */ | ||
387 | uint32_t n_evt_freeq; /* Number of event in free Q */ | ||
388 | uint32_t n_evt_drop; /* Number of events dropped */ | ||
389 | uint32_t n_evt_unexp; /* Number of unexpected events */ | ||
390 | uint32_t n_pcich_offline;/* Number of pci channel offline */ | ||
391 | uint32_t n_lnlkup_miss; /* Number of lnode lookup miss */ | ||
392 | uint32_t n_cpl_fw6_msg; /* Number of cpl fw6 message*/ | ||
393 | uint32_t n_cpl_fw6_pld; /* Number of cpl fw6 payload*/ | ||
394 | uint32_t n_cpl_unexp; /* Number of unexpected cpl */ | ||
395 | uint32_t n_mbint_unexp; /* Number of unexpected mbox */ | ||
396 | /* interrupt */ | ||
397 | uint32_t n_plint_unexp; /* Number of unexpected PL */ | ||
398 | /* interrupt */ | ||
399 | uint32_t n_plint_cnt; /* Number of PL interrupt */ | ||
400 | uint32_t n_int_stray; /* Number of stray interrupt */ | ||
401 | uint32_t n_err; /* Number of hw errors */ | ||
402 | uint32_t n_err_fatal; /* Number of fatal errors */ | ||
403 | uint32_t n_err_nomem; /* Number of memory alloc failure */ | ||
404 | uint32_t n_err_io; /* Number of IO failure */ | ||
405 | enum csio_hw_ev n_evt_sm[CSIO_HWE_MAX]; /* Number of sm events */ | ||
406 | uint64_t n_reset_start; /* Start time after the reset */ | ||
407 | uint32_t rsvd1; | ||
408 | }; | ||
409 | |||
410 | /* Defines for hw->flags */ | ||
411 | #define CSIO_HWF_MASTER 0x00000001 /* This is the Master | ||
412 | * function for the | ||
413 | * card. | ||
414 | */ | ||
415 | #define CSIO_HWF_HW_INTR_ENABLED 0x00000002 /* Is the HW interrupt | ||
416 | * enable bit set? | ||
417 | */ | ||
418 | #define CSIO_HWF_FWEVT_PENDING 0x00000004 /* FW events pending */ | ||
419 | #define CSIO_HWF_Q_MEM_ALLOCED 0x00000008 /* Queues have been | ||
420 | * allocated memory. | ||
421 | */ | ||
422 | #define CSIO_HWF_Q_FW_ALLOCED 0x00000010 /* Queues have been | ||
423 | * allocated in FW. | ||
424 | */ | ||
425 | #define CSIO_HWF_VPD_VALID 0x00000020 /* Valid VPD copied */ | ||
426 | #define CSIO_HWF_DEVID_CACHED 0x00000040 /* PCI vendor & device | ||
427 | * id cached */ | ||
428 | #define CSIO_HWF_FWEVT_STOP 0x00000080 /* Stop processing | ||
429 | * FW events | ||
430 | */ | ||
431 | #define CSIO_HWF_USING_SOFT_PARAMS 0x00000100 /* Using FW config | ||
432 | * params | ||
433 | */ | ||
434 | #define CSIO_HWF_HOST_INTR_ENABLED 0x00000200 /* Are host interrupts | ||
435 | * enabled? | ||
436 | */ | ||
437 | |||
438 | #define csio_is_hw_intr_enabled(__hw) \ | ||
439 | ((__hw)->flags & CSIO_HWF_HW_INTR_ENABLED) | ||
440 | #define csio_is_host_intr_enabled(__hw) \ | ||
441 | ((__hw)->flags & CSIO_HWF_HOST_INTR_ENABLED) | ||
442 | #define csio_is_hw_master(__hw) ((__hw)->flags & CSIO_HWF_MASTER) | ||
443 | #define csio_is_valid_vpd(__hw) ((__hw)->flags & CSIO_HWF_VPD_VALID) | ||
444 | #define csio_is_dev_id_cached(__hw) ((__hw)->flags & CSIO_HWF_DEVID_CACHED) | ||
445 | #define csio_valid_vpd_copied(__hw) ((__hw)->flags |= CSIO_HWF_VPD_VALID) | ||
446 | #define csio_dev_id_cached(__hw) ((__hw)->flags |= CSIO_HWF_DEVID_CACHED) | ||
447 | |||
448 | /* Defines for intr_mode */ | ||
449 | enum csio_intr_mode { | ||
450 | CSIO_IM_NONE = 0, | ||
451 | CSIO_IM_INTX = 1, | ||
452 | CSIO_IM_MSI = 2, | ||
453 | CSIO_IM_MSIX = 3, | ||
454 | }; | ||
455 | |||
456 | /* Master HW structure: One per function */ | ||
457 | struct csio_hw { | ||
458 | struct csio_sm sm; /* State machine: should | ||
459 | * be the 1st member. | ||
460 | */ | ||
461 | spinlock_t lock; /* Lock for hw */ | ||
462 | |||
463 | struct csio_scsim scsim; /* SCSI module*/ | ||
464 | struct csio_wrm wrm; /* Work request module*/ | ||
465 | struct pci_dev *pdev; /* PCI device */ | ||
466 | |||
467 | void __iomem *regstart; /* Virtual address of | ||
468 | * register map | ||
469 | */ | ||
470 | /* SCSI queue sets */ | ||
471 | uint32_t num_sqsets; /* Number of SCSI | ||
472 | * queue sets */ | ||
473 | uint32_t num_scsi_msix_cpus; /* Number of CPUs that | ||
474 | * will be used | ||
475 | * for ingress | ||
476 | * processing. | ||
477 | */ | ||
478 | |||
479 | struct csio_scsi_qset sqset[CSIO_MAX_PPORTS][CSIO_MAX_SCSI_CPU]; | ||
480 | struct csio_scsi_cpu_info scsi_cpu_info[CSIO_MAX_PPORTS]; | ||
481 | |||
482 | uint32_t evtflag; /* Event flag */ | ||
483 | uint32_t flags; /* HW flags */ | ||
484 | |||
485 | struct csio_mgmtm mgmtm; /* management module */ | ||
486 | struct csio_mbm mbm; /* Mailbox module */ | ||
487 | |||
488 | /* Lnodes */ | ||
489 | uint32_t num_lns; /* Number of lnodes */ | ||
490 | struct csio_lnode *rln; /* Root lnode */ | ||
491 | struct list_head sln_head; /* Sibling lnode | ||
492 | * list | ||
493 | */ | ||
494 | int intr_iq_idx; /* Forward interrupt | ||
495 | * queue. | ||
496 | */ | ||
497 | int fwevt_iq_idx; /* FW evt queue */ | ||
498 | struct work_struct evtq_work; /* Worker thread for | ||
499 | * HW events. | ||
500 | */ | ||
501 | struct list_head evt_free_q; /* freelist of evt | ||
502 | * elements | ||
503 | */ | ||
504 | struct list_head evt_active_q; /* active evt queue*/ | ||
505 | |||
506 | /* board related info */ | ||
507 | char name[32]; | ||
508 | char hw_ver[16]; | ||
509 | char model_desc[32]; | ||
510 | char drv_version[32]; | ||
511 | char fwrev_str[32]; | ||
512 | uint32_t optrom_ver; | ||
513 | uint32_t fwrev; | ||
514 | uint32_t tp_vers; | ||
515 | char chip_ver; | ||
516 | uint32_t cfg_finiver; | ||
517 | uint32_t cfg_finicsum; | ||
518 | uint32_t cfg_cfcsum; | ||
519 | uint8_t cfg_csum_status; | ||
520 | uint8_t cfg_store; | ||
521 | enum csio_dev_state fw_state; | ||
522 | struct csio_vpd vpd; | ||
523 | |||
524 | uint8_t pfn; /* Physical Function | ||
525 | * number | ||
526 | */ | ||
527 | uint32_t port_vec; /* Port vector */ | ||
528 | uint8_t num_pports; /* Number of physical | ||
529 | * ports. | ||
530 | */ | ||
531 | uint8_t rst_retries; /* Reset retries */ | ||
532 | uint8_t cur_evt; /* current s/m evt */ | ||
533 | uint8_t prev_evt; /* Previous s/m evt */ | ||
534 | uint32_t dev_num; /* device number */ | ||
535 | struct csio_pport pport[CSIO_MAX_PPORTS]; /* Ports (XGMACs) */ | ||
536 | struct csio_hw_params params; /* Hw parameters */ | ||
537 | |||
538 | struct pci_pool *scsi_pci_pool; /* PCI pool for SCSI */ | ||
539 | mempool_t *mb_mempool; /* Mailbox memory pool*/ | ||
540 | mempool_t *rnode_mempool; /* rnode memory pool */ | ||
541 | |||
542 | /* Interrupt */ | ||
543 | enum csio_intr_mode intr_mode; /* INTx, MSI, MSIX */ | ||
544 | uint32_t fwevt_intr_idx; /* FW evt MSIX/interrupt | ||
545 | * index | ||
546 | */ | ||
547 | uint32_t nondata_intr_idx; /* nondata MSIX/intr | ||
548 | * idx | ||
549 | */ | ||
550 | |||
551 | uint8_t cfg_neq; /* FW configured no of | ||
552 | * egress queues | ||
553 | */ | ||
554 | uint8_t cfg_niq; /* FW configured no of | ||
555 | * iq queues. | ||
556 | */ | ||
557 | |||
558 | struct csio_fcoe_res_info fres_info; /* Fcoe resource info */ | ||
559 | |||
560 | /* MSIX vectors */ | ||
561 | struct csio_msix_entries msix_entries[CSIO_MAX_MSIX_VECS]; | ||
562 | |||
563 | struct dentry *debugfs_root; /* Debug FS */ | ||
564 | struct csio_hw_stats stats; /* Hw statistics */ | ||
565 | }; | ||
566 | |||
567 | /* Register access macros */ | ||
568 | #define csio_reg(_b, _r) ((_b) + (_r)) | ||
569 | |||
570 | #define csio_rd_reg8(_h, _r) readb(csio_reg((_h)->regstart, (_r))) | ||
571 | #define csio_rd_reg16(_h, _r) readw(csio_reg((_h)->regstart, (_r))) | ||
572 | #define csio_rd_reg32(_h, _r) readl(csio_reg((_h)->regstart, (_r))) | ||
573 | #define csio_rd_reg64(_h, _r) readq(csio_reg((_h)->regstart, (_r))) | ||
574 | |||
575 | #define csio_wr_reg8(_h, _v, _r) writeb((_v), \ | ||
576 | csio_reg((_h)->regstart, (_r))) | ||
577 | #define csio_wr_reg16(_h, _v, _r) writew((_v), \ | ||
578 | csio_reg((_h)->regstart, (_r))) | ||
579 | #define csio_wr_reg32(_h, _v, _r) writel((_v), \ | ||
580 | csio_reg((_h)->regstart, (_r))) | ||
581 | #define csio_wr_reg64(_h, _v, _r) writeq((_v), \ | ||
582 | csio_reg((_h)->regstart, (_r))) | ||
583 | |||
584 | void csio_set_reg_field(struct csio_hw *, uint32_t, uint32_t, uint32_t); | ||
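A minimal sketch of the read-modify-write pattern a helper like csio_set_reg_field() presumably implements on top of the macros above; the actual definition lives in csio_hw.c and may differ:

	static inline void
	example_set_reg_field(struct csio_hw *hw, uint32_t reg,
			      uint32_t mask, uint32_t value)
	{
		uint32_t val = csio_rd_reg32(hw, reg);	/* read current value */

		val = (val & ~mask) | value;		/* update the field */
		csio_wr_reg32(hw, val, reg);		/* note: value, then reg */
	}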
585 | |||
586 | /* Core clocks <==> uSecs */ | ||
587 | static inline uint32_t | ||
588 | csio_core_ticks_to_us(struct csio_hw *hw, uint32_t ticks) | ||
589 | { | ||
590 | /* add Core Clock / 2 to round ticks to nearest uS */ | ||
591 | return (ticks * 1000 + hw->vpd.cclk/2) / hw->vpd.cclk; | ||
592 | } | ||
593 | |||
594 | static inline uint32_t | ||
595 | csio_us_to_core_ticks(struct csio_hw *hw, uint32_t us) | ||
596 | { | ||
597 | return (us * hw->vpd.cclk) / 1000; | ||
598 | } | ||
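For example, assuming vpd.cclk holds the core clock in kHz (the unit these formulas imply), a 50000 kHz (50 MHz) clock gives:

	/* csio_core_ticks_to_us(hw, 100) = (100 * 1000 + 25000) / 50000 = 2 us
	 * csio_us_to_core_ticks(hw, 2)   = (2 * 50000) / 1000       = 100 ticks
	 */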
599 | |||
600 | /* Easy access macros */ | ||
601 | #define csio_hw_to_wrm(hw) ((struct csio_wrm *)(&(hw)->wrm)) | ||
602 | #define csio_hw_to_mbm(hw) ((struct csio_mbm *)(&(hw)->mbm)) | ||
603 | #define csio_hw_to_scsim(hw) ((struct csio_scsim *)(&(hw)->scsim)) | ||
604 | #define csio_hw_to_mgmtm(hw) ((struct csio_mgmtm *)(&(hw)->mgmtm)) | ||
605 | |||
606 | #define CSIO_PCI_BUS(hw) ((hw)->pdev->bus->number) | ||
607 | #define CSIO_PCI_DEV(hw) (PCI_SLOT((hw)->pdev->devfn)) | ||
608 | #define CSIO_PCI_FUNC(hw) (PCI_FUNC((hw)->pdev->devfn)) | ||
609 | |||
610 | #define csio_set_fwevt_intr_idx(_h, _i) ((_h)->fwevt_intr_idx = (_i)) | ||
611 | #define csio_get_fwevt_intr_idx(_h) ((_h)->fwevt_intr_idx) | ||
612 | #define csio_set_nondata_intr_idx(_h, _i) ((_h)->nondata_intr_idx = (_i)) | ||
613 | #define csio_get_nondata_intr_idx(_h) ((_h)->nondata_intr_idx) | ||
614 | |||
615 | /* Printing/logging */ | ||
616 | #define CSIO_DEVID(__dev) ((__dev)->dev_num) | ||
617 | #define CSIO_DEVID_LO(__dev) (CSIO_DEVID((__dev)) & 0xFFFF) | ||
618 | #define CSIO_DEVID_HI(__dev) ((CSIO_DEVID((__dev)) >> 16) & 0xFFFF) | ||
619 | |||
620 | #define csio_info(__hw, __fmt, ...) \ | ||
621 | dev_info(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__) | ||
622 | |||
623 | #define csio_fatal(__hw, __fmt, ...) \ | ||
624 | dev_crit(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__) | ||
625 | |||
626 | #define csio_err(__hw, __fmt, ...) \ | ||
627 | dev_err(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__) | ||
628 | |||
629 | #define csio_warn(__hw, __fmt, ...) \ | ||
630 | dev_warn(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__) | ||
631 | |||
632 | #ifdef __CSIO_DEBUG__ | ||
633 | #define csio_dbg(__hw, __fmt, ...) \ | ||
634 | csio_info((__hw), __fmt, ##__VA_ARGS__) | ||
635 | #else | ||
636 | #define csio_dbg(__hw, __fmt, ...) | ||
637 | #endif | ||
638 | |||
639 | int csio_mgmt_req_lookup(struct csio_mgmtm *, struct csio_ioreq *); | ||
640 | void csio_hw_intr_disable(struct csio_hw *); | ||
641 | int csio_hw_slow_intr_handler(struct csio_hw *hw); | ||
642 | int csio_hw_start(struct csio_hw *); | ||
643 | int csio_hw_stop(struct csio_hw *); | ||
644 | int csio_hw_reset(struct csio_hw *); | ||
645 | int csio_is_hw_ready(struct csio_hw *); | ||
646 | int csio_is_hw_removing(struct csio_hw *); | ||
647 | |||
648 | int csio_fwevtq_handler(struct csio_hw *); | ||
649 | void csio_evtq_worker(struct work_struct *); | ||
650 | int csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, | ||
651 | void *evt_msg, uint16_t len); | ||
652 | void csio_evtq_flush(struct csio_hw *hw); | ||
653 | |||
654 | int csio_request_irqs(struct csio_hw *); | ||
655 | void csio_intr_enable(struct csio_hw *); | ||
656 | void csio_intr_disable(struct csio_hw *, bool); | ||
657 | |||
658 | struct csio_lnode *csio_lnode_alloc(struct csio_hw *); | ||
659 | int csio_config_queues(struct csio_hw *); | ||
660 | |||
661 | int csio_hw_mc_read(struct csio_hw *, uint32_t, | ||
662 | uint32_t *, uint64_t *); | ||
663 | int csio_hw_edc_read(struct csio_hw *, int, uint32_t, uint32_t *, | ||
664 | uint64_t *); | ||
665 | int csio_hw_init(struct csio_hw *); | ||
666 | void csio_hw_exit(struct csio_hw *); | ||
667 | #endif /* ifndef __CSIO_HW_H__ */ | ||
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c new file mode 100644 index 000000000000..fdd408ff80ad --- /dev/null +++ b/drivers/scsi/csiostor/csio_init.c | |||
@@ -0,0 +1,1274 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio FCoE driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
36 | |||
37 | #include <linux/kernel.h> | ||
38 | #include <linux/module.h> | ||
39 | #include <linux/init.h> | ||
40 | #include <linux/pci.h> | ||
41 | #include <linux/aer.h> | ||
42 | #include <linux/mm.h> | ||
43 | #include <linux/notifier.h> | ||
44 | #include <linux/kdebug.h> | ||
45 | #include <linux/seq_file.h> | ||
46 | #include <linux/debugfs.h> | ||
47 | #include <linux/string.h> | ||
48 | #include <linux/export.h> | ||
49 | |||
50 | #include "csio_init.h" | ||
51 | #include "csio_defs.h" | ||
52 | |||
53 | #define CSIO_MIN_MEMPOOL_SZ 64 | ||
54 | |||
55 | static struct dentry *csio_debugfs_root; | ||
56 | |||
57 | static struct scsi_transport_template *csio_fcoe_transport; | ||
58 | static struct scsi_transport_template *csio_fcoe_transport_vport; | ||
59 | |||
60 | /* | ||
61 | * debugfs support | ||
62 | */ | ||
63 | static int | ||
64 | csio_mem_open(struct inode *inode, struct file *file) | ||
65 | { | ||
66 | file->private_data = inode->i_private; | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | static ssize_t | ||
71 | csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) | ||
72 | { | ||
73 | loff_t pos = *ppos; | ||
74 | loff_t avail = file->f_path.dentry->d_inode->i_size; | ||
75 | unsigned int mem = (uintptr_t)file->private_data & 3; | ||
76 | struct csio_hw *hw = file->private_data - mem; | ||
77 | |||
78 | if (pos < 0) | ||
79 | return -EINVAL; | ||
80 | if (pos >= avail) | ||
81 | return 0; | ||
82 | if (count > avail - pos) | ||
83 | count = avail - pos; | ||
84 | |||
85 | while (count) { | ||
86 | size_t len; | ||
87 | int ret, ofst; | ||
88 | __be32 data[16]; | ||
89 | |||
90 | if (mem == MEM_MC) | ||
91 | ret = csio_hw_mc_read(hw, pos, data, NULL); | ||
92 | else | ||
93 | ret = csio_hw_edc_read(hw, mem, pos, data, NULL); | ||
94 | if (ret) | ||
95 | return ret; | ||
96 | |||
97 | ofst = pos % sizeof(data); | ||
98 | len = min(count, sizeof(data) - ofst); | ||
99 | if (copy_to_user(buf, (u8 *)data + ofst, len)) | ||
100 | return -EFAULT; | ||
101 | |||
102 | buf += len; | ||
103 | pos += len; | ||
104 | count -= len; | ||
105 | } | ||
106 | count = pos - *ppos; | ||
107 | *ppos = pos; | ||
108 | return count; | ||
109 | } | ||
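A worked example of the chunking above (data[] is 16 * 4 = 64 bytes):

	/* A read at pos = 100 with count = 200:
	 *   ofst = 100 % 64        = 36
	 *   len  = min(200, 64-36) = 28 bytes copied this pass;
	 * pos then advances to 128, so later passes are 64-byte aligned.
	 */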
110 | |||
111 | static const struct file_operations csio_mem_debugfs_fops = { | ||
112 | .owner = THIS_MODULE, | ||
113 | .open = csio_mem_open, | ||
114 | .read = csio_mem_read, | ||
115 | .llseek = default_llseek, | ||
116 | }; | ||
117 | |||
118 | static void __devinit | ||
119 | csio_add_debugfs_mem(struct csio_hw *hw, const char *name, | ||
120 | unsigned int idx, unsigned int size_mb) | ||
121 | { | ||
122 | struct dentry *de; | ||
123 | |||
124 | de = debugfs_create_file(name, S_IRUSR, hw->debugfs_root, | ||
125 | (void *)hw + idx, &csio_mem_debugfs_fops); | ||
126 | if (de && de->d_inode) | ||
127 | de->d_inode->i_size = size_mb << 20; | ||
128 | } | ||
129 | |||
130 | static int __devinit | ||
131 | csio_setup_debugfs(struct csio_hw *hw) | ||
132 | { | ||
133 | int i; | ||
134 | |||
135 | if (IS_ERR_OR_NULL(hw->debugfs_root)) | ||
136 | return -1; | ||
137 | |||
138 | i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE); | ||
139 | if (i & EDRAM0_ENABLE) | ||
140 | csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5); | ||
141 | if (i & EDRAM1_ENABLE) | ||
142 | csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5); | ||
143 | if (i & EXT_MEM_ENABLE) | ||
144 | csio_add_debugfs_mem(hw, "mc", MEM_MC, | ||
145 | EXT_MEM_SIZE_GET(csio_rd_reg32(hw, MA_EXT_MEMORY_BAR))); | ||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | /* | ||
150 | * csio_dfs_create - Creates and sets up per-hw debugfs. | ||
151 | * | ||
152 | */ | ||
153 | static int | ||
154 | csio_dfs_create(struct csio_hw *hw) | ||
155 | { | ||
156 | if (csio_debugfs_root) { | ||
157 | hw->debugfs_root = debugfs_create_dir(pci_name(hw->pdev), | ||
158 | csio_debugfs_root); | ||
159 | csio_setup_debugfs(hw); | ||
160 | } | ||
161 | |||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | /* | ||
166 | * csio_dfs_destroy - Destroys per-hw debugfs. | ||
167 | */ | ||
168 | static int | ||
169 | csio_dfs_destroy(struct csio_hw *hw) | ||
170 | { | ||
171 | if (hw->debugfs_root) | ||
172 | debugfs_remove_recursive(hw->debugfs_root); | ||
173 | |||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * csio_dfs_init - Debug filesystem initialization for the module. | ||
179 | * | ||
180 | */ | ||
181 | static int | ||
182 | csio_dfs_init(void) | ||
183 | { | ||
184 | csio_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); | ||
185 | if (!csio_debugfs_root) | ||
186 | pr_warn("Could not create debugfs entry, continuing\n"); | ||
187 | |||
188 | return 0; | ||
189 | } | ||
190 | |||
191 | /* | ||
192 | * csio_dfs_exit - debugfs cleanup for the module. | ||
193 | */ | ||
194 | static void | ||
195 | csio_dfs_exit(void) | ||
196 | { | ||
197 | debugfs_remove(csio_debugfs_root); | ||
198 | } | ||
199 | |||
200 | /* | ||
201 | * csio_pci_init - PCI initialization. | ||
202 | * @pdev: PCI device. | ||
203 | * @bars: Bitmask of BARs to be requested. | ||
204 | * | ||
205 | * Initializes the PCI function by enabling MMIO, setting bus | ||
206 | * mastership and setting DMA mask. | ||
207 | */ | ||
208 | static int | ||
209 | csio_pci_init(struct pci_dev *pdev, int *bars) | ||
210 | { | ||
211 | int rv = -ENODEV; | ||
212 | |||
213 | *bars = pci_select_bars(pdev, IORESOURCE_MEM); | ||
214 | |||
215 | if (pci_enable_device_mem(pdev)) | ||
216 | goto err; | ||
217 | |||
218 | if (pci_request_selected_regions(pdev, *bars, KBUILD_MODNAME)) | ||
219 | goto err_disable_device; | ||
220 | |||
221 | pci_set_master(pdev); | ||
222 | pci_try_set_mwi(pdev); | ||
223 | |||
224 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | ||
225 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
226 | } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { | ||
227 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
228 | } else { | ||
229 | dev_err(&pdev->dev, "No suitable DMA available.\n"); | ||
230 | goto err_release_regions; | ||
231 | } | ||
232 | |||
233 | return 0; | ||
234 | |||
235 | err_release_regions: | ||
236 | pci_release_selected_regions(pdev, *bars); | ||
237 | err_disable_device: | ||
238 | pci_disable_device(pdev); | ||
239 | err: | ||
240 | return rv; | ||
241 | |||
242 | } | ||
243 | |||
244 | /* | ||
245 | * csio_pci_exit - PCI uninitialization. | ||
246 | * @pdev: PCI device. | ||
247 | * @bars: BARs to be released. | ||
248 | * | ||
249 | */ | ||
250 | static void | ||
251 | csio_pci_exit(struct pci_dev *pdev, int *bars) | ||
252 | { | ||
253 | pci_release_selected_regions(pdev, *bars); | ||
254 | pci_disable_device(pdev); | ||
255 | } | ||
256 | |||
257 | /* | ||
258 | * csio_hw_init_workers - Initialize the HW module's worker threads. | ||
259 | * @hw: HW module. | ||
260 | * | ||
261 | */ | ||
262 | static void | ||
263 | csio_hw_init_workers(struct csio_hw *hw) | ||
264 | { | ||
265 | INIT_WORK(&hw->evtq_work, csio_evtq_worker); | ||
266 | } | ||
267 | |||
268 | static void | ||
269 | csio_hw_exit_workers(struct csio_hw *hw) | ||
270 | { | ||
271 | cancel_work_sync(&hw->evtq_work); | ||
272 | flush_scheduled_work(); | ||
273 | } | ||
274 | |||
275 | static int | ||
276 | csio_create_queues(struct csio_hw *hw) | ||
277 | { | ||
278 | int i, j; | ||
279 | struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw); | ||
280 | int rv; | ||
281 | struct csio_scsi_cpu_info *info; | ||
282 | |||
283 | if (hw->flags & CSIO_HWF_Q_FW_ALLOCED) | ||
284 | return 0; | ||
285 | |||
286 | if (hw->intr_mode != CSIO_IM_MSIX) { | ||
287 | rv = csio_wr_iq_create(hw, NULL, hw->intr_iq_idx, | ||
288 | 0, hw->pport[0].portid, false, NULL); | ||
289 | if (rv != 0) { | ||
290 | csio_err(hw, "Forward Interrupt IQ failed!: %d\n", rv); | ||
291 | return rv; | ||
292 | } | ||
293 | } | ||
294 | |||
295 | /* FW event queue */ | ||
296 | rv = csio_wr_iq_create(hw, NULL, hw->fwevt_iq_idx, | ||
297 | csio_get_fwevt_intr_idx(hw), | ||
298 | hw->pport[0].portid, true, NULL); | ||
299 | if (rv != 0) { | ||
300 | csio_err(hw, "FW event IQ config failed!: %d\n", rv); | ||
301 | return rv; | ||
302 | } | ||
303 | |||
304 | /* Create mgmt queue */ | ||
305 | rv = csio_wr_eq_create(hw, NULL, mgmtm->eq_idx, | ||
306 | mgmtm->iq_idx, hw->pport[0].portid, NULL); | ||
307 | |||
308 | if (rv != 0) { | ||
309 | csio_err(hw, "Mgmt EQ create failed!: %d\n", rv); | ||
310 | goto err; | ||
311 | } | ||
312 | |||
313 | /* Create SCSI queues */ | ||
314 | for (i = 0; i < hw->num_pports; i++) { | ||
315 | info = &hw->scsi_cpu_info[i]; | ||
316 | |||
317 | for (j = 0; j < info->max_cpus; j++) { | ||
318 | struct csio_scsi_qset *sqset = &hw->sqset[i][j]; | ||
319 | |||
320 | rv = csio_wr_iq_create(hw, NULL, sqset->iq_idx, | ||
321 | sqset->intr_idx, i, false, NULL); | ||
322 | if (rv != 0) { | ||
323 | csio_err(hw, | ||
324 | "SCSI module IQ config failed [%d][%d]:%d\n", | ||
325 | i, j, rv); | ||
326 | goto err; | ||
327 | } | ||
328 | rv = csio_wr_eq_create(hw, NULL, sqset->eq_idx, | ||
329 | sqset->iq_idx, i, NULL); | ||
330 | if (rv != 0) { | ||
331 | csio_err(hw, | ||
332 | "SCSI module EQ config failed [%d][%d]:%d\n", | ||
333 | i, j, rv); | ||
334 | goto err; | ||
335 | } | ||
336 | } /* for all CPUs */ | ||
337 | } /* For all ports */ | ||
338 | |||
339 | hw->flags |= CSIO_HWF_Q_FW_ALLOCED; | ||
340 | return 0; | ||
341 | err: | ||
342 | csio_wr_destroy_queues(hw, true); | ||
343 | return -EINVAL; | ||
344 | } | ||
345 | |||
346 | /* | ||
347 | * csio_config_queues - Configure the DMA queues. | ||
348 | * @hw: HW module. | ||
349 | * | ||
350 | * Allocates memory for the queues and registers them with FW. | ||
351 | */ | ||
352 | int | ||
353 | csio_config_queues(struct csio_hw *hw) | ||
354 | { | ||
355 | int i, j, idx, k = 0; | ||
356 | int rv; | ||
357 | struct csio_scsi_qset *sqset; | ||
358 | struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw); | ||
359 | struct csio_scsi_qset *orig; | ||
360 | struct csio_scsi_cpu_info *info; | ||
361 | |||
362 | if (hw->flags & CSIO_HWF_Q_MEM_ALLOCED) | ||
363 | return csio_create_queues(hw); | ||
364 | |||
365 | /* Calculate number of SCSI queues for MSIX we would like */ | ||
366 | hw->num_scsi_msix_cpus = num_online_cpus(); | ||
367 | hw->num_sqsets = num_online_cpus() * hw->num_pports; | ||
368 | |||
369 | if (hw->num_sqsets > CSIO_MAX_SCSI_QSETS) { | ||
370 | hw->num_sqsets = CSIO_MAX_SCSI_QSETS; | ||
371 | hw->num_scsi_msix_cpus = CSIO_MAX_SCSI_CPU; | ||
372 | } | ||
373 | |||
374 | /* Initialize max_cpus, may get reduced during msix allocations */ | ||
375 | for (i = 0; i < hw->num_pports; i++) | ||
376 | hw->scsi_cpu_info[i].max_cpus = hw->num_scsi_msix_cpus; | ||
377 | |||
378 | csio_dbg(hw, "nsqsets:%d scpus:%d\n", | ||
379 | hw->num_sqsets, hw->num_scsi_msix_cpus); | ||
380 | |||
381 | csio_intr_enable(hw); | ||
382 | |||
383 | if (hw->intr_mode != CSIO_IM_MSIX) { | ||
384 | |||
385 | /* Allocate Forward interrupt iq. */ | ||
386 | hw->intr_iq_idx = csio_wr_alloc_q(hw, CSIO_INTR_IQSIZE, | ||
387 | CSIO_INTR_WRSIZE, CSIO_INGRESS, | ||
388 | (void *)hw, 0, 0, NULL); | ||
389 | if (hw->intr_iq_idx == -1) { | ||
390 | csio_err(hw, | ||
391 | "Forward interrupt queue creation failed\n"); | ||
392 | goto intr_disable; | ||
393 | } | ||
394 | } | ||
395 | |||
396 | /* Allocate the FW evt queue */ | ||
397 | hw->fwevt_iq_idx = csio_wr_alloc_q(hw, CSIO_FWEVT_IQSIZE, | ||
398 | CSIO_FWEVT_WRSIZE, | ||
399 | CSIO_INGRESS, (void *)hw, | ||
400 | CSIO_FWEVT_FLBUFS, 0, | ||
401 | csio_fwevt_intx_handler); | ||
402 | if (hw->fwevt_iq_idx == -1) { | ||
403 | csio_err(hw, "FW evt queue creation failed\n"); | ||
404 | goto intr_disable; | ||
405 | } | ||
406 | |||
407 | /* Allocate the mgmt queue */ | ||
408 | mgmtm->eq_idx = csio_wr_alloc_q(hw, CSIO_MGMT_EQSIZE, | ||
409 | CSIO_MGMT_EQ_WRSIZE, | ||
410 | CSIO_EGRESS, (void *)hw, 0, 0, NULL); | ||
411 | if (mgmtm->eq_idx == -1) { | ||
412 | csio_err(hw, "Failed to alloc egress queue for mgmt module\n"); | ||
413 | goto intr_disable; | ||
414 | } | ||
415 | |||
416 | /* Use FW IQ for MGMT req completion */ | ||
417 | mgmtm->iq_idx = hw->fwevt_iq_idx; | ||
418 | |||
419 | /* Allocate SCSI queues */ | ||
420 | for (i = 0; i < hw->num_pports; i++) { | ||
421 | info = &hw->scsi_cpu_info[i]; | ||
422 | |||
423 | for (j = 0; j < hw->num_scsi_msix_cpus; j++) { | ||
424 | sqset = &hw->sqset[i][j]; | ||
425 | |||
426 | if (j >= info->max_cpus) { | ||
427 | k = j % info->max_cpus; | ||
428 | orig = &hw->sqset[i][k]; | ||
429 | sqset->eq_idx = orig->eq_idx; | ||
430 | sqset->iq_idx = orig->iq_idx; | ||
431 | continue; | ||
432 | } | ||
433 | |||
434 | idx = csio_wr_alloc_q(hw, csio_scsi_eqsize, 0, | ||
435 | CSIO_EGRESS, (void *)hw, 0, 0, | ||
436 | NULL); | ||
437 | if (idx == -1) { | ||
438 | csio_err(hw, "EQ creation failed for idx:%d\n", | ||
439 | idx); | ||
440 | goto intr_disable; | ||
441 | } | ||
442 | |||
443 | sqset->eq_idx = idx; | ||
444 | |||
445 | idx = csio_wr_alloc_q(hw, CSIO_SCSI_IQSIZE, | ||
446 | CSIO_SCSI_IQ_WRSZ, CSIO_INGRESS, | ||
447 | (void *)hw, 0, 0, | ||
448 | csio_scsi_intx_handler); | ||
449 | if (idx == -1) { | ||
450 | csio_err(hw, "IQ creation failed for idx:%d\n", | ||
451 | idx); | ||
452 | goto intr_disable; | ||
453 | } | ||
454 | sqset->iq_idx = idx; | ||
455 | } /* for all CPUs */ | ||
456 | } /* For all ports */ | ||
457 | |||
458 | hw->flags |= CSIO_HWF_Q_MEM_ALLOCED; | ||
459 | |||
460 | rv = csio_create_queues(hw); | ||
461 | if (rv != 0) | ||
462 | goto intr_disable; | ||
463 | |||
464 | /* | ||
465 | * Now request IRQs for the vectors. In the event of a failure, | ||
466 | * cleanup is handled internally by this function. | ||
467 | */ | ||
468 | rv = csio_request_irqs(hw); | ||
469 | if (rv != 0) | ||
470 | return -EINVAL; | ||
471 | |||
472 | return 0; | ||
473 | |||
474 | intr_disable: | ||
475 | csio_intr_disable(hw, false); | ||
476 | |||
477 | return -EINVAL; | ||
478 | } | ||
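
The j >= info->max_cpus branch above implements qset sharing: if MSI-X allocation reduced a port's max_cpus below the planned num_scsi_msix_cpus, the surplus CPUs reuse the queues of CPU j % max_cpus instead of getting their own. A minimal standalone model of that mapping, illustrative only and with made-up counts:

#include <stdio.h>

int main(void)
{
	int num_scsi_msix_cpus = 8;	/* CPUs we planned queues for */
	int max_cpus = 3;		/* qsets this port actually got */
	int j;

	for (j = 0; j < num_scsi_msix_cpus; j++)
		printf("cpu %d -> qset %d%s\n", j,
		       j >= max_cpus ? j % max_cpus : j,
		       j >= max_cpus ? " (shared)" : "");
	return 0;
}
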
479 | |||
480 | static int | ||
481 | csio_resource_alloc(struct csio_hw *hw) | ||
482 | { | ||
483 | struct csio_wrm *wrm = csio_hw_to_wrm(hw); | ||
484 | int rv = -ENOMEM; | ||
485 | |||
486 | wrm->num_q = ((CSIO_MAX_SCSI_QSETS * 2) + CSIO_HW_NIQ + | ||
487 | CSIO_HW_NEQ + CSIO_HW_NFLQ + CSIO_HW_NINTXQ); | ||
488 | |||
489 | hw->mb_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ, | ||
490 | sizeof(struct csio_mb)); | ||
491 | if (!hw->mb_mempool) | ||
492 | goto err; | ||
493 | |||
494 | hw->rnode_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ, | ||
495 | sizeof(struct csio_rnode)); | ||
496 | if (!hw->rnode_mempool) | ||
497 | goto err_free_mb_mempool; | ||
498 | |||
499 | hw->scsi_pci_pool = pci_pool_create("csio_scsi_pci_pool", hw->pdev, | ||
500 | CSIO_SCSI_RSP_LEN, 8, 0); | ||
501 | if (!hw->scsi_pci_pool) | ||
502 | goto err_free_rn_pool; | ||
503 | |||
504 | return 0; | ||
505 | |||
506 | err_free_rn_pool: | ||
507 | mempool_destroy(hw->rnode_mempool); | ||
508 | hw->rnode_mempool = NULL; | ||
509 | err_free_mb_mempool: | ||
510 | mempool_destroy(hw->mb_mempool); | ||
511 | hw->mb_mempool = NULL; | ||
512 | err: | ||
513 | return rv; | ||
514 | } | ||
515 | |||
516 | static void | ||
517 | csio_resource_free(struct csio_hw *hw) | ||
518 | { | ||
519 | pci_pool_destroy(hw->scsi_pci_pool); | ||
520 | hw->scsi_pci_pool = NULL; | ||
521 | mempool_destroy(hw->rnode_mempool); | ||
522 | hw->rnode_mempool = NULL; | ||
523 | mempool_destroy(hw->mb_mempool); | ||
524 | hw->mb_mempool = NULL; | ||
525 | } | ||
526 | |||
527 | /* | ||
528 | * csio_hw_alloc - Allocate and initialize the HW module. | ||
529 | * @pdev: PCI device. | ||
530 | * | ||
531 | * Allocates HW structure, DMA, memory resources, maps BARs to | ||
532 | * host memory and initializes HW module. | ||
533 | */ | ||
534 | static struct csio_hw * __devinit | ||
535 | csio_hw_alloc(struct pci_dev *pdev) | ||
536 | { | ||
537 | struct csio_hw *hw; | ||
538 | |||
539 | hw = kzalloc(sizeof(struct csio_hw), GFP_KERNEL); | ||
540 | if (!hw) | ||
541 | goto err; | ||
542 | |||
543 | hw->pdev = pdev; | ||
544 | strncpy(hw->drv_version, CSIO_DRV_VERSION, 32); | ||
545 | |||
546 | /* memory pool/DMA pool allocation */ | ||
547 | if (csio_resource_alloc(hw)) | ||
548 | goto err_free_hw; | ||
549 | |||
550 | /* Get the start address of registers from BAR 0 */ | ||
551 | hw->regstart = ioremap_nocache(pci_resource_start(pdev, 0), | ||
552 | pci_resource_len(pdev, 0)); | ||
553 | if (!hw->regstart) { | ||
554 | csio_err(hw, "Could not map BAR 0, regstart = %p\n", | ||
555 | hw->regstart); | ||
556 | goto err_resource_free; | ||
557 | } | ||
558 | |||
559 | csio_hw_init_workers(hw); | ||
560 | |||
561 | if (csio_hw_init(hw)) | ||
562 | goto err_unmap_bar; | ||
563 | |||
564 | csio_dfs_create(hw); | ||
565 | |||
566 | csio_dbg(hw, "hw:%p\n", hw); | ||
567 | |||
568 | return hw; | ||
569 | |||
570 | err_unmap_bar: | ||
571 | csio_hw_exit_workers(hw); | ||
572 | iounmap(hw->regstart); | ||
573 | err_resource_free: | ||
574 | csio_resource_free(hw); | ||
575 | err_free_hw: | ||
576 | kfree(hw); | ||
577 | err: | ||
578 | return NULL; | ||
579 | } | ||
580 | |||
581 | /* | ||
582 | * csio_hw_free - Uninitialize and free the HW module. | ||
583 | * @hw: The HW module | ||
584 | * | ||
585 | * Disable interrupts, uninit the HW module, free resources, free hw. | ||
586 | */ | ||
587 | static void | ||
588 | csio_hw_free(struct csio_hw *hw) | ||
589 | { | ||
590 | csio_intr_disable(hw, true); | ||
591 | csio_hw_exit_workers(hw); | ||
592 | csio_hw_exit(hw); | ||
593 | iounmap(hw->regstart); | ||
594 | csio_dfs_destroy(hw); | ||
595 | csio_resource_free(hw); | ||
596 | kfree(hw); | ||
597 | } | ||
598 | |||
599 | /** | ||
600 | * csio_shost_init - Create and initialize the lnode module. | ||
601 | * @hw: The HW module. | ||
602 | * @dev: The device associated with this invocation. | ||
603 | * @probe: Called from probe context or not? | ||
604 | * @pln: Parent lnode, if any. | ||
605 | * | ||
606 | * Allocates lnode structure via scsi_host_alloc, initializes | ||
607 | * shost, initializes lnode module and registers with SCSI ML | ||
608 | * via scsi_host_add. This function is shared between physical and | ||
609 | * virtual node ports. | ||
610 | */ | ||
611 | struct csio_lnode * | ||
612 | csio_shost_init(struct csio_hw *hw, struct device *dev, | ||
613 | bool probe, struct csio_lnode *pln) | ||
614 | { | ||
615 | struct Scsi_Host *shost = NULL; | ||
616 | struct csio_lnode *ln; | ||
617 | |||
618 | csio_fcoe_shost_template.cmd_per_lun = csio_lun_qdepth; | ||
619 | csio_fcoe_shost_vport_template.cmd_per_lun = csio_lun_qdepth; | ||
620 | |||
621 | /* | ||
622 | * hw->pdev is the physical port's PCI dev structure, | ||
623 | * which will be different from the NPIV dev structure. | ||
624 | */ | ||
625 | if (dev == &hw->pdev->dev) | ||
626 | shost = scsi_host_alloc( | ||
627 | &csio_fcoe_shost_template, | ||
628 | sizeof(struct csio_lnode)); | ||
629 | else | ||
630 | shost = scsi_host_alloc( | ||
631 | &csio_fcoe_shost_vport_template, | ||
632 | sizeof(struct csio_lnode)); | ||
633 | |||
634 | if (!shost) | ||
635 | goto err; | ||
636 | |||
637 | ln = shost_priv(shost); | ||
638 | memset(ln, 0, sizeof(struct csio_lnode)); | ||
639 | |||
640 | /* Link common lnode to this lnode */ | ||
641 | ln->dev_num = (shost->host_no << 16); | ||
642 | |||
643 | shost->can_queue = CSIO_MAX_QUEUE; | ||
644 | shost->this_id = -1; | ||
645 | shost->unique_id = shost->host_no; | ||
646 | shost->max_cmd_len = 16; /* Max CDB length supported */ | ||
647 | shost->max_id = min_t(uint32_t, csio_fcoe_rnodes, | ||
648 | hw->fres_info.max_ssns); | ||
649 | shost->max_lun = CSIO_MAX_LUN; | ||
650 | if (dev == &hw->pdev->dev) | ||
651 | shost->transportt = csio_fcoe_transport; | ||
652 | else | ||
653 | shost->transportt = csio_fcoe_transport_vport; | ||
654 | |||
655 | /* root lnode */ | ||
656 | if (!hw->rln) | ||
657 | hw->rln = ln; | ||
658 | |||
659 | /* Other initialization here: Common, Transport specific */ | ||
660 | if (csio_lnode_init(ln, hw, pln)) | ||
661 | goto err_shost_put; | ||
662 | |||
663 | if (scsi_add_host(shost, dev)) | ||
664 | goto err_lnode_exit; | ||
665 | |||
666 | return ln; | ||
667 | |||
668 | err_lnode_exit: | ||
669 | csio_lnode_exit(ln); | ||
670 | err_shost_put: | ||
671 | scsi_host_put(shost); | ||
672 | err: | ||
673 | return NULL; | ||
674 | } | ||
675 | |||
676 | /** | ||
677 | * csio_shost_exit - De-instantiate the shost. | ||
678 | * @ln: The lnode module corresponding to the shost. | ||
679 | * | ||
680 | */ | ||
681 | void | ||
682 | csio_shost_exit(struct csio_lnode *ln) | ||
683 | { | ||
684 | struct Scsi_Host *shost = csio_ln_to_shost(ln); | ||
685 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
686 | |||
687 | /* Inform transport */ | ||
688 | fc_remove_host(shost); | ||
689 | |||
690 | /* Inform SCSI ML */ | ||
691 | scsi_remove_host(shost); | ||
692 | |||
693 | /* Flush all the events, so that any rnode removal events | ||
694 | * already queued are handled before we remove the lnode. | ||
695 | */ | ||
696 | spin_lock_irq(&hw->lock); | ||
697 | csio_evtq_flush(hw); | ||
698 | spin_unlock_irq(&hw->lock); | ||
699 | |||
700 | csio_lnode_exit(ln); | ||
701 | scsi_host_put(shost); | ||
702 | } | ||
703 | |||
704 | struct csio_lnode * | ||
705 | csio_lnode_alloc(struct csio_hw *hw) | ||
706 | { | ||
707 | return csio_shost_init(hw, &hw->pdev->dev, false, NULL); | ||
708 | } | ||
709 | |||
710 | void | ||
711 | csio_lnodes_block_request(struct csio_hw *hw) | ||
712 | { | ||
713 | struct Scsi_Host *shost; | ||
714 | struct csio_lnode *sln; | ||
715 | struct csio_lnode *ln; | ||
716 | struct list_head *cur_ln, *cur_cln; | ||
717 | struct csio_lnode **lnode_list; | ||
718 | int cur_cnt = 0, ii; | ||
719 | |||
720 | lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns), | ||
721 | GFP_KERNEL); | ||
722 | if (!lnode_list) { | ||
723 | csio_err(hw, "Failed to allocate lnodes_list\n"); | ||
724 | return; | ||
725 | } | ||
726 | |||
727 | spin_lock_irq(&hw->lock); | ||
728 | /* Traverse sibling lnodes */ | ||
729 | list_for_each(cur_ln, &hw->sln_head) { | ||
730 | sln = (struct csio_lnode *) cur_ln; | ||
731 | lnode_list[cur_cnt++] = sln; | ||
732 | |||
733 | /* Traverse children lnodes */ | ||
734 | list_for_each(cur_cln, &sln->cln_head) | ||
735 | lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln; | ||
736 | } | ||
737 | spin_unlock_irq(&hw->lock); | ||
738 | |||
739 | for (ii = 0; ii < cur_cnt; ii++) { | ||
740 | csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]); | ||
741 | ln = lnode_list[ii]; | ||
742 | shost = csio_ln_to_shost(ln); | ||
743 | scsi_block_requests(shost); | ||
744 | |||
745 | } | ||
746 | kfree(lnode_list); | ||
747 | } | ||
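
Note the shape of this function, repeated by the unblock and by-port variants below: lnode pointers are snapshotted into a flat array while hw->lock is held, and scsi_block_requests() is only called after the lock is dropped. A minimal userspace model of the pattern, illustrative only, with a mutex standing in for hw->lock:

#include <pthread.h>
#include <stdio.h>

#define MAX_NODES 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int nodes[MAX_NODES] = { 1, 2, 3, 4 };

static void slow_op(int node)
{
	/* scsi_block_requests() in the driver: must not run under the lock */
	printf("acting on node %d outside the lock\n", node);
}

int main(void)
{
	int snapshot[MAX_NODES];
	int i, cnt = 0;

	pthread_mutex_lock(&lock);		/* hw->lock in the driver */
	for (i = 0; i < MAX_NODES; i++)
		snapshot[cnt++] = nodes[i];	/* take a stable snapshot */
	pthread_mutex_unlock(&lock);

	for (i = 0; i < cnt; i++)
		slow_op(snapshot[i]);
	return 0;
}
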
748 | |||
749 | void | ||
750 | csio_lnodes_unblock_request(struct csio_hw *hw) | ||
751 | { | ||
752 | struct csio_lnode *ln; | ||
753 | struct Scsi_Host *shost; | ||
754 | struct csio_lnode *sln; | ||
755 | struct list_head *cur_ln, *cur_cln; | ||
756 | struct csio_lnode **lnode_list; | ||
757 | int cur_cnt = 0, ii; | ||
758 | |||
759 | lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns), | ||
760 | GFP_KERNEL); | ||
761 | if (!lnode_list) { | ||
762 | csio_err(hw, "Failed to allocate lnodes_list\n"); | ||
763 | return; | ||
764 | } | ||
765 | |||
766 | spin_lock_irq(&hw->lock); | ||
767 | /* Traverse sibling lnodes */ | ||
768 | list_for_each(cur_ln, &hw->sln_head) { | ||
769 | sln = (struct csio_lnode *) cur_ln; | ||
770 | lnode_list[cur_cnt++] = sln; | ||
771 | |||
772 | /* Traverse children lnodes */ | ||
773 | list_for_each(cur_cln, &sln->cln_head) | ||
774 | lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln; | ||
775 | } | ||
776 | spin_unlock_irq(&hw->lock); | ||
777 | |||
778 | for (ii = 0; ii < cur_cnt; ii++) { | ||
779 | csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]); | ||
780 | ln = lnode_list[ii]; | ||
781 | shost = csio_ln_to_shost(ln); | ||
782 | scsi_unblock_requests(shost); | ||
783 | } | ||
784 | kfree(lnode_list); | ||
785 | } | ||
786 | |||
787 | void | ||
788 | csio_lnodes_block_by_port(struct csio_hw *hw, uint8_t portid) | ||
789 | { | ||
790 | struct csio_lnode *ln; | ||
791 | struct Scsi_Host *shost; | ||
792 | struct csio_lnode *sln; | ||
793 | struct list_head *cur_ln, *cur_cln; | ||
794 | struct csio_lnode **lnode_list; | ||
795 | int cur_cnt = 0, ii; | ||
796 | |||
797 | lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns), | ||
798 | GFP_KERNEL); | ||
799 | if (!lnode_list) { | ||
800 | csio_err(hw, "Failed to allocate lnodes_list\n"); | ||
801 | return; | ||
802 | } | ||
803 | |||
804 | spin_lock_irq(&hw->lock); | ||
805 | /* Traverse sibling lnodes */ | ||
806 | list_for_each(cur_ln, &hw->sln_head) { | ||
807 | sln = (struct csio_lnode *) cur_ln; | ||
808 | if (sln->portid != portid) | ||
809 | continue; | ||
810 | |||
811 | lnode_list[cur_cnt++] = sln; | ||
812 | |||
813 | /* Traverse children lnodes */ | ||
814 | list_for_each(cur_cln, &sln->cln_head) | ||
815 | lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln; | ||
816 | } | ||
817 | spin_unlock_irq(&hw->lock); | ||
818 | |||
819 | for (ii = 0; ii < cur_cnt; ii++) { | ||
820 | csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]); | ||
821 | ln = lnode_list[ii]; | ||
822 | shost = csio_ln_to_shost(ln); | ||
823 | scsi_block_requests(shost); | ||
824 | } | ||
825 | kfree(lnode_list); | ||
826 | } | ||
827 | |||
828 | void | ||
829 | csio_lnodes_unblock_by_port(struct csio_hw *hw, uint8_t portid) | ||
830 | { | ||
831 | struct csio_lnode *ln; | ||
832 | struct Scsi_Host *shost; | ||
833 | struct csio_lnode *sln; | ||
834 | struct list_head *cur_ln, *cur_cln; | ||
835 | struct csio_lnode **lnode_list; | ||
836 | int cur_cnt = 0, ii; | ||
837 | |||
838 | lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns), | ||
839 | GFP_KERNEL); | ||
840 | if (!lnode_list) { | ||
841 | csio_err(hw, "Failed to allocate lnodes_list\n"); | ||
842 | return; | ||
843 | } | ||
844 | |||
845 | spin_lock_irq(&hw->lock); | ||
846 | /* Traverse sibling lnodes */ | ||
847 | list_for_each(cur_ln, &hw->sln_head) { | ||
848 | sln = (struct csio_lnode *) cur_ln; | ||
849 | if (sln->portid != portid) | ||
850 | continue; | ||
851 | lnode_list[cur_cnt++] = sln; | ||
852 | |||
853 | /* Traverse children lnodes */ | ||
854 | list_for_each(cur_cln, &sln->cln_head) | ||
855 | lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln; | ||
856 | } | ||
857 | spin_unlock_irq(&hw->lock); | ||
858 | |||
859 | for (ii = 0; ii < cur_cnt; ii++) { | ||
860 | csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]); | ||
861 | ln = lnode_list[ii]; | ||
862 | shost = csio_ln_to_shost(ln); | ||
863 | scsi_unblock_requests(shost); | ||
864 | } | ||
865 | kfree(lnode_list); | ||
866 | } | ||
867 | |||
868 | void | ||
869 | csio_lnodes_exit(struct csio_hw *hw, bool npiv) | ||
870 | { | ||
871 | struct csio_lnode *sln; | ||
872 | struct csio_lnode *ln; | ||
873 | struct list_head *cur_ln, *cur_cln; | ||
874 | struct csio_lnode **lnode_list; | ||
875 | int cur_cnt = 0, ii; | ||
876 | |||
877 | lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns), | ||
878 | GFP_KERNEL); | ||
879 | if (!lnode_list) { | ||
880 | csio_err(hw, "lnodes_exit: Failed to allocate lnodes_list.\n"); | ||
881 | return; | ||
882 | } | ||
883 | |||
884 | /* Get all child lnodes (NPIV ports) */ | ||
885 | spin_lock_irq(&hw->lock); | ||
886 | list_for_each(cur_ln, &hw->sln_head) { | ||
887 | sln = (struct csio_lnode *) cur_ln; | ||
888 | |||
889 | /* Traverse children lnodes */ | ||
890 | list_for_each(cur_cln, &sln->cln_head) | ||
891 | lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln; | ||
892 | } | ||
893 | spin_unlock_irq(&hw->lock); | ||
894 | |||
895 | /* Delete NPIV lnodes */ | ||
896 | for (ii = 0; ii < cur_cnt; ii++) { | ||
897 | csio_dbg(hw, "Deleting child lnode: %p\n", lnode_list[ii]); | ||
898 | ln = lnode_list[ii]; | ||
899 | fc_vport_terminate(ln->fc_vport); | ||
900 | } | ||
901 | |||
902 | /* Delete only NPIV lnodes */ | ||
903 | if (npiv) | ||
904 | goto free_lnodes; | ||
905 | |||
906 | cur_cnt = 0; | ||
907 | /* Get all physical lnodes */ | ||
908 | spin_lock_irq(&hw->lock); | ||
909 | /* Traverse sibling lnodes */ | ||
910 | list_for_each(cur_ln, &hw->sln_head) { | ||
911 | sln = (struct csio_lnode *) cur_ln; | ||
912 | lnode_list[cur_cnt++] = sln; | ||
913 | } | ||
914 | spin_unlock_irq(&hw->lock); | ||
915 | |||
916 | /* Delete physical lnodes */ | ||
917 | for (ii = 0; ii < cur_cnt; ii++) { | ||
918 | csio_dbg(hw, "Deleting parent lnode: %p\n", lnode_list[ii]); | ||
919 | csio_shost_exit(lnode_list[ii]); | ||
920 | } | ||
921 | |||
922 | free_lnodes: | ||
923 | kfree(lnode_list); | ||
924 | } | ||
925 | |||
926 | /* | ||
927 | * csio_lnode_init_post - Set lnode attributes after starting HW. | ||
928 | * @ln: lnode. | ||
929 | * | ||
930 | */ | ||
931 | static void | ||
932 | csio_lnode_init_post(struct csio_lnode *ln) | ||
933 | { | ||
934 | struct Scsi_Host *shost = csio_ln_to_shost(ln); | ||
935 | |||
936 | csio_fchost_attr_init(ln); | ||
937 | |||
938 | scsi_scan_host(shost); | ||
939 | } | ||
940 | |||
941 | /* | ||
942 | * csio_probe_one - Instantiate the driver on this PCI function. | ||
943 | * @pdev: PCI device | ||
944 | * @id: Device ID | ||
945 | * | ||
946 | * This is the .probe() callback of the driver. This function: | ||
947 | * - Initializes the PCI function by enabling MMIO, setting bus | ||
948 | * mastership and setting DMA mask. | ||
949 | * - Allocates HW structure, DMA, memory resources, maps BARs to | ||
950 | * host memory and initializes HW module. | ||
951 | * - Allocates lnode structure via scsi_host_alloc, initializes | ||
952 | * shost, initializes the lnode module and registers with SCSI ML | ||
953 | * via scsi_host_add. | ||
954 | * - Enables interrupts, and starts the chip by kicking off the | ||
955 | * HW state machine. | ||
956 | * - Once hardware is ready, initiates a scan of the host via | ||
957 | * scsi_scan_host. | ||
958 | */ | ||
959 | static int __devinit | ||
960 | csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | ||
961 | { | ||
962 | int rv; | ||
963 | int bars; | ||
964 | int i; | ||
965 | struct csio_hw *hw; | ||
966 | struct csio_lnode *ln; | ||
967 | |||
968 | rv = csio_pci_init(pdev, &bars); | ||
969 | if (rv) | ||
970 | goto err; | ||
971 | |||
972 | hw = csio_hw_alloc(pdev); | ||
973 | if (!hw) { | ||
974 | rv = -ENODEV; | ||
975 | goto err_pci_exit; | ||
976 | } | ||
977 | |||
978 | pci_set_drvdata(pdev, hw); | ||
979 | |||
980 | if (csio_hw_start(hw) != 0) { | ||
981 | dev_err(&pdev->dev, | ||
982 | "Failed to start FW, continuing in debug mode.\n"); | ||
983 | return 0; | ||
984 | } | ||
985 | |||
986 | sprintf(hw->fwrev_str, "%u.%u.%u.%u\n", | ||
987 | FW_HDR_FW_VER_MAJOR_GET(hw->fwrev), | ||
988 | FW_HDR_FW_VER_MINOR_GET(hw->fwrev), | ||
989 | FW_HDR_FW_VER_MICRO_GET(hw->fwrev), | ||
990 | FW_HDR_FW_VER_BUILD_GET(hw->fwrev)); | ||
991 | |||
992 | for (i = 0; i < hw->num_pports; i++) { | ||
993 | ln = csio_shost_init(hw, &pdev->dev, true, NULL); | ||
994 | if (!ln) { | ||
995 | rv = -ENODEV; | ||
996 | break; | ||
997 | } | ||
998 | /* Initialize portid */ | ||
999 | ln->portid = hw->pport[i].portid; | ||
1000 | |||
1001 | spin_lock_irq(&hw->lock); | ||
1002 | if (csio_lnode_start(ln) != 0) | ||
1003 | rv = -ENODEV; | ||
1004 | spin_unlock_irq(&hw->lock); | ||
1005 | |||
1006 | if (rv) | ||
1007 | break; | ||
1008 | |||
1009 | csio_lnode_init_post(ln); | ||
1010 | } | ||
1011 | |||
1012 | if (rv) | ||
1013 | goto err_lnode_exit; | ||
1014 | |||
1015 | return 0; | ||
1016 | |||
1017 | err_lnode_exit: | ||
1018 | csio_lnodes_block_request(hw); | ||
1019 | spin_lock_irq(&hw->lock); | ||
1020 | csio_hw_stop(hw); | ||
1021 | spin_unlock_irq(&hw->lock); | ||
1022 | csio_lnodes_unblock_request(hw); | ||
1023 | pci_set_drvdata(hw->pdev, NULL); | ||
1024 | csio_lnodes_exit(hw, 0); | ||
1025 | csio_hw_free(hw); | ||
1026 | err_pci_exit: | ||
1027 | csio_pci_exit(pdev, &bars); | ||
1028 | err: | ||
1029 | dev_err(&pdev->dev, "probe of device failed: %d\n", rv); | ||
1030 | return rv; | ||
1031 | } | ||
1032 | |||
1033 | /* | ||
1034 | * csio_remove_one - Remove one instance of the driver at this PCI function. | ||
1035 | * @pdev: PCI device | ||
1036 | * | ||
1037 | * Used during hotplug operation. | ||
1038 | */ | ||
1039 | static void __devexit | ||
1040 | csio_remove_one(struct pci_dev *pdev) | ||
1041 | { | ||
1042 | struct csio_hw *hw = pci_get_drvdata(pdev); | ||
1043 | int bars = pci_select_bars(pdev, IORESOURCE_MEM); | ||
1044 | |||
1045 | csio_lnodes_block_request(hw); | ||
1046 | spin_lock_irq(&hw->lock); | ||
1047 | |||
1048 | /* Stops the lnode and rnode state machines. | ||
1049 | * Quiesces I/Os. | ||
1050 | * All sessions with remote ports are unregistered. | ||
1051 | */ | ||
1052 | csio_hw_stop(hw); | ||
1053 | spin_unlock_irq(&hw->lock); | ||
1054 | csio_lnodes_unblock_request(hw); | ||
1055 | |||
1056 | csio_lnodes_exit(hw, 0); | ||
1057 | csio_hw_free(hw); | ||
1058 | pci_set_drvdata(pdev, NULL); | ||
1059 | csio_pci_exit(pdev, &bars); | ||
1060 | } | ||
1061 | |||
1062 | /* | ||
1063 | * csio_pci_error_detected - PCI error was detected | ||
1064 | * @pdev: PCI device | ||
1065 | * @state: PCI channel state. | ||
1066 | */ | ||
1067 | static pci_ers_result_t | ||
1068 | csio_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) | ||
1069 | { | ||
1070 | struct csio_hw *hw = pci_get_drvdata(pdev); | ||
1071 | |||
1072 | csio_lnodes_block_request(hw); | ||
1073 | spin_lock_irq(&hw->lock); | ||
1074 | |||
1075 | /* Post the PCI error detected event to the HW s/m. | ||
1076 | * The HW s/m handles this event by quiescing I/Os, unregistering | ||
1077 | * rports and finally taking the device offline. | ||
1078 | */ | ||
1079 | csio_post_event(&hw->sm, CSIO_HWE_PCIERR_DETECTED); | ||
1080 | spin_unlock_irq(&hw->lock); | ||
1081 | csio_lnodes_unblock_request(hw); | ||
1082 | csio_lnodes_exit(hw, 0); | ||
1083 | csio_intr_disable(hw, true); | ||
1084 | pci_disable_device(pdev); | ||
1085 | return state == pci_channel_io_perm_failure ? | ||
1086 | PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; | ||
1087 | } | ||
1088 | |||
1089 | /* | ||
1090 | * csio_pci_slot_reset - PCI slot has been reset. | ||
1091 | * @pdev: PCI device | ||
1092 | * | ||
1093 | */ | ||
1094 | static pci_ers_result_t | ||
1095 | csio_pci_slot_reset(struct pci_dev *pdev) | ||
1096 | { | ||
1097 | struct csio_hw *hw = pci_get_drvdata(pdev); | ||
1098 | int ready; | ||
1099 | |||
1100 | if (pci_enable_device(pdev)) { | ||
1101 | dev_err(&pdev->dev, "cannot re-enable device in slot reset\n"); | ||
1102 | return PCI_ERS_RESULT_DISCONNECT; | ||
1103 | } | ||
1104 | |||
1105 | pci_set_master(pdev); | ||
1106 | pci_restore_state(pdev); | ||
1107 | pci_save_state(pdev); | ||
1108 | pci_cleanup_aer_uncorrect_error_status(pdev); | ||
1109 | |||
1110 | /* Bring the HW s/m to the ready state, | ||
1111 | * but don't resume I/Os. | ||
1112 | */ | ||
1113 | spin_lock_irq(&hw->lock); | ||
1114 | csio_post_event(&hw->sm, CSIO_HWE_PCIERR_SLOT_RESET); | ||
1115 | ready = csio_is_hw_ready(hw); | ||
1116 | spin_unlock_irq(&hw->lock); | ||
1117 | |||
1118 | if (ready) { | ||
1119 | return PCI_ERS_RESULT_RECOVERED; | ||
1120 | } else { | ||
1121 | dev_err(&pdev->dev, "Can't initialize HW when in slot reset\n"); | ||
1122 | return PCI_ERS_RESULT_DISCONNECT; | ||
1123 | } | ||
1124 | } | ||
1125 | |||
1126 | /* | ||
1127 | * csio_pci_resume - Resume normal operations | ||
1128 | * @pdev: PCI device | ||
1129 | * | ||
1130 | */ | ||
1131 | static void | ||
1132 | csio_pci_resume(struct pci_dev *pdev) | ||
1133 | { | ||
1134 | struct csio_hw *hw = pci_get_drvdata(pdev); | ||
1135 | struct csio_lnode *ln; | ||
1136 | int rv = 0; | ||
1137 | int i; | ||
1138 | |||
1139 | /* Bring the LINK UP and Resume IO */ | ||
1140 | |||
1141 | for (i = 0; i < hw->num_pports; i++) { | ||
1142 | ln = csio_shost_init(hw, &pdev->dev, true, NULL); | ||
1143 | if (!ln) { | ||
1144 | rv = -ENODEV; | ||
1145 | break; | ||
1146 | } | ||
1147 | /* Initialize portid */ | ||
1148 | ln->portid = hw->pport[i].portid; | ||
1149 | |||
1150 | spin_lock_irq(&hw->lock); | ||
1151 | if (csio_lnode_start(ln) != 0) | ||
1152 | rv = -ENODEV; | ||
1153 | spin_unlock_irq(&hw->lock); | ||
1154 | |||
1155 | if (rv) | ||
1156 | break; | ||
1157 | |||
1158 | csio_lnode_init_post(ln); | ||
1159 | } | ||
1160 | |||
1161 | if (rv) | ||
1162 | goto err_resume_exit; | ||
1163 | |||
1164 | return; | ||
1165 | |||
1166 | err_resume_exit: | ||
1167 | csio_lnodes_block_request(hw); | ||
1168 | spin_lock_irq(&hw->lock); | ||
1169 | csio_hw_stop(hw); | ||
1170 | spin_unlock_irq(&hw->lock); | ||
1171 | csio_lnodes_unblock_request(hw); | ||
1172 | csio_lnodes_exit(hw, 0); | ||
1173 | csio_hw_free(hw); | ||
1174 | dev_err(&pdev->dev, "resume of device failed: %d\n", rv); | ||
1175 | } | ||
1176 | |||
1177 | static struct pci_error_handlers csio_err_handler = { | ||
1178 | .error_detected = csio_pci_error_detected, | ||
1179 | .slot_reset = csio_pci_slot_reset, | ||
1180 | .resume = csio_pci_resume, | ||
1181 | }; | ||
1182 | |||
1183 | static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = { | ||
1184 | CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0), /* T440DBG FCOE */ | ||
1185 | CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0), /* T420CR FCOE */ | ||
1186 | CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0), /* T422CR FCOE */ | ||
1187 | CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0), /* T440CR FCOE */ | ||
1188 | CSIO_DEVICE(CSIO_DEVID_T420BCH_FCOE, 0), /* T420BCH FCOE */ | ||
1189 | CSIO_DEVICE(CSIO_DEVID_T440BCH_FCOE, 0), /* T440BCH FCOE */ | ||
1190 | CSIO_DEVICE(CSIO_DEVID_T440CH_FCOE, 0), /* T440CH FCOE */ | ||
1191 | CSIO_DEVICE(CSIO_DEVID_T420SO_FCOE, 0), /* T420SO FCOE */ | ||
1192 | CSIO_DEVICE(CSIO_DEVID_T420CX_FCOE, 0), /* T420CX FCOE */ | ||
1193 | CSIO_DEVICE(CSIO_DEVID_T420BT_FCOE, 0), /* T420BT FCOE */ | ||
1194 | CSIO_DEVICE(CSIO_DEVID_T404BT_FCOE, 0), /* T404BT FCOE */ | ||
1195 | CSIO_DEVICE(CSIO_DEVID_B420_FCOE, 0), /* B420 FCOE */ | ||
1196 | CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0), /* B404 FCOE */ | ||
1197 | CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0), /* T480 CR FCOE */ | ||
1198 | CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0), /* T440 LP-CR FCOE */ | ||
1199 | CSIO_DEVICE(CSIO_DEVID_PE10K, 0), /* PE10K FCOE */ | ||
1200 | CSIO_DEVICE(CSIO_DEVID_PE10K_PF1, 0), /* PE10K FCOE on PF1 */ | ||
1201 | { 0, 0, 0, 0, 0, 0, 0 } | ||
1202 | }; | ||
1203 | |||
1204 | |||
1205 | static struct pci_driver csio_pci_driver = { | ||
1206 | .name = KBUILD_MODNAME, | ||
1207 | .driver = { | ||
1208 | .owner = THIS_MODULE, | ||
1209 | }, | ||
1210 | .id_table = csio_pci_tbl, | ||
1211 | .probe = csio_probe_one, | ||
1212 | .remove = csio_remove_one, | ||
1213 | .err_handler = &csio_err_handler, | ||
1214 | }; | ||
1215 | |||
1216 | /* | ||
1217 | * csio_init - Chelsio storage driver initialization function. | ||
1218 | * | ||
1219 | */ | ||
1220 | static int __init | ||
1221 | csio_init(void) | ||
1222 | { | ||
1223 | int rv = -ENOMEM; | ||
1224 | |||
1225 | pr_info("%s %s\n", CSIO_DRV_DESC, CSIO_DRV_VERSION); | ||
1226 | |||
1227 | csio_dfs_init(); | ||
1228 | |||
1229 | csio_fcoe_transport = fc_attach_transport(&csio_fc_transport_funcs); | ||
1230 | if (!csio_fcoe_transport) | ||
1231 | goto err; | ||
1232 | |||
1233 | csio_fcoe_transport_vport = | ||
1234 | fc_attach_transport(&csio_fc_transport_vport_funcs); | ||
1235 | if (!csio_fcoe_transport_vport) | ||
1236 | goto err_vport; | ||
1237 | |||
1238 | rv = pci_register_driver(&csio_pci_driver); | ||
1239 | if (rv) | ||
1240 | goto err_pci; | ||
1241 | |||
1242 | return 0; | ||
1243 | |||
1244 | err_pci: | ||
1245 | fc_release_transport(csio_fcoe_transport_vport); | ||
1246 | err_vport: | ||
1247 | fc_release_transport(csio_fcoe_transport); | ||
1248 | err: | ||
1249 | csio_dfs_exit(); | ||
1250 | return rv; | ||
1251 | } | ||
1252 | |||
1253 | /* | ||
1254 | * csio_exit - Chelsio storage driver uninitialization. | ||
1255 | * | ||
1256 | * Function that gets called in the unload path. | ||
1257 | */ | ||
1258 | static void __exit | ||
1259 | csio_exit(void) | ||
1260 | { | ||
1261 | pci_unregister_driver(&csio_pci_driver); | ||
1262 | csio_dfs_exit(); | ||
1263 | fc_release_transport(csio_fcoe_transport_vport); | ||
1264 | fc_release_transport(csio_fcoe_transport); | ||
1265 | } | ||
1266 | |||
1267 | module_init(csio_init); | ||
1268 | module_exit(csio_exit); | ||
1269 | MODULE_AUTHOR(CSIO_DRV_AUTHOR); | ||
1270 | MODULE_DESCRIPTION(CSIO_DRV_DESC); | ||
1271 | MODULE_LICENSE(CSIO_DRV_LICENSE); | ||
1272 | MODULE_DEVICE_TABLE(pci, csio_pci_tbl); | ||
1273 | MODULE_VERSION(CSIO_DRV_VERSION); | ||
1274 | MODULE_FIRMWARE(CSIO_FW_FNAME); | ||
diff --git a/drivers/scsi/csiostor/csio_init.h b/drivers/scsi/csiostor/csio_init.h new file mode 100644 index 000000000000..0838fd7ec9c7 --- /dev/null +++ b/drivers/scsi/csiostor/csio_init.h | |||
@@ -0,0 +1,158 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio FCoE driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #ifndef __CSIO_INIT_H__ | ||
36 | #define __CSIO_INIT_H__ | ||
37 | |||
38 | #include <linux/pci.h> | ||
39 | #include <linux/if_ether.h> | ||
40 | #include <scsi/scsi.h> | ||
41 | #include <scsi/scsi_device.h> | ||
42 | #include <scsi/scsi_host.h> | ||
43 | #include <scsi/scsi_transport_fc.h> | ||
44 | |||
45 | #include "csio_scsi.h" | ||
46 | #include "csio_lnode.h" | ||
47 | #include "csio_rnode.h" | ||
48 | #include "csio_hw.h" | ||
49 | |||
50 | #define CSIO_DRV_AUTHOR "Chelsio Communications" | ||
51 | #define CSIO_DRV_LICENSE "Dual BSD/GPL" | ||
52 | #define CSIO_DRV_DESC "Chelsio FCoE driver" | ||
53 | #define CSIO_DRV_VERSION "1.0.0" | ||
54 | |||
55 | #define CSIO_DEVICE(devid, idx) \ | ||
56 | { PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) } | ||
57 | |||
58 | #define CSIO_IS_T4_FPGA(_dev) (((_dev) == CSIO_DEVID_PE10K) ||\ | ||
59 | ((_dev) == CSIO_DEVID_PE10K_PF1)) | ||
60 | |||
61 | /* FCoE device IDs */ | ||
62 | #define CSIO_DEVID_PE10K 0xA000 | ||
63 | #define CSIO_DEVID_PE10K_PF1 0xA001 | ||
64 | #define CSIO_DEVID_T440DBG_FCOE 0x4600 | ||
65 | #define CSIO_DEVID_T420CR_FCOE 0x4601 | ||
66 | #define CSIO_DEVID_T422CR_FCOE 0x4602 | ||
67 | #define CSIO_DEVID_T440CR_FCOE 0x4603 | ||
68 | #define CSIO_DEVID_T420BCH_FCOE 0x4604 | ||
69 | #define CSIO_DEVID_T440BCH_FCOE 0x4605 | ||
70 | #define CSIO_DEVID_T440CH_FCOE 0x4606 | ||
71 | #define CSIO_DEVID_T420SO_FCOE 0x4607 | ||
72 | #define CSIO_DEVID_T420CX_FCOE 0x4608 | ||
73 | #define CSIO_DEVID_T420BT_FCOE 0x4609 | ||
74 | #define CSIO_DEVID_T404BT_FCOE 0x460A | ||
75 | #define CSIO_DEVID_B420_FCOE 0x460B | ||
76 | #define CSIO_DEVID_B404_FCOE 0x460C | ||
77 | #define CSIO_DEVID_T480CR_FCOE 0x460D | ||
78 | #define CSIO_DEVID_T440LPCR_FCOE 0x460E | ||
79 | |||
80 | extern struct fc_function_template csio_fc_transport_funcs; | ||
81 | extern struct fc_function_template csio_fc_transport_vport_funcs; | ||
82 | |||
83 | void csio_fchost_attr_init(struct csio_lnode *); | ||
84 | |||
85 | /* INTx handlers */ | ||
86 | void csio_scsi_intx_handler(struct csio_hw *, void *, uint32_t, | ||
87 | struct csio_fl_dma_buf *, void *); | ||
88 | |||
89 | void csio_fwevt_intx_handler(struct csio_hw *, void *, uint32_t, | ||
90 | struct csio_fl_dma_buf *, void *); | ||
91 | |||
92 | /* Common os lnode APIs */ | ||
93 | void csio_lnodes_block_request(struct csio_hw *); | ||
94 | void csio_lnodes_unblock_request(struct csio_hw *); | ||
95 | void csio_lnodes_block_by_port(struct csio_hw *, uint8_t); | ||
96 | void csio_lnodes_unblock_by_port(struct csio_hw *, uint8_t); | ||
97 | |||
98 | struct csio_lnode *csio_shost_init(struct csio_hw *, struct device *, bool, | ||
99 | struct csio_lnode *); | ||
100 | void csio_shost_exit(struct csio_lnode *); | ||
101 | void csio_lnodes_exit(struct csio_hw *, bool); | ||
102 | |||
103 | static inline struct Scsi_Host * | ||
104 | csio_ln_to_shost(struct csio_lnode *ln) | ||
105 | { | ||
106 | return container_of((void *)ln, struct Scsi_Host, hostdata[0]); | ||
107 | } | ||
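
csio_ln_to_shost() is the inverse of shost_priv(): the lnode is laid out in the hostdata[] tail of the Scsi_Host allocation, so container_of() recovers the host from the lnode pointer. A standalone model of the round trip, illustrative only, with stand-in structs rather than the real SCSI types:

#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct host {				/* stand-in for struct Scsi_Host */
	int host_no;
	unsigned long hostdata[];	/* private area, as in Scsi_Host */
};

struct lnode {				/* stand-in for struct csio_lnode */
	int portid;
};

int main(void)
{
	struct host *h = calloc(1, sizeof(*h) + sizeof(struct lnode));
	struct lnode *ln;
	struct host *back;

	if (!h)
		return 1;
	ln = (struct lnode *)h->hostdata;	/* what shost_priv() does */
	back = container_of((void *)ln, struct host, hostdata[0]);
						/* what csio_ln_to_shost() does */
	printf("round trip ok: %d\n", h == back);
	free(h);
	return 0;
}
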
108 | |||
109 | /* SCSI -- locking version of get/put ioreqs */ | ||
110 | static inline struct csio_ioreq * | ||
111 | csio_get_scsi_ioreq_lock(struct csio_hw *hw, struct csio_scsim *scsim) | ||
112 | { | ||
113 | struct csio_ioreq *ioreq; | ||
114 | unsigned long flags; | ||
115 | |||
116 | spin_lock_irqsave(&scsim->freelist_lock, flags); | ||
117 | ioreq = csio_get_scsi_ioreq(scsim); | ||
118 | spin_unlock_irqrestore(&scsim->freelist_lock, flags); | ||
119 | |||
120 | return ioreq; | ||
121 | } | ||
122 | |||
123 | static inline void | ||
124 | csio_put_scsi_ioreq_lock(struct csio_hw *hw, struct csio_scsim *scsim, | ||
125 | struct csio_ioreq *ioreq) | ||
126 | { | ||
127 | unsigned long flags; | ||
128 | |||
129 | spin_lock_irqsave(&scsim->freelist_lock, flags); | ||
130 | csio_put_scsi_ioreq(scsim, ioreq); | ||
131 | spin_unlock_irqrestore(&scsim->freelist_lock, flags); | ||
132 | } | ||
133 | |||
134 | /* Called in interrupt context */ | ||
135 | static inline void | ||
136 | csio_put_scsi_ioreq_list_lock(struct csio_hw *hw, struct csio_scsim *scsim, | ||
137 | struct list_head *reqlist, int n) | ||
138 | { | ||
139 | unsigned long flags; | ||
140 | |||
141 | spin_lock_irqsave(&scsim->freelist_lock, flags); | ||
142 | csio_put_scsi_ioreq_list(scsim, reqlist, n); | ||
143 | spin_unlock_irqrestore(&scsim->freelist_lock, flags); | ||
144 | } | ||
145 | |||
146 | /* Called in interrupt context */ | ||
147 | static inline void | ||
148 | csio_put_scsi_ddp_list_lock(struct csio_hw *hw, struct csio_scsim *scsim, | ||
149 | struct list_head *reqlist, int n) | ||
150 | { | ||
151 | unsigned long flags; | ||
152 | |||
153 | spin_lock_irqsave(&hw->lock, flags); | ||
154 | csio_put_scsi_ddp_list(scsim, reqlist, n); | ||
155 | spin_unlock_irqrestore(&hw->lock, flags); | ||
156 | } | ||
157 | |||
158 | #endif /* ifndef __CSIO_INIT_H__ */ | ||
diff --git a/drivers/scsi/csiostor/csio_isr.c b/drivers/scsi/csiostor/csio_isr.c new file mode 100644 index 000000000000..7ee9777ae2c5 --- /dev/null +++ b/drivers/scsi/csiostor/csio_isr.c | |||
@@ -0,0 +1,624 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio FCoE driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/kernel.h> | ||
36 | #include <linux/pci.h> | ||
37 | #include <linux/interrupt.h> | ||
38 | #include <linux/cpumask.h> | ||
39 | #include <linux/string.h> | ||
40 | |||
41 | #include "csio_init.h" | ||
42 | #include "csio_hw.h" | ||
43 | |||
44 | static irqreturn_t | ||
45 | csio_nondata_isr(int irq, void *dev_id) | ||
46 | { | ||
47 | struct csio_hw *hw = (struct csio_hw *) dev_id; | ||
48 | int rv; | ||
49 | unsigned long flags; | ||
50 | |||
51 | if (unlikely(!hw)) | ||
52 | return IRQ_NONE; | ||
53 | |||
54 | if (unlikely(pci_channel_offline(hw->pdev))) { | ||
55 | CSIO_INC_STATS(hw, n_pcich_offline); | ||
56 | return IRQ_NONE; | ||
57 | } | ||
58 | |||
59 | spin_lock_irqsave(&hw->lock, flags); | ||
60 | csio_hw_slow_intr_handler(hw); | ||
61 | rv = csio_mb_isr_handler(hw); | ||
62 | |||
63 | if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) { | ||
64 | hw->flags |= CSIO_HWF_FWEVT_PENDING; | ||
65 | spin_unlock_irqrestore(&hw->lock, flags); | ||
66 | schedule_work(&hw->evtq_work); | ||
67 | return IRQ_HANDLED; | ||
68 | } | ||
69 | spin_unlock_irqrestore(&hw->lock, flags); | ||
70 | return IRQ_HANDLED; | ||
71 | } | ||
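
The tail of this ISR is a test-and-set deferral: the CSIO_HWF_FWEVT_PENDING flag is checked and set under hw->lock, so evtq_work is scheduled at most once no matter how many interrupts fire before the worker runs and clears it. A standalone model of the idiom, illustrative only:

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool evt_pending;

static void schedule_worker(void)
{
	printf("worker scheduled\n");	/* schedule_work() in the driver */
}

static void isr(void)
{
	bool do_schedule = false;

	pthread_mutex_lock(&lock);
	if (!evt_pending) {		/* test and set under the lock */
		evt_pending = true;
		do_schedule = true;
	}
	pthread_mutex_unlock(&lock);

	if (do_schedule)
		schedule_worker();
}

int main(void)
{
	isr();	/* schedules the worker */
	isr();	/* pending already set: no double schedule */
	return 0;
}
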
72 | |||
73 | /* | ||
74 | * csio_fwevt_handler - Common FW event handler routine. | ||
75 | * @hw: HW module. | ||
76 | * | ||
77 | * This is the ISR for FW events. It is shared b/w MSIX | ||
78 | * and INTx handlers. | ||
79 | */ | ||
80 | static void | ||
81 | csio_fwevt_handler(struct csio_hw *hw) | ||
82 | { | ||
83 | int rv; | ||
84 | unsigned long flags; | ||
85 | |||
86 | rv = csio_fwevtq_handler(hw); | ||
87 | |||
88 | spin_lock_irqsave(&hw->lock, flags); | ||
89 | if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) { | ||
90 | hw->flags |= CSIO_HWF_FWEVT_PENDING; | ||
91 | spin_unlock_irqrestore(&hw->lock, flags); | ||
92 | schedule_work(&hw->evtq_work); | ||
93 | return; | ||
94 | } | ||
95 | spin_unlock_irqrestore(&hw->lock, flags); | ||
96 | |||
97 | } /* csio_fwevt_handler */ | ||
98 | |||
99 | /* | ||
100 | * csio_fwevt_isr() - FW events MSIX ISR | ||
101 | * @irq: IRQ number. | ||
102 | * @dev_id: The HW module. | ||
103 | * | ||
104 | * Process WRs on the FW event queue. | ||
105 | * | ||
106 | */ | ||
107 | static irqreturn_t | ||
108 | csio_fwevt_isr(int irq, void *dev_id) | ||
109 | { | ||
110 | struct csio_hw *hw = (struct csio_hw *) dev_id; | ||
111 | |||
112 | if (unlikely(!hw)) | ||
113 | return IRQ_NONE; | ||
114 | |||
115 | if (unlikely(pci_channel_offline(hw->pdev))) { | ||
116 | CSIO_INC_STATS(hw, n_pcich_offline); | ||
117 | return IRQ_NONE; | ||
118 | } | ||
119 | |||
120 | csio_fwevt_handler(hw); | ||
121 | |||
122 | return IRQ_HANDLED; | ||
123 | } | ||
124 | |||
125 | /* | ||
126 | * csio_fwevt_intx_handler - INTx wrapper for handling FW events. | ||
127 | * @hw: HW module. | ||
128 | * @priv: Private data (unused). | ||
129 | */ | ||
130 | void | ||
131 | csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len, | ||
132 | struct csio_fl_dma_buf *flb, void *priv) | ||
133 | { | ||
134 | csio_fwevt_handler(hw); | ||
135 | } /* csio_fwevt_intx_handler */ | ||
136 | |||
137 | /* | ||
138 | * csio_process_scsi_cmpl - Process a SCSI WR completion. | ||
139 | * @hw: HW module. | ||
140 | * @wr: The completed WR from the ingress queue. | ||
141 | * @len: Length of the WR. | ||
142 | * @flb: Freelist buffer array. | ||
143 | * @cbfn_q: List to which completed ioreqs are queued. | ||
144 | */ | ||
145 | static void | ||
146 | csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len, | ||
147 | struct csio_fl_dma_buf *flb, void *cbfn_q) | ||
148 | { | ||
149 | struct csio_ioreq *ioreq; | ||
150 | uint8_t *scsiwr; | ||
151 | uint8_t subop; | ||
152 | void *cmnd; | ||
153 | unsigned long flags; | ||
154 | |||
155 | ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr); | ||
156 | if (likely(ioreq)) { | ||
157 | if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) { | ||
158 | subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET( | ||
159 | ((struct fw_scsi_abrt_cls_wr *) | ||
160 | scsiwr)->sub_opcode_to_chk_all_io); | ||
161 | |||
162 | csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n", | ||
163 | subop ? "Close" : "Abort", | ||
164 | ioreq, ioreq->wr_status); | ||
165 | |||
166 | spin_lock_irqsave(&hw->lock, flags); | ||
167 | if (subop) | ||
168 | csio_scsi_closed(ioreq, | ||
169 | (struct list_head *)cbfn_q); | ||
170 | else | ||
171 | csio_scsi_aborted(ioreq, | ||
172 | (struct list_head *)cbfn_q); | ||
173 | /* | ||
174 | * We call scsi_done for I/Os whose aborts the driver | ||
175 | * believes have timed out. If there is a race caused by FW | ||
176 | * completing the abort at the exact same time that the | ||
177 | * driver has detected the abort timeout, the following | ||
178 | * check prevents scsi_done being called twice for the | ||
179 | * same command: once from the eh_abort_handler, another | ||
180 | * from csio_scsi_isr_handler(). This also avoids the | ||
181 | * need to check if csio_scsi_cmnd(req) is NULL in the | ||
182 | * fast path. | ||
183 | */ | ||
184 | cmnd = csio_scsi_cmnd(ioreq); | ||
185 | if (unlikely(cmnd == NULL)) | ||
186 | list_del_init(&ioreq->sm.sm_list); | ||
187 | |||
188 | spin_unlock_irqrestore(&hw->lock, flags); | ||
189 | |||
190 | if (unlikely(cmnd == NULL)) | ||
191 | csio_put_scsi_ioreq_lock(hw, | ||
192 | csio_hw_to_scsim(hw), ioreq); | ||
193 | } else { | ||
194 | spin_lock_irqsave(&hw->lock, flags); | ||
195 | csio_scsi_completed(ioreq, (struct list_head *)cbfn_q); | ||
196 | spin_unlock_irqrestore(&hw->lock, flags); | ||
197 | } | ||
198 | } | ||
199 | } | ||
200 | |||
201 | /* | ||
202 | * csio_scsi_isr_handler() - Common SCSI ISR handler. | ||
203 | * @iq: Ingress queue pointer. | ||
204 | * | ||
205 | * Processes SCSI completions on the SCSI IQ indicated by scm->iq_idx | ||
206 | * by calling csio_wr_process_iq_idx. If there are completions on the | ||
207 | * isr_cbfn_q, yank them out into a local queue and call their io_cbfns. | ||
208 | * Once done, add these completions onto the freelist. | ||
209 | * This routine is shared b/w MSIX and INTx. | ||
210 | */ | ||
211 | static inline irqreturn_t | ||
212 | csio_scsi_isr_handler(struct csio_q *iq) | ||
213 | { | ||
214 | struct csio_hw *hw = (struct csio_hw *)iq->owner; | ||
215 | LIST_HEAD(cbfn_q); | ||
216 | struct list_head *tmp; | ||
217 | struct csio_scsim *scm; | ||
218 | struct csio_ioreq *ioreq; | ||
219 | int isr_completions = 0; | ||
220 | |||
221 | scm = csio_hw_to_scsim(hw); | ||
222 | |||
223 | if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl, | ||
224 | &cbfn_q) != 0)) | ||
225 | return IRQ_NONE; | ||
226 | |||
227 | /* Call back the completion routines */ | ||
228 | list_for_each(tmp, &cbfn_q) { | ||
229 | ioreq = (struct csio_ioreq *)tmp; | ||
230 | isr_completions++; | ||
231 | ioreq->io_cbfn(hw, ioreq); | ||
232 | /* Release ddp buffer if used for this req */ | ||
233 | if (unlikely(ioreq->dcopy)) | ||
234 | csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list, | ||
235 | ioreq->nsge); | ||
236 | } | ||
237 | |||
238 | if (isr_completions) { | ||
239 | /* Return the ioreqs back to ioreq->freelist */ | ||
240 | csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q, | ||
241 | isr_completions); | ||
242 | } | ||
243 | |||
244 | return IRQ_HANDLED; | ||
245 | } | ||
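
The handler above batches its freelist maintenance: completions are drained into the on-stack cbfn_q, the callbacks run with no lock held, and the ioreqs go back to the free list with a single locked call rather than one lock round trip per request. A minimal userspace model of that batching, illustrative only:

#include <pthread.h>
#include <stdio.h>

#define NCOMPL 3

static pthread_mutex_t freelist_lock = PTHREAD_MUTEX_INITIALIZER;
static int nfree;

static void io_cbfn(int req)
{
	printf("completing req %d\n", req);	/* callback, lock not held */
}

int main(void)
{
	int cbfn_q[NCOMPL] = { 7, 8, 9 };	/* drained completions */
	int i;

	for (i = 0; i < NCOMPL; i++)
		io_cbfn(cbfn_q[i]);

	pthread_mutex_lock(&freelist_lock);	/* one lock round trip */
	nfree += NCOMPL;			/* return the whole batch */
	pthread_mutex_unlock(&freelist_lock);

	printf("freelist now has %d requests\n", nfree);
	return 0;
}
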
246 | |||
247 | /* | ||
248 | * csio_scsi_isr() - SCSI MSIX handler | ||
249 | * @irq: IRQ number. | ||
250 | * @dev_id: Ingress queue pointer. | ||
251 | * | ||
252 | * This is the top level SCSI MSIX handler. Calls csio_scsi_isr_handler() | ||
253 | * for handling SCSI completions. | ||
254 | */ | ||
255 | static irqreturn_t | ||
256 | csio_scsi_isr(int irq, void *dev_id) | ||
257 | { | ||
258 | struct csio_q *iq = (struct csio_q *) dev_id; | ||
259 | struct csio_hw *hw; | ||
260 | |||
261 | if (unlikely(!iq)) | ||
262 | return IRQ_NONE; | ||
263 | |||
264 | hw = (struct csio_hw *)iq->owner; | ||
265 | |||
266 | if (unlikely(pci_channel_offline(hw->pdev))) { | ||
267 | CSIO_INC_STATS(hw, n_pcich_offline); | ||
268 | return IRQ_NONE; | ||
269 | } | ||
270 | |||
271 | csio_scsi_isr_handler(iq); | ||
272 | |||
273 | return IRQ_HANDLED; | ||
274 | } | ||
275 | |||
276 | /* | ||
277 | * csio_scsi_intx_handler() - SCSI INTx handler | ||
278 | * @hw: HW module. | ||
279 | * @priv: The ingress queue for this interrupt. | ||
280 | * | ||
281 | * This is the top level SCSI INTx handler. Calls csio_scsi_isr_handler() | ||
282 | * for handling SCSI completions. | ||
283 | */ | ||
284 | void | ||
285 | csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len, | ||
286 | struct csio_fl_dma_buf *flb, void *priv) | ||
287 | { | ||
288 | struct csio_q *iq = priv; | ||
289 | |||
290 | csio_scsi_isr_handler(iq); | ||
291 | |||
292 | } /* csio_scsi_intx_handler */ | ||
293 | |||
294 | /* | ||
295 | * csio_fcoe_isr() - INTx/MSI interrupt service routine for FCoE. | ||
296 | * @irq: IRQ number. | ||
297 | * @dev_id: The HW module. | ||
298 | * | ||
299 | * | ||
300 | */ | ||
301 | static irqreturn_t | ||
302 | csio_fcoe_isr(int irq, void *dev_id) | ||
303 | { | ||
304 | struct csio_hw *hw = (struct csio_hw *) dev_id; | ||
305 | struct csio_q *intx_q = NULL; | ||
306 | int rv; | ||
307 | irqreturn_t ret = IRQ_NONE; | ||
308 | unsigned long flags; | ||
309 | |||
310 | if (unlikely(!hw)) | ||
311 | return IRQ_NONE; | ||
312 | |||
313 | if (unlikely(pci_channel_offline(hw->pdev))) { | ||
314 | CSIO_INC_STATS(hw, n_pcich_offline); | ||
315 | return IRQ_NONE; | ||
316 | } | ||
317 | |||
318 | /* Disable the interrupt for this PCI function. */ | ||
319 | if (hw->intr_mode == CSIO_IM_INTX) | ||
320 | csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI)); | ||
321 | |||
322 | /* | ||
323 | * The read in the following function will flush the | ||
324 | * above write. | ||
325 | */ | ||
326 | if (csio_hw_slow_intr_handler(hw)) | ||
327 | ret = IRQ_HANDLED; | ||
328 | |||
329 | /* Get the INTx Forward interrupt IQ. */ | ||
330 | intx_q = csio_get_q(hw, hw->intr_iq_idx); | ||
331 | |||
332 | CSIO_DB_ASSERT(intx_q); | ||
333 | |||
334 | /* IQ handler is not possible for intx_q, hence pass in NULL */ | ||
335 | if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0)) | ||
336 | ret = IRQ_HANDLED; | ||
337 | |||
338 | spin_lock_irqsave(&hw->lock, flags); | ||
339 | rv = csio_mb_isr_handler(hw); | ||
340 | if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) { | ||
341 | hw->flags |= CSIO_HWF_FWEVT_PENDING; | ||
342 | spin_unlock_irqrestore(&hw->lock, flags); | ||
343 | schedule_work(&hw->evtq_work); | ||
344 | return IRQ_HANDLED; | ||
345 | } | ||
346 | spin_unlock_irqrestore(&hw->lock, flags); | ||
347 | |||
348 | return ret; | ||
349 | } | ||
350 | |||
351 | static void | ||
352 | csio_add_msix_desc(struct csio_hw *hw) | ||
353 | { | ||
354 | int i; | ||
355 | struct csio_msix_entries *entryp = &hw->msix_entries[0]; | ||
356 | int k = CSIO_EXTRA_VECS; | ||
357 | int len = sizeof(entryp->desc) - 1; | ||
358 | int cnt = hw->num_sqsets + k; | ||
359 | |||
360 | /* Non-data vector */ | ||
361 | memset(entryp->desc, 0, len + 1); | ||
362 | snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata", | ||
363 | CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw)); | ||
364 | |||
365 | entryp++; | ||
366 | memset(entryp->desc, 0, len + 1); | ||
367 | snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt", | ||
368 | CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw)); | ||
369 | entryp++; | ||
370 | |||
371 | /* Name SCSI vecs */ | ||
372 | for (i = k; i < cnt; i++, entryp++) { | ||
373 | memset(entryp->desc, 0, len + 1); | ||
374 | snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d", | ||
375 | CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), | ||
376 | CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS); | ||
377 | } | ||
378 | } | ||
379 | |||
380 | int | ||
381 | csio_request_irqs(struct csio_hw *hw) | ||
382 | { | ||
383 | int rv, i, j, k = 0; | ||
384 | struct csio_msix_entries *entryp = &hw->msix_entries[0]; | ||
385 | struct csio_scsi_cpu_info *info; | ||
386 | |||
387 | if (hw->intr_mode != CSIO_IM_MSIX) { | ||
388 | rv = request_irq(hw->pdev->irq, csio_fcoe_isr, | ||
389 | (hw->intr_mode == CSIO_IM_MSI) ? | ||
390 | 0 : IRQF_SHARED, | ||
391 | KBUILD_MODNAME, hw); | ||
392 | if (rv) { | ||
393 | if (hw->intr_mode == CSIO_IM_MSI) | ||
394 | pci_disable_msi(hw->pdev); | ||
395 | csio_err(hw, "Failed to allocate interrupt line.\n"); | ||
396 | return -EINVAL; | ||
397 | } | ||
398 | |||
399 | goto out; | ||
400 | } | ||
401 | |||
402 | /* Add the MSIX vector descriptions */ | ||
403 | csio_add_msix_desc(hw); | ||
404 | |||
405 | rv = request_irq(entryp[k].vector, csio_nondata_isr, 0, | ||
406 | entryp[k].desc, hw); | ||
407 | if (rv) { | ||
408 | csio_err(hw, "IRQ request failed for vec %d err:%d\n", | ||
409 | entryp[k].vector, rv); | ||
410 | goto err; | ||
411 | } | ||
412 | |||
413 | entryp[k++].dev_id = (void *)hw; | ||
414 | |||
415 | rv = request_irq(entryp[k].vector, csio_fwevt_isr, 0, | ||
416 | entryp[k].desc, hw); | ||
417 | if (rv) { | ||
418 | csio_err(hw, "IRQ request failed for vec %d err:%d\n", | ||
419 | entryp[k].vector, rv); | ||
420 | goto err; | ||
421 | } | ||
422 | |||
423 | entryp[k++].dev_id = (void *)hw; | ||
424 | |||
425 | /* Allocate IRQs for SCSI */ | ||
426 | for (i = 0; i < hw->num_pports; i++) { | ||
427 | info = &hw->scsi_cpu_info[i]; | ||
428 | for (j = 0; j < info->max_cpus; j++, k++) { | ||
429 | struct csio_scsi_qset *sqset = &hw->sqset[i][j]; | ||
430 | struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx]; | ||
431 | |||
432 | rv = request_irq(entryp[k].vector, csio_scsi_isr, 0, | ||
433 | entryp[k].desc, q); | ||
434 | if (rv) { | ||
435 | csio_err(hw, | ||
436 | "IRQ request failed for vec %d err:%d\n", | ||
437 | entryp[k].vector, rv); | ||
438 | goto err; | ||
439 | } | ||
440 | |||
441 | entryp[k].dev_id = (void *)q; | ||
442 | |||
443 | } /* for all scsi cpus */ | ||
444 | } /* for all ports */ | ||
445 | |||
446 | out: | ||
447 | hw->flags |= CSIO_HWF_HOST_INTR_ENABLED; | ||
448 | |||
449 | return 0; | ||
450 | |||
451 | err: | ||
452 | for (i = 0; i < k; i++) { | ||
453 | entryp = &hw->msix_entries[i]; | ||
454 | free_irq(entryp->vector, entryp->dev_id); | ||
455 | } | ||
456 | pci_disable_msix(hw->pdev); | ||
457 | |||
458 | return -EINVAL; | ||
459 | } | ||
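The resulting MSI-X layout, and why 'k' is threaded through the function, can be summarized as follows (the two CSIO_EXTRA_VECS vectors are the non-data and FW-event vectors named in csio_add_msix_desc()):

	/*
	 * entry 0:    "csio-bb:dd:f-nondata" -> csio_nondata_isr (mailbox)
	 * entry 1:    "csio-bb:dd:f-fwevt"   -> csio_fwevt_isr   (FW events)
	 * entry 2..n: "csio-bb:dd:f-scsiN"   -> csio_scsi_isr, one per SCSI qset
	 *
	 * 'k' counts successful request_irq() calls, so the err: path frees
	 * exactly the vectors acquired so far before disabling MSI-X.
	 */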
460 | |||
461 | static void | ||
462 | csio_disable_msix(struct csio_hw *hw, bool free) | ||
463 | { | ||
464 | int i; | ||
465 | struct csio_msix_entries *entryp; | ||
466 | int cnt = hw->num_sqsets + CSIO_EXTRA_VECS; | ||
467 | |||
468 | if (free) { | ||
469 | for (i = 0; i < cnt; i++) { | ||
470 | entryp = &hw->msix_entries[i]; | ||
471 | free_irq(entryp->vector, entryp->dev_id); | ||
472 | } | ||
473 | } | ||
474 | pci_disable_msix(hw->pdev); | ||
475 | } | ||
476 | |||
477 | /* Reduce per-port max possible CPUs */ | ||
478 | static void | ||
479 | csio_reduce_sqsets(struct csio_hw *hw, int cnt) | ||
480 | { | ||
481 | int i; | ||
482 | struct csio_scsi_cpu_info *info; | ||
483 | |||
484 | while (cnt < hw->num_sqsets) { | ||
485 | for (i = 0; i < hw->num_pports; i++) { | ||
486 | info = &hw->scsi_cpu_info[i]; | ||
487 | if (info->max_cpus > 1) { | ||
488 | info->max_cpus--; | ||
489 | hw->num_sqsets--; | ||
490 | if (hw->num_sqsets <= cnt) | ||
491 | break; | ||
492 | } | ||
493 | } | ||
494 | } | ||
495 | |||
496 | csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets); | ||
497 | } | ||
498 | |||
499 | static int | ||
500 | csio_enable_msix(struct csio_hw *hw) | ||
501 | { | ||
502 | int rv, i, j, k, n, min, cnt; | ||
503 | struct csio_msix_entries *entryp; | ||
504 | struct msix_entry *entries; | ||
505 | int extra = CSIO_EXTRA_VECS; | ||
506 | struct csio_scsi_cpu_info *info; | ||
507 | |||
508 | min = hw->num_pports + extra; | ||
509 | cnt = hw->num_sqsets + extra; | ||
510 | |||
511 | /* Max vectors required based on #niqs configured in fw */ | ||
512 | if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw)) | ||
513 | cnt = min_t(uint8_t, hw->cfg_niq, cnt); | ||
514 | |||
515 | 	entries = kcalloc(cnt, sizeof(struct msix_entry), GFP_KERNEL); | ||
516 | if (!entries) | ||
517 | return -ENOMEM; | ||
518 | |||
519 | for (i = 0; i < cnt; i++) | ||
520 | entries[i].entry = (uint16_t)i; | ||
521 | |||
522 | csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt); | ||
523 | |||
524 | while ((rv = pci_enable_msix(hw->pdev, entries, cnt)) >= min) | ||
525 | cnt = rv; | ||
526 | if (!rv) { | ||
527 | if (cnt < (hw->num_sqsets + extra)) { | ||
528 | csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra); | ||
529 | csio_reduce_sqsets(hw, cnt - extra); | ||
530 | } | ||
531 | } else { | ||
532 | if (rv > 0) { | ||
533 | pci_disable_msix(hw->pdev); | ||
534 | csio_info(hw, "Not using MSI-X, remainder:%d\n", rv); | ||
535 | } | ||
536 | |||
537 | kfree(entries); | ||
538 | return -ENOMEM; | ||
539 | } | ||
540 | |||
541 | /* Save off vectors */ | ||
542 | for (i = 0; i < cnt; i++) { | ||
543 | entryp = &hw->msix_entries[i]; | ||
544 | entryp->vector = entries[i].vector; | ||
545 | } | ||
546 | |||
547 | /* Distribute vectors */ | ||
548 | k = 0; | ||
549 | csio_set_nondata_intr_idx(hw, entries[k].entry); | ||
550 | csio_set_mb_intr_idx(csio_hw_to_mbm(hw), entries[k++].entry); | ||
551 | csio_set_fwevt_intr_idx(hw, entries[k++].entry); | ||
552 | |||
553 | for (i = 0; i < hw->num_pports; i++) { | ||
554 | info = &hw->scsi_cpu_info[i]; | ||
555 | |||
556 | for (j = 0; j < hw->num_scsi_msix_cpus; j++) { | ||
557 | n = (j % info->max_cpus) + k; | ||
558 | hw->sqset[i][j].intr_idx = entries[n].entry; | ||
559 | } | ||
560 | |||
561 | k += info->max_cpus; | ||
562 | } | ||
563 | |||
564 | kfree(entries); | ||
565 | return 0; | ||
566 | } | ||
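pci_enable_msix() here is the older kernel API: it returns 0 on success, a positive count of vectors actually available (inviting a retry with fewer), or a negative errno, which is what the while loop exploits to shrink cnt until the allocation fits or falls below min. On kernels that provide pci_alloc_irq_vectors() (with vector numbers then retrieved via pci_irq_vector()), the same negotiation collapses into one ranged call; a hedged sketch, not part of this patch:

	int nvec = pci_alloc_irq_vectors(hw->pdev, min, cnt, PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;	/* could not get even 'min' vectors */
	if (nvec < hw->num_sqsets + CSIO_EXTRA_VECS)
		csio_reduce_sqsets(hw, nvec - CSIO_EXTRA_VECS);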
567 | |||
568 | void | ||
569 | csio_intr_enable(struct csio_hw *hw) | ||
570 | { | ||
571 | hw->intr_mode = CSIO_IM_NONE; | ||
572 | hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED; | ||
573 | |||
574 | /* Try MSIX, then MSI or fall back to INTx */ | ||
575 | if ((csio_msi == 2) && !csio_enable_msix(hw)) | ||
576 | hw->intr_mode = CSIO_IM_MSIX; | ||
577 | else { | ||
578 | /* Max iqs required based on #niqs configured in fw */ | ||
579 | if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || | ||
580 | !csio_is_hw_master(hw)) { | ||
581 | int extra = CSIO_EXTRA_MSI_IQS; | ||
582 | |||
583 | if (hw->cfg_niq < (hw->num_sqsets + extra)) { | ||
584 | csio_dbg(hw, "Reducing sqsets to %d\n", | ||
585 | hw->cfg_niq - extra); | ||
586 | csio_reduce_sqsets(hw, hw->cfg_niq - extra); | ||
587 | } | ||
588 | } | ||
589 | |||
590 | if ((csio_msi == 1) && !pci_enable_msi(hw->pdev)) | ||
591 | hw->intr_mode = CSIO_IM_MSI; | ||
592 | else | ||
593 | hw->intr_mode = CSIO_IM_INTX; | ||
594 | } | ||
595 | |||
596 | csio_dbg(hw, "Using %s interrupt mode.\n", | ||
597 | (hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" : | ||
598 | ((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx")); | ||
599 | } | ||
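csio_msi is the driver's module parameter selecting the interrupt mode: 2 requests MSI-X (the default), 1 requests MSI, anything else falls back to INTx. The extra-IQ trim in the non-MSI-X branch accounts for MSI/INTx needing CSIO_EXTRA_MSI_IQS additional ingress queues out of the firmware-configured budget. A sketch of how such a knob is conventionally declared; the authoritative declaration lives in csio_init.c of this patch:

	static int csio_msi = 2;	/* 2: MSI-X (default), 1: MSI, else INTx */
	module_param(csio_msi, int, 0);
	MODULE_PARM_DESC(csio_msi, "To use MSI-X(2), MSI(1) or INTx(0)");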
600 | |||
601 | void | ||
602 | csio_intr_disable(struct csio_hw *hw, bool free) | ||
603 | { | ||
604 | csio_hw_intr_disable(hw); | ||
605 | |||
606 | switch (hw->intr_mode) { | ||
607 | case CSIO_IM_MSIX: | ||
608 | csio_disable_msix(hw, free); | ||
609 | break; | ||
610 | case CSIO_IM_MSI: | ||
611 | if (free) | ||
612 | free_irq(hw->pdev->irq, hw); | ||
613 | pci_disable_msi(hw->pdev); | ||
614 | break; | ||
615 | case CSIO_IM_INTX: | ||
616 | if (free) | ||
617 | free_irq(hw->pdev->irq, hw); | ||
618 | break; | ||
619 | default: | ||
620 | break; | ||
621 | } | ||
622 | hw->intr_mode = CSIO_IM_NONE; | ||
623 | hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED; | ||
624 | } | ||
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c new file mode 100644 index 000000000000..551959e7324a --- /dev/null +++ b/drivers/scsi/csiostor/csio_lnode.c | |||
@@ -0,0 +1,2133 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio FCoE driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/kernel.h> | ||
36 | #include <linux/delay.h> | ||
37 | #include <linux/slab.h> | ||
38 | #include <linux/utsname.h> | ||
39 | #include <scsi/scsi_device.h> | ||
40 | #include <scsi/scsi_transport_fc.h> | ||
41 | #include <asm/unaligned.h> | ||
42 | #include <scsi/fc/fc_els.h> | ||
43 | #include <scsi/fc/fc_fs.h> | ||
44 | #include <scsi/fc/fc_gs.h> | ||
45 | #include <scsi/fc/fc_ms.h> | ||
46 | |||
47 | #include "csio_hw.h" | ||
48 | #include "csio_mb.h" | ||
49 | #include "csio_lnode.h" | ||
50 | #include "csio_rnode.h" | ||
51 | |||
52 | int csio_fcoe_rnodes = 1024; | ||
53 | int csio_fdmi_enable = 1; | ||
54 | |||
55 | #define PORT_ID_PTR(_x) ((uint8_t *)(&_x) + 1) | ||
56 | |||
57 | /* Lnode SM declarations */ | ||
58 | static void csio_lns_uninit(struct csio_lnode *, enum csio_ln_ev); | ||
59 | static void csio_lns_online(struct csio_lnode *, enum csio_ln_ev); | ||
60 | static void csio_lns_ready(struct csio_lnode *, enum csio_ln_ev); | ||
61 | static void csio_lns_offline(struct csio_lnode *, enum csio_ln_ev); | ||
62 | |||
63 | static int csio_ln_mgmt_submit_req(struct csio_ioreq *, | ||
64 | void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *), | ||
65 | enum fcoe_cmn_type, struct csio_dma_buf *, uint32_t); | ||
66 | |||
67 | /* LN event mapping */ | ||
68 | static enum csio_ln_ev fwevt_to_lnevt[] = { | ||
69 | CSIO_LNE_NONE, /* None */ | ||
70 | CSIO_LNE_NONE, /* PLOGI_ACC_RCVD */ | ||
71 | CSIO_LNE_NONE, /* PLOGI_RJT_RCVD */ | ||
72 | CSIO_LNE_NONE, /* PLOGI_RCVD */ | ||
73 | CSIO_LNE_NONE, /* PLOGO_RCVD */ | ||
74 | CSIO_LNE_NONE, /* PRLI_ACC_RCVD */ | ||
75 | CSIO_LNE_NONE, /* PRLI_RJT_RCVD */ | ||
76 | CSIO_LNE_NONE, /* PRLI_RCVD */ | ||
77 | CSIO_LNE_NONE, /* PRLO_RCVD */ | ||
78 | CSIO_LNE_NONE, /* NPORT_ID_CHGD */ | ||
79 | CSIO_LNE_LOGO, /* FLOGO_RCVD */ | ||
80 | CSIO_LNE_LOGO, /* CLR_VIRT_LNK_RCVD */ | ||
81 | CSIO_LNE_FAB_INIT_DONE,/* FLOGI_ACC_RCVD */ | ||
82 | CSIO_LNE_NONE, /* FLOGI_RJT_RCVD */ | ||
83 | CSIO_LNE_FAB_INIT_DONE,/* FDISC_ACC_RCVD */ | ||
84 | CSIO_LNE_NONE, /* FDISC_RJT_RCVD */ | ||
85 | CSIO_LNE_NONE, /* FLOGI_TMO_MAX_RETRY */ | ||
86 | CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_ACC */ | ||
87 | CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_RJT */ | ||
88 | CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_CNFLT */ | ||
89 | CSIO_LNE_NONE, /* PRLI_TMO */ | ||
90 | CSIO_LNE_NONE, /* ADISC_TMO */ | ||
91 | CSIO_LNE_NONE, /* RSCN_DEV_LOST */ | ||
92 | CSIO_LNE_NONE, /* SCR_ACC_RCVD */ | ||
93 | CSIO_LNE_NONE, /* ADISC_RJT_RCVD */ | ||
94 | CSIO_LNE_NONE, /* LOGO_SNT */ | ||
95 | CSIO_LNE_NONE, /* PROTO_ERR_IMPL_LOGO */ | ||
96 | }; | ||
97 | |||
98 | #define CSIO_FWE_TO_LNE(_evt)	(((_evt) > PROTO_ERR_IMPL_LOGO) ?	\ | ||
99 | 						CSIO_LNE_NONE :		\ | ||
100 | 						fwevt_to_lnevt[(_evt)]) | ||
101 | |||
102 | #define csio_ct_rsp(cp) (((struct fc_ct_hdr *)cp)->ct_cmd) | ||
103 | #define csio_ct_reason(cp) (((struct fc_ct_hdr *)cp)->ct_reason) | ||
104 | #define csio_ct_expl(cp) (((struct fc_ct_hdr *)cp)->ct_explan) | ||
105 | #define csio_ct_get_pld(cp) ((void *)(((uint8_t *)cp) + FC_CT_HDR_LEN)) | ||
106 | |||
107 | /* | ||
108 |  * csio_ln_lookup_by_portid - Lookup lnode using given portid. | ||
109 |  * @hw: HW module | ||
110 |  * @portid: Port id. | ||
111 |  * | ||
112 |  * If found, returns lnode matching given portid, otherwise returns NULL. | ||
113 |  */ | ||
114 | static struct csio_lnode * | ||
115 | csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid) | ||
116 | { | ||
117 | struct csio_lnode *ln = hw->rln; | ||
118 | struct list_head *tmp; | ||
119 | |||
120 | /* Match siblings lnode with portid */ | ||
121 | list_for_each(tmp, &hw->sln_head) { | ||
122 | ln = (struct csio_lnode *) tmp; | ||
123 | if (ln->portid == portid) | ||
124 | return ln; | ||
125 | } | ||
126 | |||
127 | return NULL; | ||
128 | } | ||
129 | |||
130 | /* | ||
131 | * csio_ln_lookup_by_vnpi - Lookup lnode using given vnp id. | ||
132 | * @hw - HW module | ||
133 |  * @vnp_id - VNP index. | ||
134 | * Returns - If found, returns lnode matching given vnp id | ||
135 | * otherwise returns NULL. | ||
136 | */ | ||
137 | static struct csio_lnode * | ||
138 | csio_ln_lookup_by_vnpi(struct csio_hw *hw, uint32_t vnp_id) | ||
139 | { | ||
140 | struct list_head *tmp1, *tmp2; | ||
141 | struct csio_lnode *sln = NULL, *cln = NULL; | ||
142 | |||
143 | if (list_empty(&hw->sln_head)) { | ||
144 | CSIO_INC_STATS(hw, n_lnlkup_miss); | ||
145 | return NULL; | ||
146 | } | ||
147 | /* Traverse sibling lnodes */ | ||
148 | list_for_each(tmp1, &hw->sln_head) { | ||
149 | sln = (struct csio_lnode *) tmp1; | ||
150 | |||
151 | /* Match sibling lnode */ | ||
152 | if (sln->vnp_flowid == vnp_id) | ||
153 | return sln; | ||
154 | |||
155 | if (list_empty(&sln->cln_head)) | ||
156 | continue; | ||
157 | |||
158 | /* Traverse children lnodes */ | ||
159 | list_for_each(tmp2, &sln->cln_head) { | ||
160 | cln = (struct csio_lnode *) tmp2; | ||
161 | |||
162 | if (cln->vnp_flowid == vnp_id) | ||
163 | return cln; | ||
164 | } | ||
165 | } | ||
166 | CSIO_INC_STATS(hw, n_lnlkup_miss); | ||
167 | return NULL; | ||
168 | } | ||
169 | |||
170 | /** | ||
171 | * csio_lnode_lookup_by_wwpn - Lookup lnode using given wwpn. | ||
172 | * @hw: HW module. | ||
173 | * @wwpn: WWPN. | ||
174 | * | ||
175 | * If found, returns lnode matching given wwpn, returns NULL otherwise. | ||
176 | */ | ||
177 | struct csio_lnode * | ||
178 | csio_lnode_lookup_by_wwpn(struct csio_hw *hw, uint8_t *wwpn) | ||
179 | { | ||
180 | struct list_head *tmp1, *tmp2; | ||
181 | struct csio_lnode *sln = NULL, *cln = NULL; | ||
182 | |||
183 | if (list_empty(&hw->sln_head)) { | ||
184 | CSIO_INC_STATS(hw, n_lnlkup_miss); | ||
185 | return NULL; | ||
186 | } | ||
187 | /* Traverse sibling lnodes */ | ||
188 | list_for_each(tmp1, &hw->sln_head) { | ||
189 | sln = (struct csio_lnode *) tmp1; | ||
190 | |||
191 | /* Match sibling lnode */ | ||
192 | if (!memcmp(csio_ln_wwpn(sln), wwpn, 8)) | ||
193 | return sln; | ||
194 | |||
195 | if (list_empty(&sln->cln_head)) | ||
196 | continue; | ||
197 | |||
198 | /* Traverse children lnodes */ | ||
199 | list_for_each(tmp2, &sln->cln_head) { | ||
200 | cln = (struct csio_lnode *) tmp2; | ||
201 | |||
202 | if (!memcmp(csio_ln_wwpn(cln), wwpn, 8)) | ||
203 | return cln; | ||
204 | } | ||
205 | } | ||
206 | return NULL; | ||
207 | } | ||
208 | |||
209 | /* FDMI */ | ||
210 | static void | ||
211 | csio_fill_ct_iu(void *buf, uint8_t type, uint8_t sub_type, uint16_t op) | ||
212 | { | ||
213 | struct fc_ct_hdr *cmd = (struct fc_ct_hdr *)buf; | ||
214 | cmd->ct_rev = FC_CT_REV; | ||
215 | cmd->ct_fs_type = type; | ||
216 | cmd->ct_fs_subtype = sub_type; | ||
217 | cmd->ct_cmd = op; | ||
218 | } | ||
219 | |||
220 | static int | ||
221 | csio_hostname(uint8_t *buf, size_t buf_len) | ||
222 | { | ||
223 | if (snprintf(buf, buf_len, "%s", init_utsname()->nodename) > 0) | ||
224 | return 0; | ||
225 | return -1; | ||
226 | } | ||
227 | |||
228 | static int | ||
229 | csio_osname(uint8_t *buf, size_t buf_len) | ||
230 | { | ||
231 | if (snprintf(buf, buf_len, "%s %s %s", | ||
232 | init_utsname()->sysname, | ||
233 | init_utsname()->release, | ||
234 | init_utsname()->version) > 0) | ||
235 | return 0; | ||
236 | |||
237 | return -1; | ||
238 | } | ||
239 | |||
240 | static inline void | ||
241 | csio_append_attrib(uint8_t **ptr, uint16_t type, uint8_t *val, uint16_t len) | ||
242 | { | ||
243 | 	struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr; | ||
244 | 	uint16_t val_len = len;	/* unpadded length of @val */ | ||
245 | 	ae->type = htons(type); | ||
246 | 	len += 4;		/* includes attribute type and length */ | ||
247 | 	len = (len + 3) & ~3;	/* should be multiple of 4 bytes */ | ||
248 | 	ae->len = htons(len); | ||
249 | 	memset(ae->value, 0, len - 4); | ||
250 | 	memcpy(ae->value, val, val_len);	/* copying the padded len would read past @val */ | ||
251 | 	*ptr += len; | ||
252 | } | ||
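A worked example of the sizing above: appending the 22-byte string "Chelsio Communications" gives len = 22 + 4 = 26 once the type/length header is counted, rounded up to 28 for 4-byte alignment. The entry therefore occupies 28 bytes on the wire: 2 bytes type, 2 bytes length, 22 bytes value and 2 zero pad bytes.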
252 | |||
253 | /* | ||
254 |  * csio_ln_fdmi_done - FDMI registration (RPA) completion | ||
255 | * @hw: HW context | ||
256 | * @fdmi_req: fdmi request | ||
257 | */ | ||
258 | static void | ||
259 | csio_ln_fdmi_done(struct csio_hw *hw, struct csio_ioreq *fdmi_req) | ||
260 | { | ||
261 | void *cmd; | ||
262 | struct csio_lnode *ln = fdmi_req->lnode; | ||
263 | |||
264 | if (fdmi_req->wr_status != FW_SUCCESS) { | ||
265 | csio_ln_dbg(ln, "WR error:%x in processing fdmi rpa cmd\n", | ||
266 | fdmi_req->wr_status); | ||
267 | CSIO_INC_STATS(ln, n_fdmi_err); | ||
268 | } | ||
269 | |||
270 | cmd = fdmi_req->dma_buf.vaddr; | ||
271 | if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) { | ||
272 | csio_ln_dbg(ln, "fdmi rpa cmd rejected reason %x expl %x\n", | ||
273 | csio_ct_reason(cmd), csio_ct_expl(cmd)); | ||
274 | } | ||
275 | } | ||
276 | |||
277 | /* | ||
278 | * csio_ln_fdmi_rhba_cbfn - RHBA completion | ||
279 | * @hw: HW context | ||
280 | * @fdmi_req: fdmi request | ||
281 | */ | ||
282 | static void | ||
283 | csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) | ||
284 | { | ||
285 | void *cmd; | ||
286 | uint8_t *pld; | ||
287 | uint32_t len = 0; | ||
288 | struct csio_lnode *ln = fdmi_req->lnode; | ||
289 | struct fs_fdmi_attrs *attrib_blk; | ||
290 | struct fc_fdmi_port_name *port_name; | ||
291 | uint8_t buf[64]; | ||
292 | uint32_t val; | ||
293 | uint8_t *fc4_type; | ||
294 | |||
295 | if (fdmi_req->wr_status != FW_SUCCESS) { | ||
296 | csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n", | ||
297 | fdmi_req->wr_status); | ||
298 | CSIO_INC_STATS(ln, n_fdmi_err); | ||
299 | } | ||
300 | |||
301 | cmd = fdmi_req->dma_buf.vaddr; | ||
302 | if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) { | ||
303 | csio_ln_dbg(ln, "fdmi rhba cmd rejected reason %x expl %x\n", | ||
304 | csio_ct_reason(cmd), csio_ct_expl(cmd)); | ||
305 | } | ||
306 | |||
307 | if (!csio_is_rnode_ready(fdmi_req->rnode)) { | ||
308 | CSIO_INC_STATS(ln, n_fdmi_err); | ||
309 | return; | ||
310 | } | ||
311 | |||
312 | /* Prepare CT hdr for RPA cmd */ | ||
313 | memset(cmd, 0, FC_CT_HDR_LEN); | ||
314 | csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, htons(FC_FDMI_RPA)); | ||
315 | |||
316 | /* Prepare RPA payload */ | ||
317 | pld = (uint8_t *)csio_ct_get_pld(cmd); | ||
318 | port_name = (struct fc_fdmi_port_name *)pld; | ||
319 | memcpy(&port_name->portname, csio_ln_wwpn(ln), 8); | ||
320 | pld += sizeof(*port_name); | ||
321 | |||
322 | /* Start appending Port attributes */ | ||
323 | attrib_blk = (struct fs_fdmi_attrs *)pld; | ||
324 | attrib_blk->numattrs = 0; | ||
325 | len += sizeof(attrib_blk->numattrs); | ||
326 | pld += sizeof(attrib_blk->numattrs); | ||
327 | |||
328 | fc4_type = &buf[0]; | ||
329 | memset(fc4_type, 0, FC_FDMI_PORT_ATTR_FC4TYPES_LEN); | ||
330 | fc4_type[2] = 1; | ||
331 | fc4_type[7] = 1; | ||
332 | csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_FC4TYPES, | ||
333 | fc4_type, FC_FDMI_PORT_ATTR_FC4TYPES_LEN); | ||
334 | attrib_blk->numattrs++; | ||
335 | val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); | ||
336 | csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED, | ||
337 | (uint8_t *)&val, | ||
338 | FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN); | ||
339 | attrib_blk->numattrs++; | ||
340 | |||
341 | if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_1G) | ||
342 | val = htonl(FC_PORTSPEED_1GBIT); | ||
343 | else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G) | ||
344 | val = htonl(FC_PORTSPEED_10GBIT); | ||
345 | else | ||
346 | val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN); | ||
347 | csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED, | ||
348 | (uint8_t *)&val, | ||
349 | FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN); | ||
350 | attrib_blk->numattrs++; | ||
351 | |||
352 | val = htonl(ln->ln_sparm.csp.sp_bb_data); | ||
353 | csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE, | ||
354 | (uint8_t *)&val, FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN); | ||
355 | attrib_blk->numattrs++; | ||
356 | |||
357 | strcpy(buf, "csiostor"); | ||
358 | csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf, | ||
359 | (uint16_t)strlen(buf)); | ||
360 | attrib_blk->numattrs++; | ||
361 | |||
362 | if (!csio_hostname(buf, sizeof(buf))) { | ||
363 | csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME, | ||
364 | buf, (uint16_t)strlen(buf)); | ||
365 | attrib_blk->numattrs++; | ||
366 | } | ||
367 | 	attrib_blk->numattrs = htonl(attrib_blk->numattrs); | ||
368 | len = (uint32_t)(pld - (uint8_t *)cmd); | ||
369 | |||
370 | /* Submit FDMI RPA request */ | ||
371 | spin_lock_irq(&hw->lock); | ||
372 | if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done, | ||
373 | FCOE_CT, &fdmi_req->dma_buf, len)) { | ||
374 | CSIO_INC_STATS(ln, n_fdmi_err); | ||
375 | csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n"); | ||
376 | } | ||
377 | spin_unlock_irq(&hw->lock); | ||
378 | } | ||
379 | |||
380 | /* | ||
381 | * csio_ln_fdmi_dprt_cbfn - DPRT completion | ||
382 | * @hw: HW context | ||
383 | * @fdmi_req: fdmi request | ||
384 | */ | ||
385 | static void | ||
386 | csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) | ||
387 | { | ||
388 | void *cmd; | ||
389 | uint8_t *pld; | ||
390 | uint32_t len = 0; | ||
391 | uint32_t maxpayload = htonl(65536); | ||
392 | struct fc_fdmi_hba_identifier *hbaid; | ||
393 | struct csio_lnode *ln = fdmi_req->lnode; | ||
394 | struct fc_fdmi_rpl *reg_pl; | ||
395 | struct fs_fdmi_attrs *attrib_blk; | ||
396 | uint8_t buf[64]; | ||
397 | |||
398 | if (fdmi_req->wr_status != FW_SUCCESS) { | ||
399 | csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n", | ||
400 | fdmi_req->wr_status); | ||
401 | CSIO_INC_STATS(ln, n_fdmi_err); | ||
402 | } | ||
403 | |||
404 | if (!csio_is_rnode_ready(fdmi_req->rnode)) { | ||
405 | CSIO_INC_STATS(ln, n_fdmi_err); | ||
406 | return; | ||
407 | } | ||
408 | cmd = fdmi_req->dma_buf.vaddr; | ||
409 | if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) { | ||
410 | csio_ln_dbg(ln, "fdmi dprt cmd rejected reason %x expl %x\n", | ||
411 | csio_ct_reason(cmd), csio_ct_expl(cmd)); | ||
412 | } | ||
413 | |||
414 | /* Prepare CT hdr for RHBA cmd */ | ||
415 | memset(cmd, 0, FC_CT_HDR_LEN); | ||
416 | csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, htons(FC_FDMI_RHBA)); | ||
417 | len = FC_CT_HDR_LEN; | ||
418 | |||
419 | /* Prepare RHBA payload */ | ||
420 | pld = (uint8_t *)csio_ct_get_pld(cmd); | ||
421 | hbaid = (struct fc_fdmi_hba_identifier *)pld; | ||
422 | 	memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); /* HBA identifier */ | ||
423 | pld += sizeof(*hbaid); | ||
424 | |||
425 | /* Register one port per hba */ | ||
426 | reg_pl = (struct fc_fdmi_rpl *)pld; | ||
427 | 	reg_pl->numport = htonl(1); | ||
428 | memcpy(®_pl->port[0].portname, csio_ln_wwpn(ln), 8); | ||
429 | pld += sizeof(*reg_pl); | ||
430 | |||
431 | 	/* Start appending HBA attributes */ | ||
432 | attrib_blk = (struct fs_fdmi_attrs *)pld; | ||
433 | attrib_blk->numattrs = 0; | ||
434 | len += sizeof(attrib_blk->numattrs); | ||
435 | pld += sizeof(attrib_blk->numattrs); | ||
436 | |||
437 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_NODENAME, csio_ln_wwnn(ln), | ||
438 | FC_FDMI_HBA_ATTR_NODENAME_LEN); | ||
439 | attrib_blk->numattrs++; | ||
440 | |||
441 | memset(buf, 0, sizeof(buf)); | ||
442 | |||
443 | strcpy(buf, "Chelsio Communications"); | ||
444 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf, | ||
445 | (uint16_t)strlen(buf)); | ||
446 | attrib_blk->numattrs++; | ||
447 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER, | ||
448 | hw->vpd.sn, (uint16_t)sizeof(hw->vpd.sn)); | ||
449 | attrib_blk->numattrs++; | ||
450 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id, | ||
451 | (uint16_t)sizeof(hw->vpd.id)); | ||
452 | attrib_blk->numattrs++; | ||
453 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION, | ||
454 | hw->model_desc, (uint16_t)strlen(hw->model_desc)); | ||
455 | attrib_blk->numattrs++; | ||
456 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION, | ||
457 | hw->hw_ver, (uint16_t)sizeof(hw->hw_ver)); | ||
458 | attrib_blk->numattrs++; | ||
459 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION, | ||
460 | hw->fwrev_str, (uint16_t)strlen(hw->fwrev_str)); | ||
461 | attrib_blk->numattrs++; | ||
462 | |||
463 | if (!csio_osname(buf, sizeof(buf))) { | ||
464 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION, | ||
465 | buf, (uint16_t)strlen(buf)); | ||
466 | attrib_blk->numattrs++; | ||
467 | } | ||
468 | |||
469 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD, | ||
470 | (uint8_t *)&maxpayload, | ||
471 | FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN); | ||
472 | len = (uint32_t)(pld - (uint8_t *)cmd); | ||
473 | attrib_blk->numattrs++; | ||
474 | 	attrib_blk->numattrs = htonl(attrib_blk->numattrs); | ||
475 | |||
476 | /* Submit FDMI RHBA request */ | ||
477 | spin_lock_irq(&hw->lock); | ||
478 | if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn, | ||
479 | FCOE_CT, &fdmi_req->dma_buf, len)) { | ||
480 | CSIO_INC_STATS(ln, n_fdmi_err); | ||
481 | csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n"); | ||
482 | } | ||
483 | spin_unlock_irq(&hw->lock); | ||
484 | } | ||
485 | |||
486 | /* | ||
487 | * csio_ln_fdmi_dhba_cbfn - DHBA completion | ||
488 | * @hw: HW context | ||
489 | * @fdmi_req: fdmi request | ||
490 | */ | ||
491 | static void | ||
492 | csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) | ||
493 | { | ||
494 | struct csio_lnode *ln = fdmi_req->lnode; | ||
495 | void *cmd; | ||
496 | struct fc_fdmi_port_name *port_name; | ||
497 | uint32_t len; | ||
498 | |||
499 | if (fdmi_req->wr_status != FW_SUCCESS) { | ||
500 | csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n", | ||
501 | fdmi_req->wr_status); | ||
502 | CSIO_INC_STATS(ln, n_fdmi_err); | ||
503 | } | ||
504 | |||
505 | if (!csio_is_rnode_ready(fdmi_req->rnode)) { | ||
506 | CSIO_INC_STATS(ln, n_fdmi_err); | ||
507 | return; | ||
508 | } | ||
509 | cmd = fdmi_req->dma_buf.vaddr; | ||
510 | if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) { | ||
511 | csio_ln_dbg(ln, "fdmi dhba cmd rejected reason %x expl %x\n", | ||
512 | csio_ct_reason(cmd), csio_ct_expl(cmd)); | ||
513 | } | ||
514 | |||
515 | /* Send FDMI cmd to de-register any Port attributes if registered | ||
516 | * before | ||
517 | */ | ||
518 | |||
519 | /* Prepare FDMI DPRT cmd */ | ||
520 | memset(cmd, 0, FC_CT_HDR_LEN); | ||
521 | csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, htons(FC_FDMI_DPRT)); | ||
522 | len = FC_CT_HDR_LEN; | ||
523 | port_name = (struct fc_fdmi_port_name *)csio_ct_get_pld(cmd); | ||
524 | memcpy(&port_name->portname, csio_ln_wwpn(ln), 8); | ||
525 | len += sizeof(*port_name); | ||
526 | |||
527 | /* Submit FDMI request */ | ||
528 | spin_lock_irq(&hw->lock); | ||
529 | if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn, | ||
530 | FCOE_CT, &fdmi_req->dma_buf, len)) { | ||
531 | CSIO_INC_STATS(ln, n_fdmi_err); | ||
532 | csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n"); | ||
533 | } | ||
534 | spin_unlock_irq(&hw->lock); | ||
535 | } | ||
536 | |||
537 | /** | ||
538 | * csio_ln_fdmi_start - Start an FDMI request. | ||
539 | * @ln: lnode | ||
540 | * @context: session context | ||
541 | * | ||
542 | * Issued with lock held. | ||
543 | */ | ||
544 | int | ||
545 | csio_ln_fdmi_start(struct csio_lnode *ln, void *context) | ||
546 | { | ||
547 | struct csio_ioreq *fdmi_req; | ||
548 | struct csio_rnode *fdmi_rn = (struct csio_rnode *)context; | ||
549 | void *cmd; | ||
550 | struct fc_fdmi_hba_identifier *hbaid; | ||
551 | uint32_t len; | ||
552 | |||
553 | if (!(ln->flags & CSIO_LNF_FDMI_ENABLE)) | ||
554 | return -EPROTONOSUPPORT; | ||
555 | |||
556 | if (!csio_is_rnode_ready(fdmi_rn)) | ||
557 | CSIO_INC_STATS(ln, n_fdmi_err); | ||
558 | |||
559 | /* Send FDMI cmd to de-register any HBA attributes if registered | ||
560 | * before | ||
561 | */ | ||
562 | |||
563 | fdmi_req = ln->mgmt_req; | ||
564 | fdmi_req->lnode = ln; | ||
565 | fdmi_req->rnode = fdmi_rn; | ||
566 | |||
567 | /* Prepare FDMI DHBA cmd */ | ||
568 | cmd = fdmi_req->dma_buf.vaddr; | ||
569 | memset(cmd, 0, FC_CT_HDR_LEN); | ||
570 | csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, htons(FC_FDMI_DHBA)); | ||
571 | len = FC_CT_HDR_LEN; | ||
572 | |||
573 | hbaid = (struct fc_fdmi_hba_identifier *)csio_ct_get_pld(cmd); | ||
574 | memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); | ||
575 | len += sizeof(*hbaid); | ||
576 | |||
577 | /* Submit FDMI request */ | ||
578 | if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dhba_cbfn, | ||
579 | FCOE_CT, &fdmi_req->dma_buf, len)) { | ||
580 | CSIO_INC_STATS(ln, n_fdmi_err); | ||
581 | csio_ln_dbg(ln, "Failed to issue fdmi dhba req\n"); | ||
582 | } | ||
583 | |||
584 | return 0; | ||
585 | } | ||
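Taken together, the FDMI routines above form a chain of CT requests, each issued from the previous request's completion callback:

	/*
	 * csio_ln_fdmi_start()              DHBA (deregister HBA)
	 *   -> csio_ln_fdmi_dhba_cbfn()     DPRT (deregister port)
	 *     -> csio_ln_fdmi_dprt_cbfn()   RHBA (register HBA attributes)
	 *       -> csio_ln_fdmi_rhba_cbfn() RPA  (register port attributes)
	 *         -> csio_ln_fdmi_done()
	 */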
586 | |||
587 | /* | ||
588 |  * csio_ln_vnp_read_cbfn - VNP read completion handler. | ||
589 |  * @hw: HW module. | ||
590 |  * @mbp: Completed mailbox request. | ||
591 |  * | ||
592 |  * Reads the VNP response and updates lnode parameters. | ||
593 |  */ | ||
594 | static void | ||
595 | csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp) | ||
596 | { | ||
597 | struct csio_lnode *ln = ((struct csio_lnode *)mbp->priv); | ||
598 | struct fw_fcoe_vnp_cmd *rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb); | ||
599 | struct fc_els_csp *csp; | ||
600 | struct fc_els_cssp *clsp; | ||
601 | enum fw_retval retval; | ||
602 | |||
603 | retval = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)); | ||
604 | if (retval != FW_SUCCESS) { | ||
605 | csio_err(hw, "FCOE VNP read cmd returned error:0x%x\n", retval); | ||
606 | mempool_free(mbp, hw->mb_mempool); | ||
607 | return; | ||
608 | } | ||
609 | |||
610 | spin_lock_irq(&hw->lock); | ||
611 | |||
612 | memcpy(ln->mac, rsp->vnport_mac, sizeof(ln->mac)); | ||
613 | memcpy(&ln->nport_id, &rsp->vnport_mac[3], | ||
614 | sizeof(uint8_t)*3); | ||
615 | 	ln->nport_id = ntohl(ln->nport_id); | ||
616 | 	ln->nport_id = ln->nport_id >> 8; | ||
617 | |||
618 | /* Update WWNs */ | ||
619 | /* | ||
620 | * This may look like a duplication of what csio_fcoe_enable_link() | ||
621 | * does, but is absolutely necessary if the vnpi changes between | ||
622 | * a FCOE LINK UP and FCOE LINK DOWN. | ||
623 | */ | ||
624 | memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8); | ||
625 | memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8); | ||
626 | |||
627 | /* Copy common sparam */ | ||
628 | csp = (struct fc_els_csp *)rsp->cmn_srv_parms; | ||
629 | ln->ln_sparm.csp.sp_hi_ver = csp->sp_hi_ver; | ||
630 | ln->ln_sparm.csp.sp_lo_ver = csp->sp_lo_ver; | ||
631 | ln->ln_sparm.csp.sp_bb_cred = ntohs(csp->sp_bb_cred); | ||
632 | ln->ln_sparm.csp.sp_features = ntohs(csp->sp_features); | ||
633 | ln->ln_sparm.csp.sp_bb_data = ntohs(csp->sp_bb_data); | ||
634 | ln->ln_sparm.csp.sp_r_a_tov = ntohl(csp->sp_r_a_tov); | ||
635 | ln->ln_sparm.csp.sp_e_d_tov = ntohl(csp->sp_e_d_tov); | ||
636 | |||
637 | /* Copy word 0 & word 1 of class sparam */ | ||
638 | clsp = (struct fc_els_cssp *)rsp->clsp_word_0_1; | ||
639 | ln->ln_sparm.clsp[2].cp_class = ntohs(clsp->cp_class); | ||
640 | ln->ln_sparm.clsp[2].cp_init = ntohs(clsp->cp_init); | ||
641 | ln->ln_sparm.clsp[2].cp_recip = ntohs(clsp->cp_recip); | ||
642 | ln->ln_sparm.clsp[2].cp_rdfs = ntohs(clsp->cp_rdfs); | ||
643 | |||
644 | spin_unlock_irq(&hw->lock); | ||
645 | |||
646 | mempool_free(mbp, hw->mb_mempool); | ||
647 | |||
648 | /* Send an event to update local attribs */ | ||
649 | csio_lnode_async_event(ln, CSIO_LN_FC_ATTRIB_UPDATE); | ||
650 | } | ||
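The N_Port ID recovered above comes straight out of the FPMA MAC: per FC-BB-5, the low three MAC bytes carry the FC_ID. For example, with vnport_mac = 0E:FC:00:01:02:03, the memcpy seeds the first three bytes of nport_id with 01 02 03, ntohl() reads them big-endian as 0x010203XX, and the >> 8 shifts out the fourth (never-copied) byte, leaving nport_id = 0x010203.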
651 | |||
652 | /* | ||
653 | * csio_ln_vnp_read - Read vnp params. | ||
654 | * @ln: lnode | ||
655 | * @cbfn: Completion handler. | ||
656 | * | ||
657 | * Issued with lock held. | ||
658 | */ | ||
659 | static int | ||
660 | csio_ln_vnp_read(struct csio_lnode *ln, | ||
661 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
662 | { | ||
663 | struct csio_hw *hw = ln->hwp; | ||
664 | struct csio_mb *mbp; | ||
665 | |||
666 | /* Allocate Mbox request */ | ||
667 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
668 | if (!mbp) { | ||
669 | CSIO_INC_STATS(hw, n_err_nomem); | ||
670 | return -ENOMEM; | ||
671 | } | ||
672 | |||
673 | /* Prepare VNP Command */ | ||
674 | csio_fcoe_vnp_read_init_mb(ln, mbp, | ||
675 | CSIO_MB_DEFAULT_TMO, | ||
676 | ln->fcf_flowid, | ||
677 | ln->vnp_flowid, | ||
678 | cbfn); | ||
679 | |||
680 | /* Issue MBOX cmd */ | ||
681 | if (csio_mb_issue(hw, mbp)) { | ||
682 | csio_err(hw, "Failed to issue mbox FCoE VNP command\n"); | ||
683 | mempool_free(mbp, hw->mb_mempool); | ||
684 | return -EINVAL; | ||
685 | } | ||
686 | |||
687 | return 0; | ||
688 | } | ||
689 | |||
690 | /* | ||
691 | * csio_fcoe_enable_link - Enable fcoe link. | ||
692 | * @ln: lnode | ||
693 | * @enable: enable/disable | ||
694 | * Issued with lock held. | ||
695 | * Issues mbox cmd to bring up FCOE link on port associated with given ln. | ||
696 | */ | ||
697 | static int | ||
698 | csio_fcoe_enable_link(struct csio_lnode *ln, bool enable) | ||
699 | { | ||
700 | struct csio_hw *hw = ln->hwp; | ||
701 | struct csio_mb *mbp; | ||
702 | enum fw_retval retval; | ||
703 | uint8_t portid; | ||
704 | uint8_t sub_op; | ||
705 | struct fw_fcoe_link_cmd *lcmd; | ||
706 | int i; | ||
707 | |||
708 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
709 | if (!mbp) { | ||
710 | CSIO_INC_STATS(hw, n_err_nomem); | ||
711 | return -ENOMEM; | ||
712 | } | ||
713 | |||
714 | portid = ln->portid; | ||
715 | sub_op = enable ? FCOE_LINK_UP : FCOE_LINK_DOWN; | ||
716 | |||
717 | csio_dbg(hw, "bringing FCOE LINK %s on Port:%d\n", | ||
718 | sub_op ? "UP" : "DOWN", portid); | ||
719 | |||
720 | csio_write_fcoe_link_cond_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO, | ||
721 | portid, sub_op, 0, 0, 0, NULL); | ||
722 | |||
723 | if (csio_mb_issue(hw, mbp)) { | ||
724 | csio_err(hw, "failed to issue FCOE LINK cmd on port[%d]\n", | ||
725 | portid); | ||
726 | mempool_free(mbp, hw->mb_mempool); | ||
727 | return -EINVAL; | ||
728 | } | ||
729 | |||
730 | retval = csio_mb_fw_retval(mbp); | ||
731 | if (retval != FW_SUCCESS) { | ||
732 | csio_err(hw, | ||
733 | "FCOE LINK %s cmd on port[%d] failed with " | ||
734 | "ret:x%x\n", sub_op ? "UP" : "DOWN", portid, retval); | ||
735 | mempool_free(mbp, hw->mb_mempool); | ||
736 | return -EINVAL; | ||
737 | } | ||
738 | |||
739 | if (!enable) | ||
740 | goto out; | ||
741 | |||
742 | lcmd = (struct fw_fcoe_link_cmd *)mbp->mb; | ||
743 | |||
744 | memcpy(csio_ln_wwnn(ln), lcmd->vnport_wwnn, 8); | ||
745 | memcpy(csio_ln_wwpn(ln), lcmd->vnport_wwpn, 8); | ||
746 | |||
747 | for (i = 0; i < CSIO_MAX_PPORTS; i++) | ||
748 | if (hw->pport[i].portid == portid) | ||
749 | memcpy(hw->pport[i].mac, lcmd->phy_mac, 6); | ||
750 | |||
751 | out: | ||
752 | mempool_free(mbp, hw->mb_mempool); | ||
753 | return 0; | ||
754 | } | ||
755 | |||
756 | /* | ||
757 |  * csio_ln_read_fcf_cbfn - FCF read completion handler. | ||
758 |  * @hw: HW module. | ||
759 |  * @mbp: Completed mailbox request. | ||
760 |  * Reads the FCF response and updates lnode FCF information. | ||
761 |  */ | ||
762 | static void | ||
763 | csio_ln_read_fcf_cbfn(struct csio_hw *hw, struct csio_mb *mbp) | ||
764 | { | ||
765 | struct csio_lnode *ln = (struct csio_lnode *)mbp->priv; | ||
766 | struct csio_fcf_info *fcf_info; | ||
767 | struct fw_fcoe_fcf_cmd *rsp = | ||
768 | (struct fw_fcoe_fcf_cmd *)(mbp->mb); | ||
769 | enum fw_retval retval; | ||
770 | |||
771 | retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16)); | ||
772 | if (retval != FW_SUCCESS) { | ||
773 | csio_ln_err(ln, "FCOE FCF cmd failed with ret x%x\n", | ||
774 | retval); | ||
775 | mempool_free(mbp, hw->mb_mempool); | ||
776 | return; | ||
777 | } | ||
778 | |||
779 | spin_lock_irq(&hw->lock); | ||
780 | fcf_info = ln->fcfinfo; | ||
781 | fcf_info->priority = FW_FCOE_FCF_CMD_PRIORITY_GET( | ||
782 | ntohs(rsp->priority_pkd)); | ||
783 | fcf_info->vf_id = ntohs(rsp->vf_id); | ||
784 | fcf_info->vlan_id = rsp->vlan_id; | ||
785 | fcf_info->max_fcoe_size = ntohs(rsp->max_fcoe_size); | ||
786 | fcf_info->fka_adv = be32_to_cpu(rsp->fka_adv); | ||
787 | fcf_info->fcfi = FW_FCOE_FCF_CMD_FCFI_GET(ntohl(rsp->op_to_fcfi)); | ||
788 | fcf_info->fpma = FW_FCOE_FCF_CMD_FPMA_GET(rsp->fpma_to_portid); | ||
789 | fcf_info->spma = FW_FCOE_FCF_CMD_SPMA_GET(rsp->fpma_to_portid); | ||
790 | fcf_info->login = FW_FCOE_FCF_CMD_LOGIN_GET(rsp->fpma_to_portid); | ||
791 | fcf_info->portid = FW_FCOE_FCF_CMD_PORTID_GET(rsp->fpma_to_portid); | ||
792 | memcpy(fcf_info->fc_map, rsp->fc_map, sizeof(fcf_info->fc_map)); | ||
793 | memcpy(fcf_info->mac, rsp->mac, sizeof(fcf_info->mac)); | ||
794 | memcpy(fcf_info->name_id, rsp->name_id, sizeof(fcf_info->name_id)); | ||
795 | memcpy(fcf_info->fabric, rsp->fabric, sizeof(fcf_info->fabric)); | ||
796 | memcpy(fcf_info->spma_mac, rsp->spma_mac, sizeof(fcf_info->spma_mac)); | ||
797 | |||
798 | spin_unlock_irq(&hw->lock); | ||
799 | |||
800 | mempool_free(mbp, hw->mb_mempool); | ||
801 | } | ||
802 | |||
803 | /* | ||
804 | * csio_ln_read_fcf_entry - Read fcf entry. | ||
805 | * @ln: lnode | ||
806 | * @cbfn: Completion handler. | ||
807 | * | ||
808 | * Issued with lock held. | ||
809 | */ | ||
810 | static int | ||
811 | csio_ln_read_fcf_entry(struct csio_lnode *ln, | ||
812 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
813 | { | ||
814 | struct csio_hw *hw = ln->hwp; | ||
815 | struct csio_mb *mbp; | ||
816 | |||
817 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
818 | if (!mbp) { | ||
819 | CSIO_INC_STATS(hw, n_err_nomem); | ||
820 | return -ENOMEM; | ||
821 | } | ||
822 | |||
823 | /* Get FCoE FCF information */ | ||
824 | csio_fcoe_read_fcf_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO, | ||
825 | ln->portid, ln->fcf_flowid, cbfn); | ||
826 | |||
827 | if (csio_mb_issue(hw, mbp)) { | ||
828 | csio_err(hw, "failed to issue FCOE FCF cmd\n"); | ||
829 | mempool_free(mbp, hw->mb_mempool); | ||
830 | return -EINVAL; | ||
831 | } | ||
832 | |||
833 | return 0; | ||
834 | } | ||
835 | |||
836 | /* | ||
837 | * csio_handle_link_up - Logical Linkup event. | ||
838 | * @hw - HW module. | ||
839 | * @portid - Physical port number | ||
840 | * @fcfi - FCF index. | ||
841 | * @vnpi - VNP index. | ||
842 | * Returns - none. | ||
843 | * | ||
844 | * This event is received from FW, when virtual link is established between | ||
845 |  * Physical port[ENode] and FCF. If it is a new vnpi, a local node object | ||
846 |  * is created on this FCF and set to [ONLINE] state. | ||
847 |  * The lnode then waits for a FW_RDEV_CMD event indicating that | ||
848 |  * fabric login is completed, and moves to [READY] state. | ||
849 |  * | ||
850 |  * This is called with the hw lock held. | ||
851 | */ | ||
852 | static void | ||
853 | csio_handle_link_up(struct csio_hw *hw, uint8_t portid, uint32_t fcfi, | ||
854 | uint32_t vnpi) | ||
855 | { | ||
856 | struct csio_lnode *ln = NULL; | ||
857 | |||
858 | /* Lookup lnode based on vnpi */ | ||
859 | ln = csio_ln_lookup_by_vnpi(hw, vnpi); | ||
860 | if (!ln) { | ||
861 | /* Pick lnode based on portid */ | ||
862 | ln = csio_ln_lookup_by_portid(hw, portid); | ||
863 | if (!ln) { | ||
864 | csio_err(hw, "failed to lookup fcoe lnode on port:%d\n", | ||
865 | portid); | ||
866 | CSIO_DB_ASSERT(0); | ||
867 | return; | ||
868 | } | ||
869 | |||
870 | /* Check if lnode has valid vnp flowid */ | ||
871 | if (ln->vnp_flowid != CSIO_INVALID_IDX) { | ||
872 | /* New VN-Port */ | ||
873 | spin_unlock_irq(&hw->lock); | ||
874 | 			ln = csio_lnode_alloc(hw); | ||
875 | spin_lock_irq(&hw->lock); | ||
876 | if (!ln) { | ||
877 | csio_err(hw, | ||
878 | "failed to allocate fcoe lnode" | ||
879 | "for port:%d vnpi:x%x\n", | ||
880 | portid, vnpi); | ||
881 | CSIO_DB_ASSERT(0); | ||
882 | return; | ||
883 | } | ||
884 | ln->portid = portid; | ||
885 | } | ||
886 | ln->vnp_flowid = vnpi; | ||
887 | ln->dev_num &= ~0xFFFF; | ||
888 | ln->dev_num |= vnpi; | ||
889 | } | ||
890 | |||
891 | 	/* Initialize fcfi */ | ||
892 | ln->fcf_flowid = fcfi; | ||
893 | |||
894 | csio_info(hw, "Port:%d - FCOE LINK UP\n", portid); | ||
895 | |||
896 | CSIO_INC_STATS(ln, n_link_up); | ||
897 | |||
898 | /* Send LINKUP event to SM */ | ||
899 | csio_post_event(&ln->sm, CSIO_LNE_LINKUP); | ||
900 | } | ||
901 | |||
902 | /* | ||
903 | * csio_post_event_rns | ||
904 | * @ln - FCOE lnode | ||
905 | * @evt - Given rnode event | ||
906 | * Returns - none | ||
907 | * | ||
908 | * Posts given rnode event to all FCOE rnodes connected with given Lnode. | ||
909 | * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE | ||
910 | * event. | ||
911 | * | ||
912 |  * This is called with the hw lock held. | ||
913 | */ | ||
914 | static void | ||
915 | csio_post_event_rns(struct csio_lnode *ln, enum csio_rn_ev evt) | ||
916 | { | ||
917 | struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead; | ||
918 | struct list_head *tmp, *next; | ||
919 | struct csio_rnode *rn; | ||
920 | |||
921 | list_for_each_safe(tmp, next, &rnhead->sm.sm_list) { | ||
922 | rn = (struct csio_rnode *) tmp; | ||
923 | csio_post_event(&rn->sm, evt); | ||
924 | } | ||
925 | } | ||
926 | |||
927 | /* | ||
928 | * csio_cleanup_rns | ||
929 | * @ln - FCOE lnode | ||
930 | * Returns - none | ||
931 | * | ||
932 | * Frees all FCOE rnodes connected with given Lnode. | ||
933 | * | ||
934 |  * This is called with the hw lock held. | ||
935 | */ | ||
936 | static void | ||
937 | csio_cleanup_rns(struct csio_lnode *ln) | ||
938 | { | ||
939 | struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead; | ||
940 | struct list_head *tmp, *next_rn; | ||
941 | struct csio_rnode *rn; | ||
942 | |||
943 | list_for_each_safe(tmp, next_rn, &rnhead->sm.sm_list) { | ||
944 | rn = (struct csio_rnode *) tmp; | ||
945 | csio_put_rnode(ln, rn); | ||
946 | } | ||
947 | |||
948 | } | ||
949 | |||
950 | /* | ||
951 | * csio_post_event_lns | ||
952 | * @ln - FCOE lnode | ||
953 | * @evt - Given lnode event | ||
954 | * Returns - none | ||
955 | * | ||
956 | * Posts given lnode event to all FCOE lnodes connected with given Lnode. | ||
957 | * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE | ||
958 | * event. | ||
959 | * | ||
960 |  * This is called with the hw lock held. | ||
961 | */ | ||
962 | static void | ||
963 | csio_post_event_lns(struct csio_lnode *ln, enum csio_ln_ev evt) | ||
964 | { | ||
965 | struct list_head *tmp; | ||
966 | struct csio_lnode *cln, *sln; | ||
967 | |||
968 | /* If NPIV lnode, send evt only to that and return */ | ||
969 | if (csio_is_npiv_ln(ln)) { | ||
970 | csio_post_event(&ln->sm, evt); | ||
971 | return; | ||
972 | } | ||
973 | |||
974 | sln = ln; | ||
975 | /* Traverse children lnodes list and send evt */ | ||
976 | list_for_each(tmp, &sln->cln_head) { | ||
977 | cln = (struct csio_lnode *) tmp; | ||
978 | csio_post_event(&cln->sm, evt); | ||
979 | } | ||
980 | |||
981 | /* Send evt to parent lnode */ | ||
982 | csio_post_event(&ln->sm, evt); | ||
983 | } | ||
984 | |||
985 | /* | ||
986 |  * csio_ln_down - Local N_Port is down. | ||
987 |  * @ln - FCOE Lnode | ||
988 |  * Returns - none | ||
989 |  * | ||
990 |  * Sends LINK_DOWN events to the Lnode and its associated NPIV lnodes. | ||
991 |  * | ||
992 |  * This is called with the hw lock held. | ||
993 | */ | ||
994 | static void | ||
995 | csio_ln_down(struct csio_lnode *ln) | ||
996 | { | ||
997 | csio_post_event_lns(ln, CSIO_LNE_LINK_DOWN); | ||
998 | } | ||
999 | |||
1000 | /* | ||
1001 | * csio_handle_link_down - Logical Linkdown event. | ||
1002 | * @hw - HW module. | ||
1003 | * @portid - Physical port number | ||
1004 | * @fcfi - FCF index. | ||
1005 | * @vnpi - VNP index. | ||
1006 | * Returns - none | ||
1007 | * | ||
1008 | * This event is received from FW, when virtual link goes down between | ||
1009 |  * Physical port[ENode] and FCF. The Lnode and its associated NPIV lnodes | ||
1010 |  * hosted on this vnpi[VN-Port] will be de-instantiated. | ||
1011 |  * | ||
1012 |  * This is called with the hw lock held. | ||
1013 | */ | ||
1014 | static void | ||
1015 | csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi, | ||
1016 | uint32_t vnpi) | ||
1017 | { | ||
1018 | struct csio_fcf_info *fp; | ||
1019 | struct csio_lnode *ln; | ||
1020 | |||
1021 | /* Lookup lnode based on vnpi */ | ||
1022 | ln = csio_ln_lookup_by_vnpi(hw, vnpi); | ||
1023 | if (ln) { | ||
1024 | fp = ln->fcfinfo; | ||
1025 | CSIO_INC_STATS(ln, n_link_down); | ||
1026 | |||
1027 | 		/* Warn if linkdown is received while lnode is not in ready state */ | ||
1028 | 		if (!csio_is_lnode_ready(ln)) { | ||
1029 | 			csio_ln_warn(ln, | ||
1030 | 				    "warn: FCOE link is already offline. " | ||
1031 | 				    "Ignoring FCoE linkdown event on portid %d\n", | ||
1032 | portid); | ||
1033 | CSIO_INC_STATS(ln, n_evt_drop); | ||
1034 | return; | ||
1035 | } | ||
1036 | |||
1037 | /* Verify portid */ | ||
1038 | if (fp->portid != portid) { | ||
1039 | csio_ln_warn(ln, | ||
1040 | "warn: FCOE linkdown recv with " | ||
1041 | "invalid port %d\n", portid); | ||
1042 | CSIO_INC_STATS(ln, n_evt_drop); | ||
1043 | return; | ||
1044 | } | ||
1045 | |||
1046 | /* verify fcfi */ | ||
1047 | if (ln->fcf_flowid != fcfi) { | ||
1048 | csio_ln_warn(ln, | ||
1049 | "warn: FCOE linkdown recv with " | ||
1050 | "invalid fcfi x%x\n", fcfi); | ||
1051 | CSIO_INC_STATS(ln, n_evt_drop); | ||
1052 | return; | ||
1053 | } | ||
1054 | |||
1055 | csio_info(hw, "Port:%d - FCOE LINK DOWN\n", portid); | ||
1056 | |||
1057 | /* Send LINK_DOWN event to lnode s/m */ | ||
1058 | csio_ln_down(ln); | ||
1059 | |||
1060 | return; | ||
1061 | } else { | ||
1062 | csio_warn(hw, | ||
1063 | "warn: FCOE linkdown recv with invalid vnpi x%x\n", | ||
1064 | vnpi); | ||
1065 | CSIO_INC_STATS(hw, n_evt_drop); | ||
1066 | } | ||
1067 | } | ||
1068 | |||
1069 | /* | ||
1070 | * csio_is_lnode_ready - Checks FCOE lnode is in ready state. | ||
1071 | * @ln: Lnode module | ||
1072 | * | ||
1073 | * Returns True if FCOE lnode is in ready state. | ||
1074 | */ | ||
1075 | int | ||
1076 | csio_is_lnode_ready(struct csio_lnode *ln) | ||
1077 | { | ||
1078 | return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)); | ||
1079 | } | ||
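The state machine stores its current state as the handler function itself, so a state test is a function-pointer comparison against the handler. Equivalent predicates for the other states would be one-liners of the same shape; a hypothetical helper, not part of this patch:

	static inline int csio_is_lnode_offline(struct csio_lnode *ln)
	{
		return csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline);
	}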
1080 | |||
1081 | /*****************************************************************************/ | ||
1082 | /* START: Lnode SM */ | ||
1083 | /*****************************************************************************/ | ||
1084 | /* | ||
1085 | * csio_lns_uninit - The request in uninit state. | ||
1086 | * @ln - FCOE lnode. | ||
1087 | * @evt - Event to be processed. | ||
1088 | * | ||
1089 | * Process the given lnode event which is currently in "uninit" state. | ||
1090 | * Invoked with HW lock held. | ||
1091 | * Return - none. | ||
1092 | */ | ||
1093 | static void | ||
1094 | csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt) | ||
1095 | { | ||
1096 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
1097 | struct csio_lnode *rln = hw->rln; | ||
1098 | int rv; | ||
1099 | |||
1100 | CSIO_INC_STATS(ln, n_evt_sm[evt]); | ||
1101 | switch (evt) { | ||
1102 | case CSIO_LNE_LINKUP: | ||
1103 | csio_set_state(&ln->sm, csio_lns_online); | ||
1104 | /* Read FCF only for physical lnode */ | ||
1105 | if (csio_is_phys_ln(ln)) { | ||
1106 | rv = csio_ln_read_fcf_entry(ln, | ||
1107 | csio_ln_read_fcf_cbfn); | ||
1108 | if (rv != 0) { | ||
1109 | /* TODO: Send HW RESET event */ | ||
1110 | CSIO_INC_STATS(ln, n_err); | ||
1111 | break; | ||
1112 | } | ||
1113 | |||
1114 | /* Add FCF record */ | ||
1115 | list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead); | ||
1116 | } | ||
1117 | |||
1118 | rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn); | ||
1119 | if (rv != 0) { | ||
1120 | /* TODO: Send HW RESET event */ | ||
1121 | CSIO_INC_STATS(ln, n_err); | ||
1122 | } | ||
1123 | break; | ||
1124 | |||
1125 | case CSIO_LNE_DOWN_LINK: | ||
1126 | break; | ||
1127 | |||
1128 | default: | ||
1129 | csio_ln_dbg(ln, | ||
1130 | "unexp ln event %d recv from did:x%x in " | ||
1131 | "ln state[uninit].\n", evt, ln->nport_id); | ||
1132 | CSIO_INC_STATS(ln, n_evt_unexp); | ||
1133 | break; | ||
1134 | } /* switch event */ | ||
1135 | } | ||
1136 | |||
1137 | /* | ||
1138 | * csio_lns_online - The request in online state. | ||
1139 | * @ln - FCOE lnode. | ||
1140 | * @evt - Event to be processed. | ||
1141 | * | ||
1142 | * Process the given lnode event which is currently in "online" state. | ||
1143 | * Invoked with HW lock held. | ||
1144 | * Return - none. | ||
1145 | */ | ||
1146 | static void | ||
1147 | csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt) | ||
1148 | { | ||
1149 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
1150 | |||
1151 | CSIO_INC_STATS(ln, n_evt_sm[evt]); | ||
1152 | switch (evt) { | ||
1153 | case CSIO_LNE_LINKUP: | ||
1154 | csio_ln_warn(ln, | ||
1155 | "warn: FCOE link is up already " | ||
1156 | "Ignoring linkup on port:%d\n", ln->portid); | ||
1157 | CSIO_INC_STATS(ln, n_evt_drop); | ||
1158 | break; | ||
1159 | |||
1160 | case CSIO_LNE_FAB_INIT_DONE: | ||
1161 | csio_set_state(&ln->sm, csio_lns_ready); | ||
1162 | |||
1163 | spin_unlock_irq(&hw->lock); | ||
1164 | csio_lnode_async_event(ln, CSIO_LN_FC_LINKUP); | ||
1165 | spin_lock_irq(&hw->lock); | ||
1166 | |||
1167 | break; | ||
1168 | |||
1169 | case CSIO_LNE_LINK_DOWN: | ||
1170 | /* Fall through */ | ||
1171 | case CSIO_LNE_DOWN_LINK: | ||
1172 | csio_set_state(&ln->sm, csio_lns_uninit); | ||
1173 | if (csio_is_phys_ln(ln)) { | ||
1174 | /* Remove FCF entry */ | ||
1175 | list_del_init(&ln->fcfinfo->list); | ||
1176 | } | ||
1177 | break; | ||
1178 | |||
1179 | default: | ||
1180 | csio_ln_dbg(ln, | ||
1181 | "unexp ln event %d recv from did:x%x in " | ||
1182 | "ln state[uninit].\n", evt, ln->nport_id); | ||
1183 | CSIO_INC_STATS(ln, n_evt_unexp); | ||
1184 | |||
1185 | break; | ||
1186 | } /* switch event */ | ||
1187 | } | ||
1188 | |||
1189 | /* | ||
1190 | * csio_lns_ready - The request in ready state. | ||
1191 | * @ln - FCOE lnode. | ||
1192 | * @evt - Event to be processed. | ||
1193 | * | ||
1194 | * Process the given lnode event which is currently in "ready" state. | ||
1195 | * Invoked with HW lock held. | ||
1196 | * Return - none. | ||
1197 | */ | ||
1198 | static void | ||
1199 | csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt) | ||
1200 | { | ||
1201 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
1202 | |||
1203 | CSIO_INC_STATS(ln, n_evt_sm[evt]); | ||
1204 | switch (evt) { | ||
1205 | case CSIO_LNE_FAB_INIT_DONE: | ||
1206 | csio_ln_dbg(ln, | ||
1207 | "ignoring event %d recv from did x%x" | ||
1208 | "in ln state[ready].\n", evt, ln->nport_id); | ||
1209 | CSIO_INC_STATS(ln, n_evt_drop); | ||
1210 | break; | ||
1211 | |||
1212 | case CSIO_LNE_LINK_DOWN: | ||
1213 | csio_set_state(&ln->sm, csio_lns_offline); | ||
1214 | csio_post_event_rns(ln, CSIO_RNFE_DOWN); | ||
1215 | |||
1216 | spin_unlock_irq(&hw->lock); | ||
1217 | csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN); | ||
1218 | spin_lock_irq(&hw->lock); | ||
1219 | |||
1220 | if (csio_is_phys_ln(ln)) { | ||
1221 | /* Remove FCF entry */ | ||
1222 | list_del_init(&ln->fcfinfo->list); | ||
1223 | } | ||
1224 | break; | ||
1225 | |||
1226 | case CSIO_LNE_DOWN_LINK: | ||
1227 | csio_set_state(&ln->sm, csio_lns_offline); | ||
1228 | csio_post_event_rns(ln, CSIO_RNFE_DOWN); | ||
1229 | |||
1230 | 		/* The host needs to issue aborts in case FW has not returned | ||
1231 | 		 * WRs with status "ABORTED". | ||
1232 | 		 */ | ||
1233 | spin_unlock_irq(&hw->lock); | ||
1234 | csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN); | ||
1235 | spin_lock_irq(&hw->lock); | ||
1236 | |||
1237 | if (csio_is_phys_ln(ln)) { | ||
1238 | /* Remove FCF entry */ | ||
1239 | list_del_init(&ln->fcfinfo->list); | ||
1240 | } | ||
1241 | break; | ||
1242 | |||
1243 | case CSIO_LNE_CLOSE: | ||
1244 | csio_set_state(&ln->sm, csio_lns_uninit); | ||
1245 | csio_post_event_rns(ln, CSIO_RNFE_CLOSE); | ||
1246 | break; | ||
1247 | |||
1248 | case CSIO_LNE_LOGO: | ||
1249 | csio_set_state(&ln->sm, csio_lns_offline); | ||
1250 | csio_post_event_rns(ln, CSIO_RNFE_DOWN); | ||
1251 | break; | ||
1252 | |||
1253 | default: | ||
1254 | csio_ln_dbg(ln, | ||
1255 | "unexp ln event %d recv from did:x%x in " | ||
1256 | "ln state[uninit].\n", evt, ln->nport_id); | ||
1257 | CSIO_INC_STATS(ln, n_evt_unexp); | ||
1258 | CSIO_DB_ASSERT(0); | ||
1259 | break; | ||
1260 | } /* switch event */ | ||
1261 | } | ||
1262 | |||
1263 | /* | ||
1264 | * csio_lns_offline - The request in offline state. | ||
1265 | * @ln - FCOE lnode. | ||
1266 | * @evt - Event to be processed. | ||
1267 | * | ||
1268 | * Process the given lnode event which is currently in "offline" state. | ||
1269 | * Invoked with HW lock held. | ||
1270 | * Return - none. | ||
1271 | */ | ||
1272 | static void | ||
1273 | csio_lns_offline(struct csio_lnode *ln, enum csio_ln_ev evt) | ||
1274 | { | ||
1275 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
1276 | struct csio_lnode *rln = hw->rln; | ||
1277 | int rv; | ||
1278 | |||
1279 | CSIO_INC_STATS(ln, n_evt_sm[evt]); | ||
1280 | switch (evt) { | ||
1281 | case CSIO_LNE_LINKUP: | ||
1282 | csio_set_state(&ln->sm, csio_lns_online); | ||
1283 | /* Read FCF only for physical lnode */ | ||
1284 | if (csio_is_phys_ln(ln)) { | ||
1285 | rv = csio_ln_read_fcf_entry(ln, | ||
1286 | csio_ln_read_fcf_cbfn); | ||
1287 | if (rv != 0) { | ||
1288 | /* TODO: Send HW RESET event */ | ||
1289 | CSIO_INC_STATS(ln, n_err); | ||
1290 | break; | ||
1291 | } | ||
1292 | |||
1293 | /* Add FCF record */ | ||
1294 | list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead); | ||
1295 | } | ||
1296 | |||
1297 | rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn); | ||
1298 | if (rv != 0) { | ||
1299 | /* TODO: Send HW RESET event */ | ||
1300 | CSIO_INC_STATS(ln, n_err); | ||
1301 | } | ||
1302 | break; | ||
1303 | |||
1304 | case CSIO_LNE_LINK_DOWN: | ||
1305 | case CSIO_LNE_DOWN_LINK: | ||
1306 | case CSIO_LNE_LOGO: | ||
1307 | csio_ln_dbg(ln, | ||
1308 | "ignoring event %d recv from did x%x" | ||
1309 | "in ln state[offline].\n", evt, ln->nport_id); | ||
1310 | CSIO_INC_STATS(ln, n_evt_drop); | ||
1311 | break; | ||
1312 | |||
1313 | case CSIO_LNE_CLOSE: | ||
1314 | csio_set_state(&ln->sm, csio_lns_uninit); | ||
1315 | csio_post_event_rns(ln, CSIO_RNFE_CLOSE); | ||
1316 | break; | ||
1317 | |||
1318 | default: | ||
1319 | csio_ln_dbg(ln, | ||
1320 | "unexp ln event %d recv from did:x%x in " | ||
1321 | "ln state[offline]\n", evt, ln->nport_id); | ||
1322 | CSIO_INC_STATS(ln, n_evt_unexp); | ||
1323 | CSIO_DB_ASSERT(0); | ||
1324 | break; | ||
1325 | } /* switch event */ | ||
1326 | } | ||
1327 | |||
1328 | /*****************************************************************************/ | ||
1329 | /* END: Lnode SM */ | ||
1330 | /*****************************************************************************/ | ||
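Summarizing the transitions that the four state handlers above implement:

	/*
	 *   uninit  --LINKUP-------------------> online
	 *   online  --FAB_INIT_DONE------------> ready
	 *   online  --LINK_DOWN/DOWN_LINK------> uninit
	 *   ready   --LINK_DOWN/DOWN_LINK/LOGO-> offline
	 *   ready   --CLOSE--------------------> uninit
	 *   offline --LINKUP-------------------> online
	 *   offline --CLOSE--------------------> uninit
	 */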
1331 | |||
1332 | static void | ||
1333 | csio_free_fcfinfo(struct kref *kref) | ||
1334 | { | ||
1335 | struct csio_fcf_info *fcfinfo = container_of(kref, | ||
1336 | struct csio_fcf_info, kref); | ||
1337 | kfree(fcfinfo); | ||
1338 | } | ||
1339 | |||
1340 | /* Helper routines for attributes */ | ||
1341 | /* | ||
1342 | * csio_lnode_state_to_str - Get current state of FCOE lnode. | ||
1343 | * @ln - lnode | ||
1344 |  * @str - Buffer that receives the state string. | ||
1345 | * | ||
1346 | */ | ||
1347 | void | ||
1348 | csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str) | ||
1349 | { | ||
1350 | if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) { | ||
1351 | strcpy(str, "UNINIT"); | ||
1352 | return; | ||
1353 | } | ||
1354 | if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) { | ||
1355 | strcpy(str, "READY"); | ||
1356 | return; | ||
1357 | } | ||
1358 | if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) { | ||
1359 | strcpy(str, "OFFLINE"); | ||
1360 | return; | ||
1361 | } | ||
1362 | strcpy(str, "UNKNOWN"); | ||
1363 | } /* csio_lnode_state_to_str */ | ||
1364 | |||
1365 | |||
1366 | int | ||
1367 | csio_get_phy_port_stats(struct csio_hw *hw, uint8_t portid, | ||
1368 | struct fw_fcoe_port_stats *port_stats) | ||
1369 | { | ||
1370 | struct csio_mb *mbp; | ||
1371 | struct fw_fcoe_port_cmd_params portparams; | ||
1372 | enum fw_retval retval; | ||
1373 | int idx; | ||
1374 | |||
1375 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
1376 | if (!mbp) { | ||
1377 | csio_err(hw, "FCoE FCF PARAMS command out of memory!\n"); | ||
1378 | return -ENOMEM; | ||
1379 | } | ||
1380 | portparams.portid = portid; | ||
1381 | |||
1382 | for (idx = 1; idx <= 3; idx++) { | ||
1383 | portparams.idx = (idx-1)*6 + 1; | ||
1384 | portparams.nstats = 6; | ||
1385 | if (idx == 3) | ||
1386 | portparams.nstats = 4; | ||
1387 | csio_fcoe_read_portparams_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, | ||
1388 | &portparams, NULL); | ||
1389 | if (csio_mb_issue(hw, mbp)) { | ||
1390 | csio_err(hw, "Issue of FCoE port params failed!\n"); | ||
1391 | mempool_free(mbp, hw->mb_mempool); | ||
1392 | return -EINVAL; | ||
1393 | } | ||
1394 | csio_mb_process_portparams_rsp(hw, mbp, &retval, | ||
1395 | &portparams, port_stats); | ||
1396 | } | ||
1397 | |||
1398 | mempool_free(mbp, hw->mb_mempool); | ||
1399 | return 0; | ||
1400 | } | ||
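The loop above pulls the per-port statistics in three mailbox round trips: the chunks start at stats indices 1, 7 and 13, with the final chunk fetching the remaining four counters. A small standalone sketch of just that index arithmetic (the six-per-command limit is inferred from this code, not from a documented firmware constant):

```c
#include <stdio.h>

/* Reproduces the chunking in csio_get_phy_port_stats(): 16 counters,
 * fetched at most 6 at a time. */
int main(void)
{
	for (int idx = 1; idx <= 3; idx++) {
		int start  = (idx - 1) * 6 + 1;		/* 1, 7, 13 */
		int nstats = (idx == 3) ? 4 : 6;	/* 6, 6, 4  */
		printf("cmd %d: stats %d..%d\n", idx, start, start + nstats - 1);
	}
	return 0;
}
```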
1401 | |||
1402 | /* | ||
1403 | * csio_ln_mgmt_wr_handler - Mgmt Work Request handler. | ||
1404 | * @wr - WR. | ||
1405 | * @len - WR len. | ||
1406 | * This handler is invoked when an outstanding mgmt WR is completed. | ||
1407 | * It is invoked in the context of the FW event worker thread for every | ||
1408 | * mgmt event received. | ||
1409 | * Return - none. | ||
1410 | */ | ||
1411 | |||
1412 | static void | ||
1413 | csio_ln_mgmt_wr_handler(struct csio_hw *hw, void *wr, uint32_t len) | ||
1414 | { | ||
1415 | struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw); | ||
1416 | struct csio_ioreq *io_req = NULL; | ||
1417 | struct fw_fcoe_els_ct_wr *wr_cmd; | ||
1418 | |||
1419 | |||
1420 | wr_cmd = (struct fw_fcoe_els_ct_wr *) wr; | ||
1421 | |||
1422 | if (len < sizeof(struct fw_fcoe_els_ct_wr)) { | ||
1423 | csio_err(mgmtm->hw, | ||
1424 | "Invalid ELS CT WR length recvd, len:%x\n", len); | ||
1425 | mgmtm->stats.n_err++; | ||
1426 | return; | ||
1427 | } | ||
1428 | |||
1429 | io_req = (struct csio_ioreq *) ((uintptr_t) wr_cmd->cookie); | ||
1430 | io_req->wr_status = csio_wr_status(wr_cmd); | ||
1431 | |||
1432 | /* Look up the ioreq in our active queue */ | ||
1433 | spin_lock_irq(&hw->lock); | ||
1434 | if (csio_mgmt_req_lookup(mgmtm, io_req) != 0) { | ||
1435 | csio_err(mgmtm->hw, | ||
1436 | "Error- Invalid IO handle recv in WR. handle: %p\n", | ||
1437 | io_req); | ||
1438 | mgmtm->stats.n_err++; | ||
1439 | spin_unlock_irq(&hw->lock); | ||
1440 | return; | ||
1441 | } | ||
1442 | |||
1443 | mgmtm = csio_hw_to_mgmtm(hw); | ||
1444 | |||
1445 | /* Dequeue from active queue */ | ||
1446 | list_del_init(&io_req->sm.sm_list); | ||
1447 | mgmtm->stats.n_active--; | ||
1448 | spin_unlock_irq(&hw->lock); | ||
1449 | |||
1450 | /* io_req will be freed by completion handler */ | ||
1451 | if (io_req->io_cbfn) | ||
1452 | io_req->io_cbfn(hw, io_req); | ||
1453 | } | ||
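The lookup works because the submit path (csio_ln_mgmt_submit_req() below) stores the ioreq's own address in io_req->fw_handle, csio_ln_prep_ecwr() copies that into the WR cookie, and the firmware echoes the cookie back in the completion. Note that the handler above still validates the recovered pointer against the active queue rather than trusting it blindly. A stripped-down sketch of the cookie round trip:

```c
#include <stdint.h>
#include <assert.h>

struct ioreq {
	int wr_status;
};

/* Submit side: what goes into the WR cookie (io_req->fw_handle). */
static uint64_t req_to_cookie(struct ioreq *req)
{
	return (uint64_t)(uintptr_t)req;
}

/* Completion side: recover the request from the echoed cookie. */
static struct ioreq *cookie_to_req(uint64_t cookie)
{
	return (struct ioreq *)(uintptr_t)cookie;
}

int main(void)
{
	struct ioreq req = { 0 };
	uint64_t cookie = req_to_cookie(&req);

	assert(cookie_to_req(cookie) == &req);	/* round trip is exact */
	return 0;
}
```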
1454 | |||
1455 | /** | ||
1456 | * csio_fcoe_fwevt_handler - Event handler for Firmware FCoE events. | ||
1457 | * @hw: HW module | ||
1458 | * @cpl_op: CPL opcode | ||
1459 | * @cmd: FW cmd/WR. | ||
1460 | * | ||
1461 | * Process received FCoE cmd/WR event from FW. | ||
1462 | */ | ||
1463 | void | ||
1464 | csio_fcoe_fwevt_handler(struct csio_hw *hw, __u8 cpl_op, __be64 *cmd) | ||
1465 | { | ||
1466 | struct csio_lnode *ln; | ||
1467 | struct csio_rnode *rn; | ||
1468 | uint8_t portid, opcode = *(uint8_t *)cmd; | ||
1469 | struct fw_fcoe_link_cmd *lcmd; | ||
1470 | struct fw_wr_hdr *wr; | ||
1471 | struct fw_rdev_wr *rdev_wr; | ||
1472 | enum fw_fcoe_link_status lstatus; | ||
1473 | uint32_t fcfi, rdev_flowid, vnpi; | ||
1474 | enum csio_ln_ev evt; | ||
1475 | |||
1476 | if (cpl_op == CPL_FW6_MSG && opcode == FW_FCOE_LINK_CMD) { | ||
1477 | |||
1478 | lcmd = (struct fw_fcoe_link_cmd *)cmd; | ||
1479 | lstatus = lcmd->lstatus; | ||
1480 | portid = FW_FCOE_LINK_CMD_PORTID_GET( | ||
1481 | ntohl(lcmd->op_to_portid)); | ||
1482 | fcfi = FW_FCOE_LINK_CMD_FCFI_GET(ntohl(lcmd->sub_opcode_fcfi)); | ||
1483 | vnpi = FW_FCOE_LINK_CMD_VNPI_GET(ntohl(lcmd->vnpi_pkd)); | ||
1484 | |||
1485 | if (lstatus == FCOE_LINKUP) { | ||
1486 | |||
1487 | /* HW lock here */ | ||
1488 | spin_lock_irq(&hw->lock); | ||
1489 | csio_handle_link_up(hw, portid, fcfi, vnpi); | ||
1490 | spin_unlock_irq(&hw->lock); | ||
1491 | /* HW un lock here */ | ||
1492 | |||
1493 | } else if (lstatus == FCOE_LINKDOWN) { | ||
1494 | |||
1495 | /* HW lock here */ | ||
1496 | spin_lock_irq(&hw->lock); | ||
1497 | csio_handle_link_down(hw, portid, fcfi, vnpi); | ||
1498 | spin_unlock_irq(&hw->lock); | ||
1499 | /* HW un lock here */ | ||
1500 | } else { | ||
1501 | csio_warn(hw, "Unexpected FCOE LINK status:0x%x\n", | ||
1502 | lcmd->lstatus); | ||
1503 | CSIO_INC_STATS(hw, n_cpl_unexp); | ||
1504 | } | ||
1505 | } else if (cpl_op == CPL_FW6_PLD) { | ||
1506 | wr = (struct fw_wr_hdr *) (cmd + 4); | ||
1507 | if (FW_WR_OP_GET(be32_to_cpu(wr->hi)) | ||
1508 | == FW_RDEV_WR) { | ||
1509 | |||
1510 | rdev_wr = (struct fw_rdev_wr *) (cmd + 4); | ||
1511 | |||
1512 | rdev_flowid = FW_RDEV_WR_FLOWID_GET( | ||
1513 | ntohl(rdev_wr->alloc_to_len16)); | ||
1514 | vnpi = FW_RDEV_WR_ASSOC_FLOWID_GET( | ||
1515 | ntohl(rdev_wr->flags_to_assoc_flowid)); | ||
1516 | |||
1517 | csio_dbg(hw, | ||
1518 | "FW_RDEV_WR: flowid:x%x ev_cause:x%x " | ||
1519 | "vnpi:0x%x\n", rdev_flowid, | ||
1520 | rdev_wr->event_cause, vnpi); | ||
1521 | |||
1522 | if (rdev_wr->protocol != PROT_FCOE) { | ||
1523 | csio_err(hw, | ||
1524 | "FW_RDEV_WR: invalid proto:x%x " | ||
1525 | "received with flowid:x%x\n", | ||
1526 | rdev_wr->protocol, | ||
1527 | rdev_flowid); | ||
1528 | CSIO_INC_STATS(hw, n_evt_drop); | ||
1529 | return; | ||
1530 | } | ||
1531 | |||
1532 | /* HW lock here */ | ||
1533 | spin_lock_irq(&hw->lock); | ||
1534 | ln = csio_ln_lookup_by_vnpi(hw, vnpi); | ||
1535 | if (!ln) { | ||
1536 | csio_err(hw, | ||
1537 | "FW_DEV_WR: invalid vnpi:x%x received " | ||
1538 | "with flowid:x%x\n", vnpi, rdev_flowid); | ||
1539 | CSIO_INC_STATS(hw, n_evt_drop); | ||
1540 | goto out_pld; | ||
1541 | } | ||
1542 | |||
1543 | rn = csio_confirm_rnode(ln, rdev_flowid, | ||
1544 | &rdev_wr->u.fcoe_rdev); | ||
1545 | if (!rn) { | ||
1546 | csio_ln_dbg(ln, | ||
1547 | "Failed to confirm rnode " | ||
1548 | "for flowid:x%x\n", rdev_flowid); | ||
1549 | CSIO_INC_STATS(hw, n_evt_drop); | ||
1550 | goto out_pld; | ||
1551 | } | ||
1552 | |||
1553 | /* save previous event for debugging */ | ||
1554 | ln->prev_evt = ln->cur_evt; | ||
1555 | ln->cur_evt = rdev_wr->event_cause; | ||
1556 | CSIO_INC_STATS(ln, n_evt_fw[rdev_wr->event_cause]); | ||
1557 | |||
1558 | /* Translate all the fabric events to lnode SM events */ | ||
1559 | evt = CSIO_FWE_TO_LNE(rdev_wr->event_cause); | ||
1560 | if (evt) { | ||
1561 | csio_ln_dbg(ln, | ||
1562 | "Posting event to lnode event:%d " | ||
1563 | "cause:%d flowid:x%x\n", evt, | ||
1564 | rdev_wr->event_cause, rdev_flowid); | ||
1565 | csio_post_event(&ln->sm, evt); | ||
1566 | } | ||
1567 | |||
1568 | /* Handover event to rn SM here. */ | ||
1569 | csio_rnode_fwevt_handler(rn, rdev_wr->event_cause); | ||
1570 | out_pld: | ||
1571 | spin_unlock_irq(&hw->lock); | ||
1572 | return; | ||
1573 | } else { | ||
1574 | csio_warn(hw, "unexpected WR op(0x%x) recv\n", | ||
1575 | FW_WR_OP_GET(be32_to_cpu((wr->hi)))); | ||
1576 | CSIO_INC_STATS(hw, n_cpl_unexp); | ||
1577 | } | ||
1578 | } else if (cpl_op == CPL_FW6_MSG) { | ||
1579 | wr = (struct fw_wr_hdr *) (cmd); | ||
1580 | if (FW_WR_OP_GET(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) { | ||
1581 | csio_ln_mgmt_wr_handler(hw, wr, | ||
1582 | sizeof(struct fw_fcoe_els_ct_wr)); | ||
1583 | } else { | ||
1584 | csio_warn(hw, "unexpected WR op(0x%x) recv\n", | ||
1585 | FW_WR_OP_GET(be32_to_cpu((wr->hi)))); | ||
1586 | CSIO_INC_STATS(hw, n_cpl_unexp); | ||
1587 | } | ||
1588 | } else { | ||
1589 | csio_warn(hw, "unexpected CPL op(0x%x) recv\n", opcode); | ||
1590 | CSIO_INC_STATS(hw, n_cpl_unexp); | ||
1591 | } | ||
1592 | } | ||
1593 | |||
1594 | /** | ||
1595 | * csio_lnode_start - Kickstart lnode discovery. | ||
1596 | * @ln: lnode | ||
1597 | * | ||
1598 | * This routine kickstarts the discovery by issuing an FCOE_LINK (up) command. | ||
1599 | */ | ||
1600 | int | ||
1601 | csio_lnode_start(struct csio_lnode *ln) | ||
1602 | { | ||
1603 | int rv = 0; | ||
1604 | if (csio_is_phys_ln(ln) && !(ln->flags & CSIO_LNF_LINK_ENABLE)) { | ||
1605 | rv = csio_fcoe_enable_link(ln, 1); | ||
1606 | ln->flags |= CSIO_LNF_LINK_ENABLE; | ||
1607 | } | ||
1608 | |||
1609 | return rv; | ||
1610 | } | ||
1611 | |||
1612 | /** | ||
1613 | * csio_lnode_stop - Stop the lnode. | ||
1614 | * @ln: lnode | ||
1615 | * | ||
1616 | * This routine is invoked by HW module to stop lnode and its associated NPIV | ||
1617 | * lnodes. | ||
1618 | */ | ||
1619 | void | ||
1620 | csio_lnode_stop(struct csio_lnode *ln) | ||
1621 | { | ||
1622 | csio_post_event_lns(ln, CSIO_LNE_DOWN_LINK); | ||
1623 | if (csio_is_phys_ln(ln) && (ln->flags & CSIO_LNF_LINK_ENABLE)) { | ||
1624 | csio_fcoe_enable_link(ln, 0); | ||
1625 | ln->flags &= ~CSIO_LNF_LINK_ENABLE; | ||
1626 | } | ||
1627 | csio_ln_dbg(ln, "stopping ln :%p\n", ln); | ||
1628 | } | ||
1629 | |||
1630 | /** | ||
1631 | * csio_lnode_close - Close an lnode. | ||
1632 | * @ln: lnode | ||
1633 | * | ||
1634 | * This routine is invoked by HW module to close an lnode and its | ||
1635 | * associated NPIV lnodes. Lnode and its associated NPIV lnodes are | ||
1636 | * set to uninitialized state. | ||
1637 | */ | ||
1638 | void | ||
1639 | csio_lnode_close(struct csio_lnode *ln) | ||
1640 | { | ||
1641 | csio_post_event_lns(ln, CSIO_LNE_CLOSE); | ||
1642 | if (csio_is_phys_ln(ln)) | ||
1643 | ln->vnp_flowid = CSIO_INVALID_IDX; | ||
1644 | |||
1645 | csio_ln_dbg(ln, "closed ln :%p\n", ln); | ||
1646 | } | ||
1647 | |||
1648 | /* | ||
1649 | * csio_ln_prep_ecwr - Prepare ELS/CT WR. | ||
1650 | * @io_req - IO request. | ||
1651 | * @wr_len - WR len | ||
1652 | * @immd_len - WR immediate data length | ||
1653 | * @sub_op - Sub opcode | ||
1654 | * @sid - source portid. | ||
1655 | * @did - destination portid | ||
1656 | * @flow_id - flowid | ||
1657 | * @fw_wr - ELS/CT WR to be prepared. | ||
1658 | * Returns: 0 - on success | ||
1659 | */ | ||
1660 | static int | ||
1661 | csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len, | ||
1662 | uint32_t immd_len, uint8_t sub_op, uint32_t sid, | ||
1663 | uint32_t did, uint32_t flow_id, uint8_t *fw_wr) | ||
1664 | { | ||
1665 | struct fw_fcoe_els_ct_wr *wr; | ||
1666 | uint32_t port_id; | ||
1667 | |||
1668 | wr = (struct fw_fcoe_els_ct_wr *)fw_wr; | ||
1669 | wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_FCOE_ELS_CT_WR) | | ||
1670 | FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len)); | ||
1671 | |||
1672 | wr_len = DIV_ROUND_UP(wr_len, 16); | ||
1673 | wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(flow_id) | | ||
1674 | FW_WR_LEN16(wr_len)); | ||
1675 | wr->els_ct_type = sub_op; | ||
1676 | wr->ctl_pri = 0; | ||
1677 | wr->cp_en_class = 0; | ||
1678 | wr->cookie = io_req->fw_handle; | ||
1679 | wr->iqid = (uint16_t)cpu_to_be16(csio_q_physiqid( | ||
1680 | io_req->lnode->hwp, io_req->iq_idx)); | ||
1681 | wr->fl_to_sp = FW_FCOE_ELS_CT_WR_SP(1); | ||
1682 | wr->tmo_val = (uint8_t) io_req->tmo; | ||
1683 | port_id = htonl(sid); | ||
1684 | memcpy(wr->l_id, PORT_ID_PTR(port_id), 3); | ||
1685 | port_id = htonl(did); | ||
1686 | memcpy(wr->r_id, PORT_ID_PTR(port_id), 3); | ||
1687 | |||
1688 | /* Prepare RSP SGL */ | ||
1689 | wr->rsp_dmalen = cpu_to_be32(io_req->dma_buf.len); | ||
1690 | wr->rsp_dmaaddr = cpu_to_be64(io_req->dma_buf.paddr); | ||
1691 | return 0; | ||
1692 | } | ||
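Fibre Channel N_Port IDs are 24-bit values, but wr->l_id/wr->r_id are 3-byte fields, hence the htonl() followed by a 3-byte memcpy: converting to big-endian places the ID in the low three bytes of the word, and PORT_ID_PTR presumably points past the leading zero byte. A sketch under that assumption (the macro body below is a guess at the driver's definition, not a quote of it):

```c
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Assumed shape of the driver's PORT_ID_PTR: point past the top (zero)
 * byte of a big-endian 32-bit word holding a 24-bit FC port id. */
#define PORT_ID_PTR(x)	(((uint8_t *)&(x)) + 1)

int main(void)
{
	uint32_t sid = 0x010203;		/* 24-bit N_Port ID */
	uint32_t be  = htonl(sid);		/* bytes: 00 01 02 03 */
	uint8_t l_id[3];

	memcpy(l_id, PORT_ID_PTR(be), 3);	/* wire order: 01 02 03 */
	printf("%02x.%02x.%02x\n", l_id[0], l_id[1], l_id[2]);
	return 0;
}
```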
1693 | |||
1694 | /* | ||
1695 | * csio_ln_mgmt_submit_wr - Post ELS/CT work request. | ||
1696 | * @mgmtm - mgmt module | ||
1697 | * @io_req - io request. | ||
1698 | * @sub_op - ELS or CT request type | ||
1699 | * @pld - DMA payload buffer | ||
1700 | * @pld_len - Payload len | ||
1701 | * Prepares the ELS/CT work request and sends it to FW. | ||
1702 | * Returns: 0 - on success | ||
1703 | */ | ||
1704 | static int | ||
1705 | csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req, | ||
1706 | uint8_t sub_op, struct csio_dma_buf *pld, | ||
1707 | uint32_t pld_len) | ||
1708 | { | ||
1709 | struct csio_wr_pair wrp; | ||
1710 | struct csio_lnode *ln = io_req->lnode; | ||
1711 | struct csio_rnode *rn = io_req->rnode; | ||
1712 | struct csio_hw *hw = mgmtm->hw; | ||
1713 | uint8_t fw_wr[64]; | ||
1714 | struct ulptx_sgl dsgl; | ||
1715 | uint32_t wr_size = 0; | ||
1716 | uint8_t im_len = 0; | ||
1717 | uint32_t wr_off = 0; | ||
1718 | |||
1719 | int ret = 0; | ||
1720 | |||
1721 | /* Calculate WR Size for this ELS REQ */ | ||
1722 | wr_size = sizeof(struct fw_fcoe_els_ct_wr); | ||
1723 | |||
1724 | /* Send as immediate data if pld < 256 */ | ||
1725 | if (pld_len < 256) { | ||
1726 | wr_size += ALIGN(pld_len, 8); | ||
1727 | im_len = (uint8_t)pld_len; | ||
1728 | } else | ||
1729 | wr_size += sizeof(struct ulptx_sgl); | ||
1730 | |||
1731 | /* Roundup WR size in units of 16 bytes */ | ||
1732 | wr_size = ALIGN(wr_size, 16); | ||
1733 | |||
1734 | /* Get WR to send ELS REQ */ | ||
1735 | ret = csio_wr_get(hw, mgmtm->eq_idx, wr_size, &wrp); | ||
1736 | if (ret != 0) { | ||
1737 | csio_err(hw, "Failed to get WR for ec_req %p ret:%d\n", | ||
1738 | io_req, ret); | ||
1739 | return ret; | ||
1740 | } | ||
1741 | |||
1742 | /* Prepare Generic WR used by all ELS/CT cmd */ | ||
1743 | csio_ln_prep_ecwr(io_req, wr_size, im_len, sub_op, | ||
1744 | ln->nport_id, rn->nport_id, | ||
1745 | csio_rn_flowid(rn), | ||
1746 | &fw_wr[0]); | ||
1747 | |||
1748 | /* Copy ELS/CT WR CMD */ | ||
1749 | csio_wr_copy_to_wrp(&fw_wr[0], &wrp, wr_off, | ||
1750 | sizeof(struct fw_fcoe_els_ct_wr)); | ||
1751 | wr_off += sizeof(struct fw_fcoe_els_ct_wr); | ||
1752 | |||
1753 | /* Copy payload to Immediate section of WR */ | ||
1754 | if (im_len) | ||
1755 | csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len); | ||
1756 | else { | ||
1757 | /* Program DSGL to dma payload */ | ||
1758 | dsgl.cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | | ||
1759 | ULPTX_MORE | ULPTX_NSGE(1)); | ||
1760 | dsgl.len0 = cpu_to_be32(pld_len); | ||
1761 | dsgl.addr0 = cpu_to_be64(pld->paddr); | ||
1762 | csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8), | ||
1763 | sizeof(struct ulptx_sgl)); | ||
1764 | } | ||
1765 | |||
1766 | /* Issue work request to xmit ELS/CT req to FW */ | ||
1767 | csio_wr_issue(mgmtm->hw, mgmtm->eq_idx, false); | ||
1768 | return ret; | ||
1769 | } | ||
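The sizing logic above has two cases: payloads under 256 bytes ride inline in the WR as immediate data (padded to 8 bytes), while larger payloads stay in their DMA buffer and only a single-entry DSGL descriptor is appended; either way the total is rounded to 16 bytes, since WR lengths are carried in 16-byte units (FW_WR_LEN16). A standalone sketch of that arithmetic, with placeholder struct sizes rather than the real firmware layouts:

```c
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Mirrors the sizing in csio_ln_mgmt_submit_wr(); 64 and 16 are
 * stand-ins for sizeof(struct fw_fcoe_els_ct_wr) and
 * sizeof(struct ulptx_sgl), not the actual values. */
static unsigned int wr_size(unsigned int pld_len)
{
	unsigned int sz = 64;			/* base ELS/CT WR */

	if (pld_len < 256)
		sz += ALIGN_UP(pld_len, 8);	/* payload inlined in the WR */
	else
		sz += 16;			/* one DSGL entry instead */

	return ALIGN_UP(sz, 16);		/* LEN16 counts 16-byte units */
}

int main(void)
{
	printf("%u %u\n", wr_size(100), wr_size(1024));	/* prints: 176 80 */
	return 0;
}
```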
1770 | |||
1771 | /* | ||
1772 | * csio_ln_mgmt_submit_req - Submit FCOE Mgmt request. | ||
1773 | * @io_req - IO Request | ||
1774 | * @io_cbfn - Completion handler. | ||
1775 | * @req_type - ELS or CT request type | ||
1776 | * @pld - DMA payload buffer | ||
1777 | * @pld_len - Payload len | ||
1778 | * | ||
1779 | * | ||
1780 | * This API is used to submit a management ELS/CT request. | ||
1781 | * It is called with the hw lock held. | ||
1782 | * Returns: 0 - on success | ||
1783 | * -ENOMEM - on error. | ||
1784 | */ | ||
1785 | static int | ||
1786 | csio_ln_mgmt_submit_req(struct csio_ioreq *io_req, | ||
1787 | void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *), | ||
1788 | enum fcoe_cmn_type req_type, struct csio_dma_buf *pld, | ||
1789 | uint32_t pld_len) | ||
1790 | { | ||
1791 | struct csio_hw *hw = csio_lnode_to_hw(io_req->lnode); | ||
1792 | struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw); | ||
1793 | int rv; | ||
1794 | |||
1795 | io_req->io_cbfn = io_cbfn; /* Upper layer callback handler */ | ||
1796 | io_req->fw_handle = (uintptr_t) (io_req); | ||
1797 | io_req->eq_idx = mgmtm->eq_idx; | ||
1798 | io_req->iq_idx = mgmtm->iq_idx; | ||
1799 | |||
1800 | rv = csio_ln_mgmt_submit_wr(mgmtm, io_req, req_type, pld, pld_len); | ||
1801 | if (rv == 0) { | ||
1802 | list_add_tail(&io_req->sm.sm_list, &mgmtm->active_q); | ||
1803 | mgmtm->stats.n_active++; | ||
1804 | } | ||
1805 | return rv; | ||
1806 | } | ||
1807 | |||
1808 | /* | ||
1809 | * csio_ln_fdmi_init - FDMI Init entry point. | ||
1810 | * @ln: lnode | ||
1811 | */ | ||
1812 | static int | ||
1813 | csio_ln_fdmi_init(struct csio_lnode *ln) | ||
1814 | { | ||
1815 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
1816 | struct csio_dma_buf *dma_buf; | ||
1817 | |||
1818 | /* Allocate MGMT request required for FDMI */ | ||
1819 | ln->mgmt_req = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL); | ||
1820 | if (!ln->mgmt_req) { | ||
1821 | csio_ln_err(ln, "Failed to alloc ioreq for FDMI\n"); | ||
1822 | CSIO_INC_STATS(hw, n_err_nomem); | ||
1823 | return -ENOMEM; | ||
1824 | } | ||
1825 | |||
1826 | /* Allocate DMA buffer for the FDMI response payload */ | ||
1827 | dma_buf = &ln->mgmt_req->dma_buf; | ||
1828 | dma_buf->len = 2048; | ||
1829 | dma_buf->vaddr = pci_alloc_consistent(hw->pdev, dma_buf->len, | ||
1830 | &dma_buf->paddr); | ||
1831 | if (!dma_buf->vaddr) { | ||
1832 | csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n"); | ||
1833 | kfree(ln->mgmt_req); | ||
1834 | ln->mgmt_req = NULL; | ||
1835 | return -ENOMEM; | ||
1836 | } | ||
1837 | |||
1838 | ln->flags |= CSIO_LNF_FDMI_ENABLE; | ||
1839 | return 0; | ||
1840 | } | ||
1841 | |||
1842 | /* | ||
1843 | * csio_ln_fdmi_exit - FDMI exit entry point. | ||
1844 | * @ln: lnode | ||
1845 | */ | ||
1846 | static int | ||
1847 | csio_ln_fdmi_exit(struct csio_lnode *ln) | ||
1848 | { | ||
1849 | struct csio_dma_buf *dma_buf; | ||
1850 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
1851 | |||
1852 | if (!ln->mgmt_req) | ||
1853 | return 0; | ||
1854 | |||
1855 | dma_buf = &ln->mgmt_req->dma_buf; | ||
1856 | if (dma_buf->vaddr) | ||
1857 | pci_free_consistent(hw->pdev, dma_buf->len, dma_buf->vaddr, | ||
1858 | dma_buf->paddr); | ||
1859 | |||
1860 | kfree(ln->mgmt_req); | ||
1861 | return 0; | ||
1862 | } | ||
1863 | |||
1864 | int | ||
1865 | csio_scan_done(struct csio_lnode *ln, unsigned long ticks, | ||
1866 | unsigned long time, unsigned long max_scan_ticks, | ||
1867 | unsigned long delta_scan_ticks) | ||
1868 | { | ||
1869 | int rv = 0; | ||
1870 | |||
1871 | if (time >= max_scan_ticks) | ||
1872 | return 1; | ||
1873 | |||
1874 | if (!ln->tgt_scan_tick) | ||
1875 | ln->tgt_scan_tick = ticks; | ||
1876 | |||
1877 | if (((ticks - ln->tgt_scan_tick) >= delta_scan_ticks)) { | ||
1878 | if (!ln->last_scan_ntgts) | ||
1879 | ln->last_scan_ntgts = ln->n_scsi_tgts; | ||
1880 | else { | ||
1881 | if (ln->last_scan_ntgts == ln->n_scsi_tgts) | ||
1882 | return 1; | ||
1883 | |||
1884 | ln->last_scan_ntgts = ln->n_scsi_tgts; | ||
1885 | } | ||
1886 | ln->tgt_scan_tick = ticks; | ||
1887 | } | ||
1888 | return rv; | ||
1889 | } | ||
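In other words, the scan is declared complete either when the overall time budget (max_scan_ticks) is spent, or when one full delta_scan_ticks window passes without the SCSI target count changing. A self-contained restatement of the same logic, with a short trace:

```c
#include <stdio.h>

struct ln {
	unsigned long tgt_scan_tick;
	unsigned int last_scan_ntgts, n_scsi_tgts;
};

/* Equivalent restatement of csio_scan_done(). */
static int scan_done(struct ln *ln, unsigned long ticks, unsigned long time,
		     unsigned long max_ticks, unsigned long delta_ticks)
{
	if (time >= max_ticks)
		return 1;			/* hard budget exhausted */
	if (!ln->tgt_scan_tick)
		ln->tgt_scan_tick = ticks;	/* open the delta window */
	if (ticks - ln->tgt_scan_tick >= delta_ticks) {
		if (ln->last_scan_ntgts && ln->last_scan_ntgts == ln->n_scsi_tgts)
			return 1;		/* stable: no new targets */
		ln->last_scan_ntgts = ln->n_scsi_tgts;
		ln->tgt_scan_tick = ticks;	/* restart the window */
	}
	return 0;
}

int main(void)
{
	struct ln ln = { 0, 0, 5 };
	printf("%d ", scan_done(&ln, 100, 0, 1000, 50));	/* 0: window opens */
	printf("%d ", scan_done(&ln, 160, 60, 1000, 50));	/* 0: first sample */
	printf("%d\n", scan_done(&ln, 220, 120, 1000, 50));	/* 1: count stable */
	return 0;
}
```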
1890 | |||
1891 | /* | ||
1892 | * csio_notify_lnodes: | ||
1893 | * @hw: HW module | ||
1894 | * @note: Notification | ||
1895 | * | ||
1896 | * Called from the HW SM to fan out notifications to the | ||
1897 | * Lnode SM. Since the HW SM is entered with lock held, | ||
1898 | * there is no need to hold locks here. | ||
1899 | * | ||
1900 | */ | ||
1901 | void | ||
1902 | csio_notify_lnodes(struct csio_hw *hw, enum csio_ln_notify note) | ||
1903 | { | ||
1904 | struct list_head *tmp; | ||
1905 | struct csio_lnode *ln; | ||
1906 | |||
1907 | csio_dbg(hw, "Notifying all nodes of event %d\n", note); | ||
1908 | |||
1909 | /* Traverse children lnodes list and send evt */ | ||
1910 | list_for_each(tmp, &hw->sln_head) { | ||
1911 | ln = (struct csio_lnode *) tmp; | ||
1912 | |||
1913 | switch (note) { | ||
1914 | case CSIO_LN_NOTIFY_HWREADY: | ||
1915 | csio_lnode_start(ln); | ||
1916 | break; | ||
1917 | |||
1918 | case CSIO_LN_NOTIFY_HWRESET: | ||
1919 | case CSIO_LN_NOTIFY_HWREMOVE: | ||
1920 | csio_lnode_close(ln); | ||
1921 | break; | ||
1922 | |||
1923 | case CSIO_LN_NOTIFY_HWSTOP: | ||
1924 | csio_lnode_stop(ln); | ||
1925 | break; | ||
1926 | |||
1927 | default: | ||
1928 | break; | ||
1929 | |||
1930 | } | ||
1931 | } | ||
1932 | } | ||
1933 | |||
1934 | /* | ||
1935 | * csio_disable_lnodes: | ||
1936 | * @hw: HW module | ||
1937 | * @portid: port id | ||
1938 | * @disable: disable/enable flag. | ||
1939 | * If disable=1, disables all lnodes hosted on the given physical port; | ||
1940 | * otherwise enables all the lnodes on the given physical port. | ||
1941 | * This routine needs to be called with the hw lock held. | ||
1942 | */ | ||
1943 | void | ||
1944 | csio_disable_lnodes(struct csio_hw *hw, uint8_t portid, bool disable) | ||
1945 | { | ||
1946 | struct list_head *tmp; | ||
1947 | struct csio_lnode *ln; | ||
1948 | |||
1949 | csio_dbg(hw, "Notifying event to all nodes of port:%d\n", portid); | ||
1950 | |||
1951 | /* Traverse sibling lnodes list and send evt */ | ||
1952 | list_for_each(tmp, &hw->sln_head) { | ||
1953 | ln = (struct csio_lnode *) tmp; | ||
1954 | if (ln->portid != portid) | ||
1955 | continue; | ||
1956 | |||
1957 | if (disable) | ||
1958 | csio_lnode_stop(ln); | ||
1959 | else | ||
1960 | csio_lnode_start(ln); | ||
1961 | } | ||
1962 | } | ||
1963 | |||
1964 | /* | ||
1965 | * csio_ln_init - Initialize an lnode. | ||
1966 | * @ln: lnode | ||
1967 | * | ||
1968 | */ | ||
1969 | static int | ||
1970 | csio_ln_init(struct csio_lnode *ln) | ||
1971 | { | ||
1972 | int rv = -EINVAL; | ||
1973 | struct csio_lnode *rln, *pln; | ||
1974 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
1975 | |||
1976 | csio_init_state(&ln->sm, csio_lns_uninit); | ||
1977 | ln->vnp_flowid = CSIO_INVALID_IDX; | ||
1978 | ln->fcf_flowid = CSIO_INVALID_IDX; | ||
1979 | |||
1980 | if (csio_is_root_ln(ln)) { | ||
1981 | |||
1982 | /* This is the lnode used during initialization */ | ||
1983 | |||
1984 | ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), GFP_KERNEL); | ||
1985 | if (!ln->fcfinfo) { | ||
1986 | csio_ln_err(ln, "Failed to alloc FCF record\n"); | ||
1987 | CSIO_INC_STATS(hw, n_err_nomem); | ||
1988 | goto err; | ||
1989 | } | ||
1990 | |||
1991 | INIT_LIST_HEAD(&ln->fcf_lsthead); | ||
1992 | kref_init(&ln->fcfinfo->kref); | ||
1993 | |||
1994 | if (csio_fdmi_enable && csio_ln_fdmi_init(ln)) | ||
1995 | goto err; | ||
1996 | |||
1997 | } else { /* Either a non-root physical or a virtual lnode */ | ||
1998 | |||
1999 | /* | ||
2000 | * The rest is common for non-root physical and NPIV lnodes. | ||
2001 | * Just get references to all other modules | ||
2002 | */ | ||
2003 | rln = csio_root_lnode(ln); | ||
2004 | |||
2005 | if (csio_is_npiv_ln(ln)) { | ||
2006 | /* NPIV */ | ||
2007 | pln = csio_parent_lnode(ln); | ||
2008 | kref_get(&pln->fcfinfo->kref); | ||
2009 | ln->fcfinfo = pln->fcfinfo; | ||
2010 | } else { | ||
2011 | /* Another non-root physical lnode (FCF) */ | ||
2012 | ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), | ||
2013 | GFP_KERNEL); | ||
2014 | if (!ln->fcfinfo) { | ||
2015 | csio_ln_err(ln, "Failed to alloc FCF info\n"); | ||
2016 | CSIO_INC_STATS(hw, n_err_nomem); | ||
2017 | goto err; | ||
2018 | } | ||
2019 | |||
2020 | kref_init(&ln->fcfinfo->kref); | ||
2021 | |||
2022 | if (csio_fdmi_enable && csio_ln_fdmi_init(ln)) | ||
2023 | goto err; | ||
2024 | } | ||
2025 | |||
2026 | } /* if (!csio_is_root_ln(ln)) */ | ||
2027 | |||
2028 | return 0; | ||
2029 | err: | ||
2030 | return rv; | ||
2031 | } | ||
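Note the ownership model for the FCF record: a physical lnode allocates its own csio_fcf_info with an initial reference, while an NPIV child just takes an extra reference on its parent's record; csio_ln_exit() below drops the reference either way, and csio_free_fcfinfo() runs only when the last holder puts it. A userspace stand-in for that kref pattern:

```c
#include <stdio.h>
#include <stdlib.h>

/* Simplified model of the shared-fcfinfo refcounting: the parent
 * allocates, each NPIV child takes a reference, and the release
 * callback runs only when the last holder puts it back. */
struct fcf { int refs; };

static struct fcf *fcf_alloc(void)
{
	struct fcf *f = calloc(1, sizeof(*f));
	f->refs = 1;				/* kref_init() */
	return f;
}

static void fcf_get(struct fcf *f) { f->refs++; }	/* kref_get() */

static void fcf_put(struct fcf *f)			/* kref_put() */
{
	if (--f->refs == 0) {
		printf("freeing fcfinfo\n");	/* csio_free_fcfinfo() */
		free(f);
	}
}

int main(void)
{
	struct fcf *f = fcf_alloc();	/* physical lnode init */
	fcf_get(f);			/* NPIV child shares it */
	fcf_put(f);			/* child exits: still held */
	fcf_put(f);			/* parent exits: now freed */
	return 0;
}
```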
2032 | |||
2033 | static void | ||
2034 | csio_ln_exit(struct csio_lnode *ln) | ||
2035 | { | ||
2036 | struct csio_lnode *pln; | ||
2037 | |||
2038 | csio_cleanup_rns(ln); | ||
2039 | if (csio_is_npiv_ln(ln)) { | ||
2040 | pln = csio_parent_lnode(ln); | ||
2041 | kref_put(&pln->fcfinfo->kref, csio_free_fcfinfo); | ||
2042 | } else { | ||
2043 | kref_put(&ln->fcfinfo->kref, csio_free_fcfinfo); | ||
2044 | if (csio_fdmi_enable) | ||
2045 | csio_ln_fdmi_exit(ln); | ||
2046 | } | ||
2047 | ln->fcfinfo = NULL; | ||
2048 | } | ||
2049 | |||
2050 | /** | ||
2051 | * csio_lnode_init - Initialize the members of an lnode. | ||
2052 | * @ln: lnode | ||
2053 | * | ||
2054 | */ | ||
2055 | int | ||
2056 | csio_lnode_init(struct csio_lnode *ln, struct csio_hw *hw, | ||
2057 | struct csio_lnode *pln) | ||
2058 | { | ||
2059 | int rv = -EINVAL; | ||
2060 | |||
2061 | /* Link this lnode to hw */ | ||
2062 | csio_lnode_to_hw(ln) = hw; | ||
2063 | |||
2064 | /* Link child to parent if child lnode */ | ||
2065 | if (pln) | ||
2066 | ln->pln = pln; | ||
2067 | else | ||
2068 | ln->pln = NULL; | ||
2069 | |||
2070 | /* Initialize scsi_tgt and timers to zero */ | ||
2071 | ln->n_scsi_tgts = 0; | ||
2072 | ln->last_scan_ntgts = 0; | ||
2073 | ln->tgt_scan_tick = 0; | ||
2074 | |||
2075 | /* Initialize rnode list */ | ||
2076 | INIT_LIST_HEAD(&ln->rnhead); | ||
2077 | INIT_LIST_HEAD(&ln->cln_head); | ||
2078 | |||
2079 | /* Initialize log level for debug */ | ||
2080 | ln->params.log_level = hw->params.log_level; | ||
2081 | |||
2082 | if (csio_ln_init(ln)) | ||
2083 | goto err; | ||
2084 | |||
2085 | /* Add lnode to list of sibling or children lnodes */ | ||
2086 | spin_lock_irq(&hw->lock); | ||
2087 | list_add_tail(&ln->sm.sm_list, pln ? &pln->cln_head : &hw->sln_head); | ||
2088 | if (pln) | ||
2089 | pln->num_vports++; | ||
2090 | spin_unlock_irq(&hw->lock); | ||
2091 | |||
2092 | hw->num_lns++; | ||
2093 | |||
2094 | return 0; | ||
2095 | err: | ||
2096 | csio_lnode_to_hw(ln) = NULL; | ||
2097 | return rv; | ||
2098 | } | ||
2099 | |||
2100 | /** | ||
2101 | * csio_lnode_exit - De-instantiate an lnode. | ||
2102 | * @ln: lnode | ||
2103 | * | ||
2104 | */ | ||
2105 | void | ||
2106 | csio_lnode_exit(struct csio_lnode *ln) | ||
2107 | { | ||
2108 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
2109 | |||
2110 | csio_ln_exit(ln); | ||
2111 | |||
2112 | /* Remove this lnode from hw->sln_head */ | ||
2113 | spin_lock_irq(&hw->lock); | ||
2114 | |||
2115 | list_del_init(&ln->sm.sm_list); | ||
2116 | |||
2117 | /* If it is a child lnode, decrement the | ||
2118 | * vport counter in its parent lnode. | ||
2119 | */ | ||
2120 | if (ln->pln) | ||
2121 | ln->pln->num_vports--; | ||
2122 | |||
2123 | /* Update root lnode pointer */ | ||
2124 | if (list_empty(&hw->sln_head)) | ||
2125 | hw->rln = NULL; | ||
2126 | else | ||
2127 | hw->rln = (struct csio_lnode *)csio_list_next(&hw->sln_head); | ||
2128 | |||
2129 | spin_unlock_irq(&hw->lock); | ||
2130 | |||
2131 | csio_lnode_to_hw(ln) = NULL; | ||
2132 | hw->num_lns--; | ||
2133 | } | ||
diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h new file mode 100644 index 000000000000..8d84988ab06d --- /dev/null +++ b/drivers/scsi/csiostor/csio_lnode.h | |||
@@ -0,0 +1,255 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio FCoE driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #ifndef __CSIO_LNODE_H__ | ||
36 | #define __CSIO_LNODE_H__ | ||
37 | |||
38 | #include <linux/kref.h> | ||
39 | #include <linux/timer.h> | ||
40 | #include <linux/workqueue.h> | ||
41 | #include <scsi/fc/fc_els.h> | ||
42 | |||
43 | |||
44 | #include "csio_defs.h" | ||
45 | #include "csio_hw.h" | ||
46 | |||
47 | #define CSIO_FCOE_MAX_NPIV 128 | ||
48 | #define CSIO_FCOE_MAX_RNODES 2048 | ||
49 | |||
50 | /* FDMI port attribute unknown speed */ | ||
51 | #define CSIO_HBA_PORTSPEED_UNKNOWN 0x8000 | ||
52 | |||
53 | extern int csio_fcoe_rnodes; | ||
54 | extern int csio_fdmi_enable; | ||
55 | |||
56 | /* State machine events */ | ||
57 | enum csio_ln_ev { | ||
58 | CSIO_LNE_NONE = (uint32_t)0, | ||
59 | CSIO_LNE_LINKUP, | ||
60 | CSIO_LNE_FAB_INIT_DONE, | ||
61 | CSIO_LNE_LINK_DOWN, | ||
62 | CSIO_LNE_DOWN_LINK, | ||
63 | CSIO_LNE_LOGO, | ||
64 | CSIO_LNE_CLOSE, | ||
65 | CSIO_LNE_MAX_EVENT, | ||
66 | }; | ||
67 | |||
68 | |||
69 | struct csio_fcf_info { | ||
70 | struct list_head list; | ||
71 | uint8_t priority; | ||
72 | uint8_t mac[6]; | ||
73 | uint8_t name_id[8]; | ||
74 | uint8_t fabric[8]; | ||
75 | uint16_t vf_id; | ||
76 | uint8_t vlan_id; | ||
77 | uint16_t max_fcoe_size; | ||
78 | uint8_t fc_map[3]; | ||
79 | uint32_t fka_adv; | ||
80 | uint32_t fcfi; | ||
81 | uint8_t get_next:1; | ||
82 | uint8_t link_aff:1; | ||
83 | uint8_t fpma:1; | ||
84 | uint8_t spma:1; | ||
85 | uint8_t login:1; | ||
86 | uint8_t portid; | ||
87 | uint8_t spma_mac[6]; | ||
88 | struct kref kref; | ||
89 | }; | ||
90 | |||
91 | /* Defines for flags */ | ||
92 | #define CSIO_LNF_FIPSUPP 0x00000001 /* Fip Supported */ | ||
93 | #define CSIO_LNF_NPIVSUPP 0x00000002 /* NPIV supported */ | ||
94 | #define CSIO_LNF_LINK_ENABLE 0x00000004 /* Link enabled */ | ||
95 | #define CSIO_LNF_FDMI_ENABLE 0x00000008 /* FDMI support */ | ||
96 | |||
97 | /* Transport events */ | ||
98 | enum csio_ln_fc_evt { | ||
99 | CSIO_LN_FC_LINKUP = 1, | ||
100 | CSIO_LN_FC_LINKDOWN, | ||
101 | CSIO_LN_FC_RSCN, | ||
102 | CSIO_LN_FC_ATTRIB_UPDATE, | ||
103 | }; | ||
104 | |||
105 | /* Lnode stats */ | ||
106 | struct csio_lnode_stats { | ||
107 | uint32_t n_link_up; /* Link up */ | ||
108 | uint32_t n_link_down; /* Link down */ | ||
109 | uint32_t n_err; /* error */ | ||
110 | uint32_t n_err_nomem; /* memory not available */ | ||
111 | uint32_t n_inval_parm; /* Invalid parameters */ | ||
112 | uint32_t n_evt_unexp; /* unexpected event */ | ||
113 | uint32_t n_evt_drop; /* dropped event */ | ||
114 | uint32_t n_rnode_match; /* matched rnode */ | ||
115 | uint32_t n_dev_loss_tmo; /* Device loss timeout */ | ||
116 | uint32_t n_fdmi_err; /* fdmi err */ | ||
117 | uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */ | ||
118 | enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */ | ||
119 | uint32_t n_rnode_alloc; /* rnode allocated */ | ||
120 | uint32_t n_rnode_free; /* rnode freed */ | ||
121 | uint32_t n_rnode_nomem; /* rnode alloc failure */ | ||
122 | uint32_t n_input_requests; /* Input Requests */ | ||
123 | uint32_t n_output_requests; /* Output Requests */ | ||
124 | uint32_t n_control_requests; /* Control Requests */ | ||
125 | uint32_t n_input_bytes; /* Input Bytes */ | ||
126 | uint32_t n_output_bytes; /* Output Bytes */ | ||
127 | uint32_t rsvd1; | ||
128 | }; | ||
129 | |||
130 | /* Common Lnode params */ | ||
131 | struct csio_lnode_params { | ||
132 | uint32_t ra_tov; | ||
133 | uint32_t fcfi; | ||
134 | uint32_t log_level; /* Module level for debugging */ | ||
135 | }; | ||
136 | |||
137 | struct csio_service_parms { | ||
138 | struct fc_els_csp csp; /* Common service parms */ | ||
139 | uint8_t wwpn[8]; /* WWPN */ | ||
140 | uint8_t wwnn[8]; /* WWNN */ | ||
141 | struct fc_els_cssp clsp[4]; /* Class service params */ | ||
142 | uint8_t vvl[16]; /* Vendor version level */ | ||
143 | }; | ||
144 | |||
145 | /* Lnode */ | ||
146 | struct csio_lnode { | ||
147 | struct csio_sm sm; /* State machine + sibling | ||
148 | * lnode list. | ||
149 | */ | ||
150 | struct csio_hw *hwp; /* Pointer to the HW module */ | ||
151 | uint8_t portid; /* Port ID */ | ||
152 | uint8_t rsvd1; | ||
153 | uint16_t rsvd2; | ||
154 | uint32_t dev_num; /* Device number */ | ||
155 | uint32_t flags; /* Flags */ | ||
156 | struct list_head fcf_lsthead; /* FCF entries */ | ||
157 | struct csio_fcf_info *fcfinfo; /* FCF in use */ | ||
158 | struct csio_ioreq *mgmt_req; /* MGMT request */ | ||
159 | |||
160 | /* FCoE identifiers */ | ||
161 | uint8_t mac[6]; | ||
162 | uint32_t nport_id; | ||
163 | struct csio_service_parms ln_sparm; /* Service parms */ | ||
164 | |||
165 | /* Firmware identifiers */ | ||
166 | uint32_t fcf_flowid; /* fcf flowid */ | ||
167 | uint32_t vnp_flowid; | ||
168 | uint16_t ssn_cnt; /* Registered Session */ | ||
169 | uint8_t cur_evt; /* Current event */ | ||
170 | uint8_t prev_evt; /* Previous event */ | ||
171 | |||
172 | /* Children */ | ||
173 | struct list_head cln_head; /* Head of the children lnode | ||
174 | * list. | ||
175 | */ | ||
176 | uint32_t num_vports; /* Total NPIV/children LNodes*/ | ||
177 | struct csio_lnode *pln; /* Parent lnode of child | ||
178 | * lnodes. | ||
179 | */ | ||
180 | struct list_head cmpl_q; /* Pending I/Os on this lnode */ | ||
181 | |||
182 | /* Remote node information */ | ||
183 | struct list_head rnhead; /* Head of rnode list */ | ||
184 | uint32_t num_reg_rnodes; /* Number of rnodes registered | ||
185 | * with the host. | ||
186 | */ | ||
187 | uint32_t n_scsi_tgts; /* Number of scsi targets | ||
188 | * found | ||
189 | */ | ||
190 | uint32_t last_scan_ntgts;/* Number of scsi targets | ||
191 | * found per last scan. | ||
192 | */ | ||
193 | uint32_t tgt_scan_tick; /* timer started after | ||
194 | * new tgt found | ||
195 | */ | ||
196 | /* FC transport data */ | ||
197 | struct fc_vport *fc_vport; | ||
198 | struct fc_host_statistics fch_stats; | ||
199 | |||
200 | struct csio_lnode_stats stats; /* Common lnode stats */ | ||
201 | struct csio_lnode_params params; /* Common lnode params */ | ||
202 | }; | ||
203 | |||
204 | #define csio_lnode_to_hw(ln) ((ln)->hwp) | ||
205 | #define csio_root_lnode(ln) (csio_lnode_to_hw((ln))->rln) | ||
206 | #define csio_parent_lnode(ln) ((ln)->pln) | ||
207 | #define csio_ln_flowid(ln) ((ln)->vnp_flowid) | ||
208 | #define csio_ln_wwpn(ln) ((ln)->ln_sparm.wwpn) | ||
209 | #define csio_ln_wwnn(ln) ((ln)->ln_sparm.wwnn) | ||
210 | |||
211 | #define csio_is_root_ln(ln) (((ln) == csio_root_lnode((ln))) ? 1 : 0) | ||
212 | #define csio_is_phys_ln(ln) (((ln)->pln == NULL) ? 1 : 0) | ||
213 | #define csio_is_npiv_ln(ln) (((ln)->pln != NULL) ? 1 : 0) | ||
214 | |||
215 | |||
216 | #define csio_ln_dbg(_ln, _fmt, ...) \ | ||
217 | csio_dbg(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \ | ||
218 | CSIO_DEVID_LO(_ln), ##__VA_ARGS__); | ||
219 | |||
220 | #define csio_ln_err(_ln, _fmt, ...) \ | ||
221 | csio_err(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \ | ||
222 | CSIO_DEVID_LO(_ln), ##__VA_ARGS__); | ||
223 | |||
224 | #define csio_ln_warn(_ln, _fmt, ...) \ | ||
225 | csio_warn(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \ | ||
226 | CSIO_DEVID_LO(_ln), ##__VA_ARGS__); | ||
227 | |||
228 | /* HW->Lnode notifications */ | ||
229 | enum csio_ln_notify { | ||
230 | CSIO_LN_NOTIFY_HWREADY = 1, | ||
231 | CSIO_LN_NOTIFY_HWSTOP, | ||
232 | CSIO_LN_NOTIFY_HWREMOVE, | ||
233 | CSIO_LN_NOTIFY_HWRESET, | ||
234 | }; | ||
235 | |||
236 | void csio_fcoe_fwevt_handler(struct csio_hw *, __u8 cpl_op, __be64 *); | ||
237 | int csio_is_lnode_ready(struct csio_lnode *); | ||
238 | void csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str); | ||
239 | struct csio_lnode *csio_lnode_lookup_by_wwpn(struct csio_hw *, uint8_t *); | ||
240 | int csio_get_phy_port_stats(struct csio_hw *, uint8_t, | ||
241 | struct fw_fcoe_port_stats *); | ||
242 | int csio_scan_done(struct csio_lnode *, unsigned long, unsigned long, | ||
243 | unsigned long, unsigned long); | ||
244 | void csio_notify_lnodes(struct csio_hw *, enum csio_ln_notify); | ||
245 | void csio_disable_lnodes(struct csio_hw *, uint8_t, bool); | ||
246 | void csio_lnode_async_event(struct csio_lnode *, enum csio_ln_fc_evt); | ||
247 | int csio_ln_fdmi_start(struct csio_lnode *, void *); | ||
248 | int csio_lnode_start(struct csio_lnode *); | ||
249 | void csio_lnode_stop(struct csio_lnode *); | ||
250 | void csio_lnode_close(struct csio_lnode *); | ||
251 | int csio_lnode_init(struct csio_lnode *, struct csio_hw *, | ||
252 | struct csio_lnode *); | ||
253 | void csio_lnode_exit(struct csio_lnode *); | ||
254 | |||
255 | #endif /* ifndef __CSIO_LNODE_H__ */ | ||
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c new file mode 100644 index 000000000000..7aaf1027688c --- /dev/null +++ b/drivers/scsi/csiostor/csio_mb.c | |||
@@ -0,0 +1,1770 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio FCoE driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/delay.h> | ||
36 | #include <linux/jiffies.h> | ||
37 | #include <linux/string.h> | ||
38 | #include <scsi/scsi_device.h> | ||
39 | #include <scsi/scsi_transport_fc.h> | ||
40 | |||
41 | #include "csio_hw.h" | ||
42 | #include "csio_lnode.h" | ||
43 | #include "csio_rnode.h" | ||
44 | #include "csio_mb.h" | ||
45 | #include "csio_wr.h" | ||
46 | |||
47 | #define csio_mb_is_host_owner(__owner) ((__owner) == CSIO_MBOWNER_PL) | ||
48 | |||
49 | /* MB Command/Response Helpers */ | ||
50 | /* | ||
51 | * csio_mb_fw_retval - FW return value from a mailbox response. | ||
52 | * @mbp: Mailbox structure | ||
53 | * | ||
54 | */ | ||
55 | enum fw_retval | ||
56 | csio_mb_fw_retval(struct csio_mb *mbp) | ||
57 | { | ||
58 | struct fw_cmd_hdr *hdr; | ||
59 | |||
60 | hdr = (struct fw_cmd_hdr *)(mbp->mb); | ||
61 | |||
62 | return FW_CMD_RETVAL_GET(ntohl(hdr->lo)); | ||
63 | } | ||
64 | |||
65 | /* | ||
66 | * csio_mb_hello - FW HELLO command helper | ||
67 | * @hw: The HW structure | ||
68 | * @mbp: Mailbox structure | ||
69 | * @m_mbox: Master mailbox number, if any. | ||
70 | * @a_mbox: Mailbox number for async notifications. | ||
71 | * @master: Device mastership. | ||
72 | * @cbfn: Callback, if any. | ||
73 | * | ||
74 | */ | ||
75 | void | ||
76 | csio_mb_hello(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, | ||
77 | uint32_t m_mbox, uint32_t a_mbox, enum csio_dev_master master, | ||
78 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
79 | { | ||
80 | struct fw_hello_cmd *cmdp = (struct fw_hello_cmd *)(mbp->mb); | ||
81 | |||
82 | CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); | ||
83 | |||
84 | cmdp->op_to_write = htonl(FW_CMD_OP(FW_HELLO_CMD) | | ||
85 | FW_CMD_REQUEST | FW_CMD_WRITE); | ||
86 | cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); | ||
87 | cmdp->err_to_clearinit = htonl( | ||
88 | FW_HELLO_CMD_MASTERDIS(master == CSIO_MASTER_CANT) | | ||
89 | FW_HELLO_CMD_MASTERFORCE(master == CSIO_MASTER_MUST) | | ||
90 | FW_HELLO_CMD_MBMASTER(master == CSIO_MASTER_MUST ? | ||
91 | m_mbox : FW_HELLO_CMD_MBMASTER_MASK) | | ||
92 | FW_HELLO_CMD_MBASYNCNOT(a_mbox) | | ||
93 | FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) | | ||
94 | FW_HELLO_CMD_CLEARINIT); | ||
95 | |||
96 | } | ||
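All of these helpers follow the same shape: they only format the command in the mailbox payload (mbp->mb), packing the command length in 16-byte units into the shared retval/len16 word; the caller allocates the mailbox, issues it, and parses the response, as csio_get_phy_port_stats() earlier in this patch shows. A sketch of the LEN16/RETVAL packing, assuming the cxgb4-style field layout (LEN16 in bits 0..7, RETVAL in bits 8..15) rather than quoting the real t4fw_api.h macros:

```c
#include <stdio.h>
#include <stdint.h>

/* Assumed layout of the shared retval_len16 word, modeled on the
 * cxgb4 conventions: LEN16 in bits 0..7, RETVAL in bits 8..15. */
#define FW_CMD_LEN16(x)		((uint32_t)(x) << 0)
#define FW_CMD_RETVAL(x)	((uint32_t)(x) << 8)
#define FW_CMD_RETVAL_GET(x)	(((x) >> 8) & 0xff)

int main(void)
{
	uint32_t word;

	/* Host side: a 32-byte command occupies two 16-byte units. */
	word = FW_CMD_LEN16(32 / 16);

	/* FW side: the response echoes the word with RETVAL filled in. */
	word |= FW_CMD_RETVAL(0);		/* 0 == FW_SUCCESS */

	printf("len16=%u retval=%u\n", word & 0xff, FW_CMD_RETVAL_GET(word));
	return 0;
}
```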
97 | |||
98 | /* | ||
99 | * csio_mb_process_hello_rsp - FW HELLO response processing helper | ||
100 | * @hw: The HW structure | ||
101 | * @mbp: Mailbox structure | ||
102 | * @retval: Mailbox return value from Firmware | ||
103 | * @state: Device state derived from the response. | ||
104 | * @mpfn: Master pfn | ||
105 | * | ||
106 | */ | ||
107 | void | ||
108 | csio_mb_process_hello_rsp(struct csio_hw *hw, struct csio_mb *mbp, | ||
109 | enum fw_retval *retval, enum csio_dev_state *state, | ||
110 | uint8_t *mpfn) | ||
111 | { | ||
112 | struct fw_hello_cmd *rsp = (struct fw_hello_cmd *)(mbp->mb); | ||
113 | uint32_t value; | ||
114 | |||
115 | *retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16)); | ||
116 | |||
117 | if (*retval == FW_SUCCESS) { | ||
118 | hw->fwrev = ntohl(rsp->fwrev); | ||
119 | |||
120 | value = ntohl(rsp->err_to_clearinit); | ||
121 | *mpfn = FW_HELLO_CMD_MBMASTER_GET(value); | ||
122 | |||
123 | if (value & FW_HELLO_CMD_INIT) | ||
124 | *state = CSIO_DEV_STATE_INIT; | ||
125 | else if (value & FW_HELLO_CMD_ERR) | ||
126 | *state = CSIO_DEV_STATE_ERR; | ||
127 | else | ||
128 | *state = CSIO_DEV_STATE_UNINIT; | ||
129 | } | ||
130 | } | ||
131 | |||
132 | /* | ||
133 | * csio_mb_bye - FW BYE command helper | ||
134 | * @hw: The HW structure | ||
135 | * @mbp: Mailbox structure | ||
136 | * @cbfn: Callback, if any. | ||
137 | * | ||
138 | */ | ||
139 | void | ||
140 | csio_mb_bye(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, | ||
141 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
142 | { | ||
143 | struct fw_bye_cmd *cmdp = (struct fw_bye_cmd *)(mbp->mb); | ||
144 | |||
145 | CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); | ||
146 | |||
147 | cmdp->op_to_write = htonl(FW_CMD_OP(FW_BYE_CMD) | | ||
148 | FW_CMD_REQUEST | FW_CMD_WRITE); | ||
149 | cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); | ||
150 | |||
151 | } | ||
152 | |||
153 | /* | ||
154 | * csio_mb_reset - FW RESET command helper | ||
155 | * @hw: The HW structure | ||
156 | * @mbp: Mailbox structure | ||
157 | * @reset: Type of reset. | ||
158 | * @cbfn: Callback, if any. | ||
159 | * | ||
160 | */ | ||
161 | void | ||
162 | csio_mb_reset(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, | ||
163 | int reset, int halt, | ||
164 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
165 | { | ||
166 | struct fw_reset_cmd *cmdp = (struct fw_reset_cmd *)(mbp->mb); | ||
167 | |||
168 | CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); | ||
169 | |||
170 | cmdp->op_to_write = htonl(FW_CMD_OP(FW_RESET_CMD) | | ||
171 | FW_CMD_REQUEST | FW_CMD_WRITE); | ||
172 | cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); | ||
173 | cmdp->val = htonl(reset); | ||
174 | cmdp->halt_pkd = htonl(halt); | ||
175 | |||
176 | } | ||
177 | |||
178 | /* | ||
179 | * csio_mb_params - FW PARAMS command helper | ||
180 | * @hw: The HW structure | ||
181 | * @mbp: Mailbox structure | ||
182 | * @tmo: Command timeout. | ||
183 | * @pf: PF number. | ||
184 | * @vf: VF number. | ||
185 | * @nparams: Number of parameters | ||
186 | * @params: Parameter mnemonic array. | ||
187 | * @val: Parameter value array. | ||
188 | * @wr: Write/Read PARAMS. | ||
189 | * @cbfn: Callback, if any. | ||
190 | * | ||
191 | */ | ||
192 | void | ||
193 | csio_mb_params(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, | ||
194 | unsigned int pf, unsigned int vf, unsigned int nparams, | ||
195 | const u32 *params, u32 *val, bool wr, | ||
196 | void (*cbfn)(struct csio_hw *, struct csio_mb *)) | ||
197 | { | ||
198 | uint32_t i; | ||
199 | uint32_t temp_params = 0, temp_val = 0; | ||
200 | struct fw_params_cmd *cmdp = (struct fw_params_cmd *)(mbp->mb); | ||
201 | __be32 *p = &cmdp->param[0].mnem; | ||
202 | |||
203 | CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); | ||
204 | |||
205 | cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | | ||
206 | FW_CMD_REQUEST | | ||
207 | (wr ? FW_CMD_WRITE : FW_CMD_READ) | | ||
208 | FW_PARAMS_CMD_PFN(pf) | | ||
209 | FW_PARAMS_CMD_VFN(vf)); | ||
210 | cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); | ||
211 | |||
212 | /* Write Params */ | ||
213 | if (wr) { | ||
214 | while (nparams--) { | ||
215 | temp_params = *params++; | ||
216 | temp_val = *val++; | ||
217 | |||
218 | *p++ = htonl(temp_params); | ||
219 | *p++ = htonl(temp_val); | ||
220 | } | ||
221 | } else { | ||
222 | for (i = 0; i < nparams; i++, p += 2) { | ||
223 | temp_params = *params++; | ||
224 | *p = htonl(temp_params); | ||
225 | } | ||
226 | } | ||
227 | |||
228 | } | ||
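The command body is laid out as an array of (mnemonic, value) 32-bit pairs, which explains the two loops above: a write fills both slots of each pair, while a read fills only the mnemonic slots and leaves the value slots for the firmware, stepping by two words each time (the same stride csio_mb_process_read_params_rsp() uses below). A tiny host-side sketch of the pair layout:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t body[6] = { 0 };	/* room for 3 (mnem, val) pairs */
	const uint32_t params[2] = { 0x11, 0x22 };
	const uint32_t vals[2]   = { 0xaa, 0xbb };
	uint32_t *p = body;

	/* Write: pack mnemonic and value side by side. */
	for (int i = 0; i < 2; i++) {
		*p++ = params[i];
		*p++ = vals[i];
	}
	/* A read would instead set only body[0], body[2], ... and the
	 * firmware would fill body[1], body[3], ... in its response. */
	for (int i = 0; i < 4; i++)
		printf("%#x ", (unsigned)body[i]);	/* 0x11 0xaa 0x22 0xbb */
	printf("\n");
	return 0;
}
```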
229 | |||
230 | /* | ||
231 | * csio_mb_process_read_params_rsp - FW PARAMS response processing helper | ||
232 | * @hw: The HW structure | ||
233 | * @mbp: Mailbox structure | ||
234 | * @retval: Mailbox return value from Firmware | ||
235 | * @nparams: Number of parameters | ||
236 | * @val: Parameter value array. | ||
237 | * | ||
238 | */ | ||
239 | void | ||
240 | csio_mb_process_read_params_rsp(struct csio_hw *hw, struct csio_mb *mbp, | ||
241 | enum fw_retval *retval, unsigned int nparams, | ||
242 | u32 *val) | ||
243 | { | ||
244 | struct fw_params_cmd *rsp = (struct fw_params_cmd *)(mbp->mb); | ||
245 | uint32_t i; | ||
246 | __be32 *p = &rsp->param[0].val; | ||
247 | |||
248 | *retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16)); | ||
249 | |||
250 | if (*retval == FW_SUCCESS) | ||
251 | for (i = 0; i < nparams; i++, p += 2) | ||
252 | *val++ = ntohl(*p); | ||
253 | } | ||
254 | |||
255 | /* | ||
256 | * csio_mb_ldst - FW LDST command | ||
257 | * @hw: The HW structure | ||
258 | * @mbp: Mailbox structure | ||
259 | * @tmo: timeout | ||
260 | * @reg: register | ||
261 | * | ||
262 | */ | ||
263 | void | ||
264 | csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, int reg) | ||
265 | { | ||
266 | struct fw_ldst_cmd *ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb); | ||
267 | CSIO_INIT_MBP(mbp, ldst_cmd, tmo, hw, NULL, 1); | ||
268 | |||
269 | /* | ||
270 | * Construct and send the Firmware LDST Command to retrieve the | ||
271 | * specified PCI-E Configuration Space register. | ||
272 | */ | ||
273 | ldst_cmd->op_to_addrspace = | ||
274 | htonl(FW_CMD_OP(FW_LDST_CMD) | | ||
275 | FW_CMD_REQUEST | | ||
276 | FW_CMD_READ | | ||
277 | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE)); | ||
278 | ldst_cmd->cycles_to_len16 = htonl(FW_LEN16(struct fw_ldst_cmd)); | ||
279 | ldst_cmd->u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1); | ||
280 | ldst_cmd->u.pcie.ctrl_to_fn = | ||
281 | (FW_LDST_CMD_LC | FW_LDST_CMD_FN(hw->pfn)); | ||
282 | ldst_cmd->u.pcie.r = (uint8_t)reg; | ||
283 | } | ||
284 | |||
285 | /* | ||
286 | * | ||
287 | * csio_mb_caps_config - FW Read/Write Capabilities command helper | ||
288 | * @hw: The HW structure | ||
289 | * @mbp: Mailbox structure | ||
290 | * @wr: Write if 1, Read if 0 | ||
291 | * @init: Turn on initiator mode. | ||
292 | * @tgt: Turn on target mode. | ||
293 | * @cofld: If 1, Control Offload for FCoE | ||
294 | * @cbfn: Callback, if any. | ||
295 | * | ||
296 | * This helper assumes that cmdp has MB payload from a previous CAPS | ||
297 | * read command. | ||
298 | */ | ||
299 | void | ||
300 | csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, | ||
301 | bool wr, bool init, bool tgt, bool cofld, | ||
302 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
303 | { | ||
304 | struct fw_caps_config_cmd *cmdp = | ||
305 | (struct fw_caps_config_cmd *)(mbp->mb); | ||
306 | |||
307 | CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, wr ? 0 : 1); | ||
308 | |||
309 | cmdp->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | | ||
310 | FW_CMD_REQUEST | | ||
311 | (wr ? FW_CMD_WRITE : FW_CMD_READ)); | ||
312 | cmdp->cfvalid_to_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); | ||
313 | |||
314 | /* Read config */ | ||
315 | if (!wr) | ||
316 | return; | ||
317 | |||
318 | /* Write config */ | ||
319 | cmdp->fcoecaps = 0; | ||
320 | |||
321 | if (cofld) | ||
322 | cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_CTRL_OFLD); | ||
323 | if (init) | ||
324 | cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_INITIATOR); | ||
325 | if (tgt) | ||
326 | cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_TARGET); | ||
327 | } | ||
328 | |||
329 | void | ||
330 | csio_rss_glb_config(struct csio_hw *hw, struct csio_mb *mbp, | ||
331 | uint32_t tmo, uint8_t mode, unsigned int flags, | ||
332 | void (*cbfn)(struct csio_hw *, struct csio_mb *)) | ||
333 | { | ||
334 | struct fw_rss_glb_config_cmd *cmdp = | ||
335 | (struct fw_rss_glb_config_cmd *)(mbp->mb); | ||
336 | |||
337 | CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); | ||
338 | |||
339 | cmdp->op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | | ||
340 | FW_CMD_REQUEST | FW_CMD_WRITE); | ||
341 | cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); | ||
342 | |||
343 | if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) { | ||
344 | cmdp->u.manual.mode_pkd = | ||
345 | htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); | ||
346 | } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { | ||
347 | cmdp->u.basicvirtual.mode_pkd = | ||
348 | htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); | ||
349 | cmdp->u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags); | ||
350 | } | ||
351 | } | ||
352 | |||
353 | |||
354 | /* | ||
355 | * csio_mb_pfvf - FW Write PF/VF capabilities command helper. | ||
356 | * @hw: The HW structure | ||
357 | * @mbp: Mailbox structure | ||
358 | * @pf: PF number | ||
359 | * @vf: VF number | ||
360 | * @txq: Number of egress queues | ||
361 | * @txq_eth_ctrl: Number of Ethernet control queues | ||
362 | * @rxqi: Number of ingress queue interrupt resources | ||
363 | * @rxq: Number of ingress queues | ||
364 | * @tc: Traffic class | ||
365 | * @vi: Number of virtual interfaces | ||
366 | * @pmask: Port access mask | ||
367 | * @rcaps: Read capabilities | ||
368 | * @wxcaps: Write/execute capabilities | ||
369 | * @cbfn: Callback, if any. | ||
370 | * | ||
371 | */ | ||
372 | void | ||
373 | csio_mb_pfvf(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, | ||
374 | unsigned int pf, unsigned int vf, unsigned int txq, | ||
375 | unsigned int txq_eth_ctrl, unsigned int rxqi, | ||
376 | unsigned int rxq, unsigned int tc, unsigned int vi, | ||
377 | unsigned int cmask, unsigned int pmask, unsigned int nexactf, | ||
378 | unsigned int rcaps, unsigned int wxcaps, | ||
379 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
380 | { | ||
381 | struct fw_pfvf_cmd *cmdp = (struct fw_pfvf_cmd *)(mbp->mb); | ||
382 | |||
383 | CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); | ||
384 | |||
385 | cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | | ||
386 | FW_CMD_REQUEST | | ||
387 | FW_CMD_WRITE | | ||
388 | FW_PFVF_CMD_PFN(pf) | | ||
389 | FW_PFVF_CMD_VFN(vf)); | ||
390 | cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); | ||
391 | cmdp->niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) | | ||
392 | FW_PFVF_CMD_NIQ(rxq)); | ||
393 | |||
394 | cmdp->type_to_neq = htonl(FW_PFVF_CMD_TYPE | | ||
395 | FW_PFVF_CMD_CMASK(cmask) | | ||
396 | FW_PFVF_CMD_PMASK(pmask) | | ||
397 | FW_PFVF_CMD_NEQ(txq)); | ||
398 | cmdp->tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | | ||
399 | FW_PFVF_CMD_NVI(vi) | | ||
400 | FW_PFVF_CMD_NEXACTF(nexactf)); | ||
401 | cmdp->r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) | | ||
402 | FW_PFVF_CMD_WX_CAPS(wxcaps) | | ||
403 | FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl)); | ||
404 | } | ||
405 | |||
406 | #define CSIO_ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ | ||
407 | FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG) | ||
408 | |||
409 | /* | ||
410 | * csio_mb_port - FW PORT command helper | ||
411 | * @hw: The HW structure | ||
412 | * @mbp: Mailbox structure | ||
413 | * @tmo: Command timeout | ||
414 | * @portid: Port ID to get/set info | ||
415 | * @wr: Write/Read PORT information. | ||
416 | * @fc: Flow control | ||
417 | * @caps: Port capabilities to set. | ||
418 | * @cbfn: Callback, if any. | ||
419 | * | ||
420 | */ | ||
421 | void | ||
422 | csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, | ||
423 | uint8_t portid, bool wr, uint32_t fc, uint16_t caps, | ||
424 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
425 | { | ||
426 | struct fw_port_cmd *cmdp = (struct fw_port_cmd *)(mbp->mb); | ||
427 | unsigned int lfc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO); | ||
428 | |||
429 | CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); | ||
430 | |||
431 | cmdp->op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | | ||
432 | FW_CMD_REQUEST | | ||
433 | (wr ? FW_CMD_EXEC : FW_CMD_READ) | | ||
434 | FW_PORT_CMD_PORTID(portid)); | ||
435 | if (!wr) { | ||
436 | cmdp->action_to_len16 = htonl( | ||
437 | FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) | | ||
438 | FW_CMD_LEN16(sizeof(*cmdp) / 16)); | ||
439 | return; | ||
440 | } | ||
441 | |||
442 | /* Set port */ | ||
443 | cmdp->action_to_len16 = htonl( | ||
444 | FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | | ||
445 | FW_CMD_LEN16(sizeof(*cmdp) / 16)); | ||
446 | |||
447 | if (fc & PAUSE_RX) | ||
448 | lfc |= FW_PORT_CAP_FC_RX; | ||
449 | if (fc & PAUSE_TX) | ||
450 | lfc |= FW_PORT_CAP_FC_TX; | ||
451 | |||
452 | if (!(caps & FW_PORT_CAP_ANEG)) | ||
453 | cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) | lfc); | ||
454 | else | ||
455 | cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) | | ||
456 | lfc | mdi); | ||
457 | } | ||
458 | |||
459 | /* | ||
460 | * csio_mb_process_read_port_rsp - FW PORT command response processing helper | ||
461 | * @hw: The HW structure | ||
462 | * @mbp: Mailbox structure | ||
463 | * @retval: Mailbox return value from Firmware | ||
464 | * @caps: port capabilities | ||
465 | * | ||
466 | */ | ||
467 | void | ||
468 | csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp, | ||
469 | enum fw_retval *retval, uint16_t *caps) | ||
470 | { | ||
471 | struct fw_port_cmd *rsp = (struct fw_port_cmd *)(mbp->mb); | ||
472 | |||
473 | *retval = FW_CMD_RETVAL_GET(ntohl(rsp->action_to_len16)); | ||
474 | |||
475 | if (*retval == FW_SUCCESS) | ||
476 | *caps = ntohs(rsp->u.info.pcap); | ||
477 | } | ||
478 | |||
479 | /* | ||
480 | * csio_mb_initialize - FW INITIALIZE command helper | ||
481 | * @hw: The HW structure | ||
482 | * @mbp: Mailbox structure | ||
483 | * @tmo: Command timeout | ||
484 | * @cbfn: Callback, if any. | ||
485 | * | ||
486 | */ | ||
487 | void | ||
488 | csio_mb_initialize(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, | ||
489 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
490 | { | ||
491 | struct fw_initialize_cmd *cmdp = (struct fw_initialize_cmd *)(mbp->mb); | ||
492 | |||
493 | CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); | ||
494 | |||
495 | cmdp->op_to_write = htonl(FW_CMD_OP(FW_INITIALIZE_CMD) | | ||
496 | FW_CMD_REQUEST | FW_CMD_WRITE); | ||
497 | cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); | ||
498 | |||
499 | } | ||
500 | |||
501 | /* | ||
502 | * csio_mb_iq_alloc - Initializes the mailbox to allocate an | ||
503 | * Ingress DMA queue in the firmware. | ||
504 | * | ||
505 | * @hw: The hw structure | ||
506 | * @mbp: Mailbox structure to initialize | ||
507 | * @priv: Private object | ||
508 | * @mb_tmo: Mailbox time-out period (in ms). | ||
509 | * @iq_params: Ingress queue params needed for allocation. | ||
510 | * @cbfn: The call-back function | ||
511 | * | ||
512 | * | ||
513 | */ | ||
514 | static void | ||
515 | csio_mb_iq_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv, | ||
516 | uint32_t mb_tmo, struct csio_iq_params *iq_params, | ||
517 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
518 | { | ||
519 | struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb); | ||
520 | |||
521 | CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1); | ||
522 | |||
523 | cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | | ||
524 | FW_CMD_REQUEST | FW_CMD_EXEC | | ||
525 | FW_IQ_CMD_PFN(iq_params->pfn) | | ||
526 | FW_IQ_CMD_VFN(iq_params->vfn)); | ||
527 | |||
528 | cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | | ||
529 | FW_CMD_LEN16(sizeof(*cmdp) / 16)); | ||
530 | |||
531 | cmdp->type_to_iqandstindex = htonl( | ||
532 | FW_IQ_CMD_VIID(iq_params->viid) | | ||
533 | FW_IQ_CMD_TYPE(iq_params->type) | | ||
534 | FW_IQ_CMD_IQASYNCH(iq_params->iqasynch)); | ||
535 | |||
536 | cmdp->fl0size = htons(iq_params->fl0size); | ||
537 | cmdp->fl1size = htons(iq_params->fl1size); | ||
538 | |||
539 | } /* csio_mb_iq_alloc */ | ||
540 | |||
541 | /* | ||
542 | * csio_mb_iq_write - Initializes the mailbox for writing into an | ||
543 | * Ingress DMA Queue. | ||
544 | * | ||
545 | * @hw: The HW structure | ||
546 | * @mbp: Mailbox structure to initialize | ||
547 | * @priv: Private object | ||
548 | * @mb_tmo: Mailbox time-out period (in ms). | ||
549 | * @cascaded_req: TRUE - if this request is cascaded with iq-alloc request. | ||
550 | * @iq_params: Ingress queue params needed for writing. | ||
551 | * @cbfn: The call-back function | ||
552 | * | ||
553 | * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating, | ||
554 | * because this IQ write request can be cascaded with a previous | ||
555 | * IQ alloc request, and we don't want to overwrite the bits set by | ||
556 | * that request. This logic will work even in a non-cascaded case, since the | ||
557 | * cmdp structure is zeroed out by CSIO_INIT_MBP. | ||
558 | */ | ||
559 | static void | ||
560 | csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv, | ||
561 | uint32_t mb_tmo, bool cascaded_req, | ||
562 | struct csio_iq_params *iq_params, | ||
563 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
564 | { | ||
565 | struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb); | ||
566 | |||
567 | uint32_t iq_start_stop = (iq_params->iq_start) ? | ||
568 | FW_IQ_CMD_IQSTART(1) : | ||
569 | FW_IQ_CMD_IQSTOP(1); | ||
570 | |||
571 | /* | ||
572 | * If this IQ write is cascaded with IQ alloc request, do not | ||
573 | * re-initialize with 0's. | ||
574 | * | ||
575 | */ | ||
576 | if (!cascaded_req) | ||
577 | CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1); | ||
578 | |||
579 | cmdp->op_to_vfn |= htonl(FW_CMD_OP(FW_IQ_CMD) | | ||
580 | FW_CMD_REQUEST | FW_CMD_WRITE | | ||
581 | FW_IQ_CMD_PFN(iq_params->pfn) | | ||
582 | FW_IQ_CMD_VFN(iq_params->vfn)); | ||
583 | cmdp->alloc_to_len16 |= htonl(iq_start_stop | | ||
584 | FW_CMD_LEN16(sizeof(*cmdp) / 16)); | ||
585 | cmdp->iqid |= htons(iq_params->iqid); | ||
586 | cmdp->fl0id |= htons(iq_params->fl0id); | ||
587 | cmdp->fl1id |= htons(iq_params->fl1id); | ||
588 | cmdp->type_to_iqandstindex |= htonl( | ||
589 | FW_IQ_CMD_IQANDST(iq_params->iqandst) | | ||
590 | FW_IQ_CMD_IQANUS(iq_params->iqanus) | | ||
591 | FW_IQ_CMD_IQANUD(iq_params->iqanud) | | ||
592 | FW_IQ_CMD_IQANDSTINDEX(iq_params->iqandstindex)); | ||
593 | cmdp->iqdroprss_to_iqesize |= htons( | ||
594 | FW_IQ_CMD_IQPCIECH(iq_params->iqpciech) | | ||
595 | FW_IQ_CMD_IQDCAEN(iq_params->iqdcaen) | | ||
596 | FW_IQ_CMD_IQDCACPU(iq_params->iqdcacpu) | | ||
597 | FW_IQ_CMD_IQINTCNTTHRESH(iq_params->iqintcntthresh) | | ||
598 | FW_IQ_CMD_IQCPRIO(iq_params->iqcprio) | | ||
599 | FW_IQ_CMD_IQESIZE(iq_params->iqesize)); | ||
600 | |||
601 | cmdp->iqsize |= htons(iq_params->iqsize); | ||
602 | cmdp->iqaddr |= cpu_to_be64(iq_params->iqaddr); | ||
603 | |||
604 | if (iq_params->type == 0) { | ||
605 | cmdp->iqns_to_fl0congen |= htonl( | ||
606 | FW_IQ_CMD_IQFLINTIQHSEN(iq_params->iqflintiqhsen)| | ||
607 | FW_IQ_CMD_IQFLINTCONGEN(iq_params->iqflintcongen)); | ||
608 | } | ||
609 | |||
610 | if (iq_params->fl0size && iq_params->fl0addr && | ||
611 | (iq_params->fl0id != 0xFFFF)) { | ||
612 | |||
613 | cmdp->iqns_to_fl0congen |= htonl( | ||
614 | FW_IQ_CMD_FL0HOSTFCMODE(iq_params->fl0hostfcmode)| | ||
615 | FW_IQ_CMD_FL0CPRIO(iq_params->fl0cprio) | | ||
616 | FW_IQ_CMD_FL0PADEN(iq_params->fl0paden) | | ||
617 | FW_IQ_CMD_FL0PACKEN(iq_params->fl0packen)); | ||
618 | cmdp->fl0dcaen_to_fl0cidxfthresh |= htons( | ||
619 | FW_IQ_CMD_FL0DCAEN(iq_params->fl0dcaen) | | ||
620 | FW_IQ_CMD_FL0DCACPU(iq_params->fl0dcacpu) | | ||
621 | FW_IQ_CMD_FL0FBMIN(iq_params->fl0fbmin) | | ||
622 | FW_IQ_CMD_FL0FBMAX(iq_params->fl0fbmax) | | ||
623 | FW_IQ_CMD_FL0CIDXFTHRESH(iq_params->fl0cidxfthresh)); | ||
624 | cmdp->fl0size |= htons(iq_params->fl0size); | ||
625 | cmdp->fl0addr |= cpu_to_be64(iq_params->fl0addr); | ||
626 | } | ||
627 | } /* csio_mb_iq_write */ | ||
628 | |||
629 | /* | ||
630 | * csio_mb_iq_alloc_write - Initializes the mailbox for allocating an | ||
631 | * Ingress DMA Queue. | ||
632 | * | ||
633 | * @hw: The HW structure | ||
634 | * @mbp: Mailbox structure to initialize | ||
635 | * @priv: Private data. | ||
636 | * @mb_tmo: Mailbox time-out period (in ms). | ||
637 | * @iq_params: Ingress queue params needed for allocation & writing. | ||
638 | * @cbfn: The call-back function | ||
639 | * | ||
640 | * | ||
641 | */ | ||
642 | void | ||
643 | csio_mb_iq_alloc_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv, | ||
644 | uint32_t mb_tmo, struct csio_iq_params *iq_params, | ||
645 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
646 | { | ||
647 | csio_mb_iq_alloc(hw, mbp, priv, mb_tmo, iq_params, cbfn); | ||
648 | csio_mb_iq_write(hw, mbp, priv, mb_tmo, true, iq_params, cbfn); | ||
649 | } /* csio_mb_iq_alloc_write */ | ||
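
/*
 * Illustrative usage sketch (assumed caller code, not part of this patch):
 * a caller could pair csio_mb_iq_alloc_write() with csio_mb_issue() and the
 * response parser below. A NULL callback selects polled (immediate) mode in
 * csio_mb_issue(); iq_dma_addr and the sizes are placeholders.
 *
 *	struct csio_iq_params iqp = { 0 };
 *	enum fw_retval retval;
 *
 *	iqp.pfn      = hw->pfn;
 *	iqp.iq_start = 1;
 *	iqp.iqsize   = 1024;
 *	iqp.iqaddr   = iq_dma_addr;
 *
 *	csio_mb_iq_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, NULL);
 *	if (csio_mb_issue(hw, mbp) == 0)
 *		csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, &iqp);
 *
 * On FW_SUCCESS, iqp.iqid, iqp.physiqid, iqp.fl0id and iqp.fl1id carry the
 * firmware-assigned queue IDs.
 */
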
650 | |||
651 | /* | ||
652 | * csio_mb_iq_alloc_write_rsp - Process the response of the ingress | ||
653 | * DMA queue allocate & write mailbox. | ||
654 | * | ||
655 | * @hw: The HW structure. | ||
656 | * @mbp: Mailbox structure to initialize. | ||
657 | * @ret_val: Firmware return value. | ||
658 | * @iq_params: Ingress queue parameters, after allocation and write. | ||
659 | * | ||
660 | */ | ||
661 | void | ||
662 | csio_mb_iq_alloc_write_rsp(struct csio_hw *hw, struct csio_mb *mbp, | ||
663 | enum fw_retval *ret_val, | ||
664 | struct csio_iq_params *iq_params) | ||
665 | { | ||
666 | struct fw_iq_cmd *rsp = (struct fw_iq_cmd *)(mbp->mb); | ||
667 | |||
668 | *ret_val = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)); | ||
669 | if (*ret_val == FW_SUCCESS) { | ||
670 | iq_params->physiqid = ntohs(rsp->physiqid); | ||
671 | iq_params->iqid = ntohs(rsp->iqid); | ||
672 | iq_params->fl0id = ntohs(rsp->fl0id); | ||
673 | iq_params->fl1id = ntohs(rsp->fl1id); | ||
674 | } else { | ||
675 | iq_params->physiqid = iq_params->iqid = | ||
676 | iq_params->fl0id = iq_params->fl1id = 0; | ||
677 | } | ||
678 | } /* csio_mb_iq_alloc_write_rsp */ | ||
679 | |||
680 | /* | ||
681 | * csio_mb_iq_free - Initializes the mailbox for freeing a | ||
682 | * specified Ingress DMA Queue. | ||
683 | * | ||
684 | * @hw: The HW structure | ||
685 | * @mbp: Mailbox structure to initialize | ||
686 | * @priv: Private data | ||
687 | * @mb_tmo: Mailbox time-out period (in ms). | ||
688 | * @iq_params: Parameters of the ingress queue to be freed. | ||
689 | * @cbfn: The call-back function | ||
690 | * | ||
691 | * | ||
692 | */ | ||
693 | void | ||
694 | csio_mb_iq_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv, | ||
695 | uint32_t mb_tmo, struct csio_iq_params *iq_params, | ||
696 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
697 | { | ||
698 | struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb); | ||
699 | |||
700 | CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1); | ||
701 | |||
702 | cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | | ||
703 | FW_CMD_REQUEST | FW_CMD_EXEC | | ||
704 | FW_IQ_CMD_PFN(iq_params->pfn) | | ||
705 | FW_IQ_CMD_VFN(iq_params->vfn)); | ||
706 | cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_FREE | | ||
707 | FW_CMD_LEN16(sizeof(*cmdp) / 16)); | ||
708 | cmdp->type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iq_params->type)); | ||
709 | |||
710 | cmdp->iqid = htons(iq_params->iqid); | ||
711 | cmdp->fl0id = htons(iq_params->fl0id); | ||
712 | cmdp->fl1id = htons(iq_params->fl1id); | ||
713 | |||
714 | } /* csio_mb_iq_free */ | ||
715 | |||
716 | /* | ||
717 | * csio_mb_eq_ofld_alloc - Initializes the mailbox for allocating | ||
718 | * an offload-egress queue. | ||
719 | * | ||
720 | * @hw: The HW structure | ||
721 | * @mbp: Mailbox structure to initialize | ||
722 | * @priv: Private data | ||
723 | * @mb_tmo: Mailbox time-out period (in ms). | ||
724 | * @eq_ofld_params: (Offload) Egress queue parameters. | ||
725 | * @cbfn: The call-back function | ||
726 | * | ||
727 | * | ||
728 | */ | ||
729 | static void | ||
730 | csio_mb_eq_ofld_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv, | ||
731 | uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params, | ||
732 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
733 | { | ||
734 | struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb); | ||
735 | |||
736 | CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1); | ||
737 | cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | | ||
738 | FW_CMD_REQUEST | FW_CMD_EXEC | | ||
739 | FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) | | ||
740 | FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn)); | ||
741 | cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC | | ||
742 | FW_CMD_LEN16(sizeof(*cmdp) / 16)); | ||
743 | |||
744 | } /* csio_mb_eq_ofld_alloc */ | ||
745 | |||
746 | /* | ||
747 | * csio_mb_eq_ofld_write - Initializes the mailbox for writing | ||
748 | * an allocated offload-egress queue. | ||
749 | * | ||
750 | * @hw: The HW structure | ||
751 | * @mbp: Mailbox structure to initialize | ||
752 | * @priv: Private data | ||
753 | * @mb_tmo: Mailbox time-out period (in ms). | ||
754 | * @cascaded_req: TRUE - if this request is cascaded with Eq-alloc request. | ||
755 | * @eq_ofld_params: (Offload) Egress queue parameters. | ||
756 | * @cbfn: The call-back function | ||
757 | * | ||
758 | * | ||
759 | * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating, | ||
760 | * because this EQ write request can be cascaded with a previous | ||
761 | * EQ alloc request, and we don't want to overwrite the bits set by | ||
762 | * that request. This logic will work even in a non-cascaded case, since the | ||
763 | * cmdp structure is zeroed out by CSIO_INIT_MBP. | ||
764 | */ | ||
765 | static void | ||
766 | csio_mb_eq_ofld_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv, | ||
767 | uint32_t mb_tmo, bool cascaded_req, | ||
768 | struct csio_eq_params *eq_ofld_params, | ||
769 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
770 | { | ||
771 | struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb); | ||
772 | |||
773 | uint32_t eq_start_stop = (eq_ofld_params->eqstart) ? | ||
774 | FW_EQ_OFLD_CMD_EQSTART : FW_EQ_OFLD_CMD_EQSTOP; | ||
775 | |||
776 | /* | ||
777 | * If this EQ write is cascaded with EQ alloc request, do not | ||
778 | * re-initialize with 0's. | ||
779 | * | ||
780 | */ | ||
781 | if (!cascaded_req) | ||
782 | CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1); | ||
783 | |||
784 | cmdp->op_to_vfn |= htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | | ||
785 | FW_CMD_REQUEST | FW_CMD_WRITE | | ||
786 | FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) | | ||
787 | FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn)); | ||
788 | cmdp->alloc_to_len16 |= htonl(eq_start_stop | | ||
789 | FW_CMD_LEN16(sizeof(*cmdp) / 16)); | ||
790 | |||
791 | cmdp->eqid_pkd |= htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid)); | ||
792 | |||
793 | cmdp->fetchszm_to_iqid |= htonl( | ||
794 | FW_EQ_OFLD_CMD_HOSTFCMODE(eq_ofld_params->hostfcmode) | | ||
795 | FW_EQ_OFLD_CMD_CPRIO(eq_ofld_params->cprio) | | ||
796 | FW_EQ_OFLD_CMD_PCIECHN(eq_ofld_params->pciechn) | | ||
797 | FW_EQ_OFLD_CMD_IQID(eq_ofld_params->iqid)); | ||
798 | |||
799 | cmdp->dcaen_to_eqsize |= htonl( | ||
800 | FW_EQ_OFLD_CMD_DCAEN(eq_ofld_params->dcaen) | | ||
801 | FW_EQ_OFLD_CMD_DCACPU(eq_ofld_params->dcacpu) | | ||
802 | FW_EQ_OFLD_CMD_FBMIN(eq_ofld_params->fbmin) | | ||
803 | FW_EQ_OFLD_CMD_FBMAX(eq_ofld_params->fbmax) | | ||
804 | FW_EQ_OFLD_CMD_CIDXFTHRESHO(eq_ofld_params->cidxfthresho) | | ||
805 | FW_EQ_OFLD_CMD_CIDXFTHRESH(eq_ofld_params->cidxfthresh) | | ||
806 | FW_EQ_OFLD_CMD_EQSIZE(eq_ofld_params->eqsize)); | ||
807 | |||
808 | cmdp->eqaddr |= cpu_to_be64(eq_ofld_params->eqaddr); | ||
809 | |||
810 | } /* csio_mb_eq_ofld_write */ | ||
811 | |||
812 | /* | ||
813 | * csio_mb_eq_ofld_alloc_write - Initializes the mailbox for allocating | ||
814 | * and writing into an Egress DMA Queue. | ||
815 | * | ||
816 | * @hw: The HW structure | ||
817 | * @mbp: Mailbox structure to initialize | ||
818 | * @priv: Private data. | ||
819 | * @mb_tmo: Mailbox time-out period (in ms). | ||
820 | * @eq_ofld_params: (Offload) Egress queue parameters. | ||
821 | * @cbfn: The call-back function | ||
822 | * | ||
823 | * | ||
824 | */ | ||
825 | void | ||
826 | csio_mb_eq_ofld_alloc_write(struct csio_hw *hw, struct csio_mb *mbp, | ||
827 | void *priv, uint32_t mb_tmo, | ||
828 | struct csio_eq_params *eq_ofld_params, | ||
829 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
830 | { | ||
831 | csio_mb_eq_ofld_alloc(hw, mbp, priv, mb_tmo, eq_ofld_params, cbfn); | ||
832 | csio_mb_eq_ofld_write(hw, mbp, priv, mb_tmo, true, | ||
833 | eq_ofld_params, cbfn); | ||
834 | } /* csio_mb_eq_ofld_alloc_write */ | ||
835 | |||
836 | /* | ||
837 | * csio_mb_eq_ofld_alloc_write_rsp - Process the response of the egress | ||
838 | * DMA queue allocate & write mailbox. | ||
839 | * | ||
840 | * @hw: The HW structure. | ||
841 | * @mbp: Mailbox structure to initialize. | ||
842 | * @ret_val: Firmware return value. | ||
843 | * @eq_ofld_params: (Offload) Egress queue parameters. | ||
844 | * | ||
845 | */ | ||
846 | void | ||
847 | csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *hw, | ||
848 | struct csio_mb *mbp, enum fw_retval *ret_val, | ||
849 | struct csio_eq_params *eq_ofld_params) | ||
850 | { | ||
851 | struct fw_eq_ofld_cmd *rsp = (struct fw_eq_ofld_cmd *)(mbp->mb); | ||
852 | |||
853 | *ret_val = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)); | ||
854 | |||
855 | if (*ret_val == FW_SUCCESS) { | ||
856 | eq_ofld_params->eqid = FW_EQ_OFLD_CMD_EQID_GET( | ||
857 | ntohl(rsp->eqid_pkd)); | ||
858 | eq_ofld_params->physeqid = FW_EQ_OFLD_CMD_PHYSEQID_GET( | ||
859 | ntohl(rsp->physeqid_pkd)); | ||
860 | } else | ||
861 | eq_ofld_params->eqid = 0; | ||
862 | |||
863 | } /* csio_mb_eq_ofld_alloc_write_rsp */ | ||
864 | |||
865 | /* | ||
866 | * csio_mb_eq_ofld_free - Initializes the mailbox for freeing a | ||
867 | * specified Egress DMA Queue. | ||
868 | * | ||
869 | * @hw: The HW structure | ||
870 | * @mbp: Mailbox structure to initialize | ||
871 | * @priv: Private data area. | ||
872 | * @mb_tmo: Mailbox time-out period (in ms). | ||
873 | * @eq_ofld_params: Parameters of the (offload) egress queue to be freed. | ||
874 | * @cbfn: The call-back function | ||
875 | * | ||
876 | * | ||
877 | */ | ||
878 | void | ||
879 | csio_mb_eq_ofld_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv, | ||
880 | uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params, | ||
881 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
882 | { | ||
883 | struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb); | ||
884 | |||
885 | CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1); | ||
886 | |||
887 | cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | | ||
888 | FW_CMD_REQUEST | FW_CMD_EXEC | | ||
889 | FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) | | ||
890 | FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn)); | ||
891 | cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | | ||
892 | FW_CMD_LEN16(sizeof(*cmdp) / 16)); | ||
893 | cmdp->eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid)); | ||
894 | |||
895 | } /* csio_mb_eq_ofld_free */ | ||
896 | |||
897 | /* | ||
898 | * csio_write_fcoe_link_cond_init_mb - Initialize Mailbox to write FCoE link | ||
899 | * condition. | ||
900 | * | ||
901 | * @ln: The Lnode structure | ||
902 | * @mbp: Mailbox structure to initialize | ||
903 | * @mb_tmo: Mailbox time-out period (in ms). | ||
904 | * @cbfn: The call back function. | ||
905 | * | ||
906 | * | ||
907 | */ | ||
908 | void | ||
909 | csio_write_fcoe_link_cond_init_mb(struct csio_lnode *ln, struct csio_mb *mbp, | ||
910 | uint32_t mb_tmo, uint8_t port_id, uint32_t sub_opcode, | ||
911 | uint8_t cos, bool link_status, uint32_t fcfi, | ||
912 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
913 | { | ||
914 | struct fw_fcoe_link_cmd *cmdp = | ||
915 | (struct fw_fcoe_link_cmd *)(mbp->mb); | ||
916 | |||
917 | CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1); | ||
918 | |||
919 | cmdp->op_to_portid = htonl(( | ||
920 | FW_CMD_OP(FW_FCOE_LINK_CMD) | | ||
921 | FW_CMD_REQUEST | | ||
922 | FW_CMD_WRITE | | ||
923 | FW_FCOE_LINK_CMD_PORTID(port_id))); | ||
924 | cmdp->sub_opcode_fcfi = htonl( | ||
925 | FW_FCOE_LINK_CMD_SUB_OPCODE(sub_opcode) | | ||
926 | FW_FCOE_LINK_CMD_FCFI(fcfi)); | ||
927 | cmdp->lstatus = link_status; | ||
928 | cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); | ||
929 | |||
930 | } /* csio_write_fcoe_link_cond_init_mb */ | ||
931 | |||
932 | /* | ||
933 | * csio_fcoe_read_res_info_init_mb - Initializes the mailbox for reading FCoE | ||
934 | * resource information (FW_FCOE_RES_INFO_CMD). | ||
935 | * | ||
936 | * @hw: The HW structure | ||
937 | * @mbp: Mailbox structure to initialize | ||
938 | * @mb_tmo: Mailbox time-out period (in ms). | ||
939 | * @cbfn: The call-back function | ||
940 | * | ||
941 | * | ||
942 | */ | ||
943 | void | ||
944 | csio_fcoe_read_res_info_init_mb(struct csio_hw *hw, struct csio_mb *mbp, | ||
945 | uint32_t mb_tmo, | ||
946 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
947 | { | ||
948 | struct fw_fcoe_res_info_cmd *cmdp = | ||
949 | (struct fw_fcoe_res_info_cmd *)(mbp->mb); | ||
950 | |||
951 | CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1); | ||
952 | |||
953 | cmdp->op_to_read = htonl((FW_CMD_OP(FW_FCOE_RES_INFO_CMD) | | ||
954 | FW_CMD_REQUEST | | ||
955 | FW_CMD_READ)); | ||
956 | |||
957 | cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); | ||
958 | |||
959 | } /* csio_fcoe_read_res_info_init_mb */ | ||
960 | |||
961 | /* | ||
962 | * csio_fcoe_vnp_alloc_init_mb - Initializes the mailbox for allocating VNP | ||
963 | * in the firmware (FW_FCOE_VNP_CMD). | ||
964 | * | ||
965 | * @ln: The Lnode structure. | ||
966 | * @mbp: Mailbox structure to initialize. | ||
967 | * @mb_tmo: Mailbox time-out period (in ms). | ||
968 | * @fcfi: FCF Index. | ||
969 | * @vnpi: vnpi | ||
970 | * @iqid: iqid | ||
971 | * @vnport_wwnn: vnport WWNN | ||
972 | * @vnport_wwpn: vnport WWPN | ||
973 | * @cbfn: The call-back function. | ||
974 | * | ||
975 | * | ||
976 | */ | ||
977 | void | ||
978 | csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *ln, struct csio_mb *mbp, | ||
979 | uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi, uint16_t iqid, | ||
980 | uint8_t vnport_wwnn[8], uint8_t vnport_wwpn[8], | ||
981 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
982 | { | ||
983 | struct fw_fcoe_vnp_cmd *cmdp = | ||
984 | (struct fw_fcoe_vnp_cmd *)(mbp->mb); | ||
985 | |||
986 | CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1); | ||
987 | |||
988 | cmdp->op_to_fcfi = htonl((FW_CMD_OP(FW_FCOE_VNP_CMD) | | ||
989 | FW_CMD_REQUEST | | ||
990 | FW_CMD_EXEC | | ||
991 | FW_FCOE_VNP_CMD_FCFI(fcfi))); | ||
992 | |||
993 | cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_ALLOC | | ||
994 | FW_CMD_LEN16(sizeof(*cmdp) / 16)); | ||
995 | |||
996 | cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi)); | ||
997 | |||
998 | cmdp->iqid = htons(iqid); | ||
999 | |||
1000 | if (!wwn_to_u64(vnport_wwnn) && !wwn_to_u64(vnport_wwpn)) | ||
1001 | cmdp->gen_wwn_to_vnpi |= htonl(FW_FCOE_VNP_CMD_GEN_WWN); | ||
1002 | |||
1003 | if (vnport_wwnn) | ||
1004 | memcpy(cmdp->vnport_wwnn, vnport_wwnn, 8); | ||
1005 | if (vnport_wwpn) | ||
1006 | memcpy(cmdp->vnport_wwpn, vnport_wwpn, 8); | ||
1007 | |||
1008 | } /* csio_fcoe_vnp_alloc_init_mb */ | ||
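
/*
 * Usage sketch (assumed caller code): per the GEN_WWN path above, passing
 * all-zero WWNs asks the firmware to generate the VN_Port world-wide names
 * itself. csio_lnode_to_hw() is assumed to be the usual lnode-to-hw
 * accessor; fcfi, vnpi, iqid and cbfn are placeholders.
 *
 *	uint8_t wwnn[8] = { 0 }, wwpn[8] = { 0 };
 *
 *	csio_fcoe_vnp_alloc_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
 *				    fcfi, vnpi, iqid, wwnn, wwpn, cbfn);
 *	csio_mb_issue(csio_lnode_to_hw(ln), mbp);
 */
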
1009 | |||
1010 | /* | ||
1011 | * csio_fcoe_vnp_read_init_mb - Prepares VNP read cmd. | ||
1012 | * @ln: The Lnode structure. | ||
1013 | * @mbp: Mailbox structure to initialize. | ||
1014 | * @mb_tmo: Mailbox time-out period (in ms). | ||
1015 | * @fcfi: FCF Index. | ||
1016 | * @vnpi: vnpi | ||
1017 | * @cbfn: The call-back handler. | ||
1018 | */ | ||
1019 | void | ||
1020 | csio_fcoe_vnp_read_init_mb(struct csio_lnode *ln, struct csio_mb *mbp, | ||
1021 | uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi, | ||
1022 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
1023 | { | ||
1024 | struct fw_fcoe_vnp_cmd *cmdp = | ||
1025 | (struct fw_fcoe_vnp_cmd *)(mbp->mb); | ||
1026 | |||
1027 | CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1); | ||
1028 | cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_VNP_CMD) | | ||
1029 | FW_CMD_REQUEST | | ||
1030 | FW_CMD_READ | | ||
1031 | FW_FCOE_VNP_CMD_FCFI(fcfi)); | ||
1032 | cmdp->alloc_to_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); | ||
1033 | cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi)); | ||
1034 | } | ||
1035 | |||
1036 | /* | ||
1037 | * csio_fcoe_vnp_free_init_mb - Initializes the mailbox for freeing an | ||
1038 | * allocated VNP in the firmware (FW_FCOE_VNP_CMD). | ||
1039 | * | ||
1040 | * @ln: The Lnode structure. | ||
1041 | * @mbp: Mailbox structure to initialize. | ||
1042 | * @mb_tmo: Mailbox time-out period (in ms). | ||
1043 | * @fcfi: FCF flow id | ||
1044 | * @vnpi: VNP flow id | ||
1045 | * @cbfn: The call-back function. | ||
1046 | * Return: None | ||
1047 | */ | ||
1048 | void | ||
1049 | csio_fcoe_vnp_free_init_mb(struct csio_lnode *ln, struct csio_mb *mbp, | ||
1050 | uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi, | ||
1051 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
1052 | { | ||
1053 | struct fw_fcoe_vnp_cmd *cmdp = | ||
1054 | (struct fw_fcoe_vnp_cmd *)(mbp->mb); | ||
1055 | |||
1056 | CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1); | ||
1057 | |||
1058 | cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_VNP_CMD) | | ||
1059 | FW_CMD_REQUEST | | ||
1060 | FW_CMD_EXEC | | ||
1061 | FW_FCOE_VNP_CMD_FCFI(fcfi)); | ||
1062 | cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_FREE | | ||
1063 | FW_CMD_LEN16(sizeof(*cmdp) / 16)); | ||
1064 | cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi)); | ||
1065 | } | ||
1066 | |||
1067 | /* | ||
1068 | * csio_fcoe_read_fcf_init_mb - Initializes the mailbox to read the | ||
1069 | * FCF records. | ||
1070 | * | ||
1071 | * @ln: The Lnode structure | ||
1072 | * @mbp: Mailbox structure to initialize | ||
1073 | * @mb_tmo: Mailbox time-out period (in ms). | ||
1074 | * @portid, @fcfi: Port number and index of the FCF record to read. | ||
1075 | * @cbfn: The call-back function | ||
1076 | * | ||
1077 | * | ||
1078 | */ | ||
1079 | void | ||
1080 | csio_fcoe_read_fcf_init_mb(struct csio_lnode *ln, struct csio_mb *mbp, | ||
1081 | uint32_t mb_tmo, uint32_t portid, uint32_t fcfi, | ||
1082 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
1083 | { | ||
1084 | struct fw_fcoe_fcf_cmd *cmdp = | ||
1085 | (struct fw_fcoe_fcf_cmd *)(mbp->mb); | ||
1086 | |||
1087 | CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1); | ||
1088 | |||
1089 | cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_FCF_CMD) | | ||
1090 | FW_CMD_REQUEST | | ||
1091 | FW_CMD_READ | | ||
1092 | FW_FCOE_FCF_CMD_FCFI(fcfi)); | ||
1093 | cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); | ||
1094 | |||
1095 | } /* csio_fcoe_read_fcf_init_mb */ | ||
1096 | |||
1097 | void | ||
1098 | csio_fcoe_read_portparams_init_mb(struct csio_hw *hw, struct csio_mb *mbp, | ||
1099 | uint32_t mb_tmo, | ||
1100 | struct fw_fcoe_port_cmd_params *portparams, | ||
1101 | void (*cbfn)(struct csio_hw *, | ||
1102 | struct csio_mb *)) | ||
1103 | { | ||
1104 | struct fw_fcoe_stats_cmd *cmdp = (struct fw_fcoe_stats_cmd *)(mbp->mb); | ||
1105 | |||
1106 | CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1); | ||
1107 | mbp->mb_size = 64; /* stats response fills the whole 64-byte mailbox */ | ||
1108 | |||
1109 | cmdp->op_to_flowid = htonl(FW_CMD_OP(FW_FCOE_STATS_CMD) | | ||
1110 | FW_CMD_REQUEST | FW_CMD_READ); | ||
1111 | cmdp->free_to_len16 = htonl(FW_CMD_LEN16(CSIO_MAX_MB_SIZE/16)); | ||
1112 | |||
1113 | cmdp->u.ctl.nstats_port = FW_FCOE_STATS_CMD_NSTATS(portparams->nstats) | | ||
1114 | FW_FCOE_STATS_CMD_PORT(portparams->portid); | ||
1115 | |||
1116 | cmdp->u.ctl.port_valid_ix = FW_FCOE_STATS_CMD_IX(portparams->idx) | | ||
1117 | FW_FCOE_STATS_CMD_PORT_VALID; | ||
1118 | |||
1119 | } /* csio_fcoe_read_portparams_init_mb */ | ||
1120 | |||
1121 | void | ||
1122 | csio_mb_process_portparams_rsp( | ||
1123 | struct csio_hw *hw, | ||
1124 | struct csio_mb *mbp, | ||
1125 | enum fw_retval *retval, | ||
1126 | struct fw_fcoe_port_cmd_params *portparams, | ||
1127 | struct fw_fcoe_port_stats *portstats | ||
1128 | ) | ||
1129 | { | ||
1130 | struct fw_fcoe_stats_cmd *rsp = (struct fw_fcoe_stats_cmd *)(mbp->mb); | ||
1131 | struct fw_fcoe_port_stats stats; | ||
1132 | uint8_t *src; | ||
1133 | uint8_t *dst; | ||
1134 | |||
1135 | *retval = FW_CMD_RETVAL_GET(ntohl(rsp->free_to_len16)); | ||
1136 | |||
1137 | memset(&stats, 0, sizeof(struct fw_fcoe_port_stats)); | ||
1138 | |||
1139 | if (*retval == FW_SUCCESS) { | ||
1140 | dst = (uint8_t *)(&stats) + ((portparams->idx - 1) * 8); | ||
1141 | src = (uint8_t *)rsp + (CSIO_STATS_OFFSET * 8); | ||
1142 | memcpy(dst, src, (portparams->nstats * 8)); | ||
1143 | if (portparams->idx == 1) { | ||
1144 | /* Get the first 6 flits from the Mailbox */ | ||
1145 | portstats->tx_bcast_bytes = | ||
1146 | be64_to_cpu(stats.tx_bcast_bytes); | ||
1147 | portstats->tx_bcast_frames = | ||
1148 | be64_to_cpu(stats.tx_bcast_frames); | ||
1149 | portstats->tx_mcast_bytes = | ||
1150 | be64_to_cpu(stats.tx_mcast_bytes); | ||
1151 | portstats->tx_mcast_frames = | ||
1152 | be64_to_cpu(stats.tx_mcast_frames); | ||
1153 | portstats->tx_ucast_bytes = | ||
1154 | be64_to_cpu(stats.tx_ucast_bytes); | ||
1155 | portstats->tx_ucast_frames = | ||
1156 | be64_to_cpu(stats.tx_ucast_frames); | ||
1157 | } | ||
1158 | if (portparams->idx == 7) { | ||
1159 | /* Get the second 6 flits from the Mailbox */ | ||
1160 | portstats->tx_drop_frames = | ||
1161 | be64_to_cpu(stats.tx_drop_frames); | ||
1162 | portstats->tx_offload_bytes = | ||
1163 | be64_to_cpu(stats.tx_offload_bytes); | ||
1164 | portstats->tx_offload_frames = | ||
1165 | be64_to_cpu(stats.tx_offload_frames); | ||
1166 | #if 0 | ||
1167 | portstats->rx_pf_bytes = | ||
1168 | be64_to_cpu(stats.rx_pf_bytes); | ||
1169 | portstats->rx_pf_frames = | ||
1170 | be64_to_cpu(stats.rx_pf_frames); | ||
1171 | #endif | ||
1172 | portstats->rx_bcast_bytes = | ||
1173 | be64_to_cpu(stats.rx_bcast_bytes); | ||
1174 | portstats->rx_bcast_frames = | ||
1175 | be64_to_cpu(stats.rx_bcast_frames); | ||
1176 | portstats->rx_mcast_bytes = | ||
1177 | be64_to_cpu(stats.rx_mcast_bytes); | ||
1178 | } | ||
1179 | if (portparams->idx == 13) { | ||
1180 | /* Get the last 4 flits from the Mailbox */ | ||
1181 | portstats->rx_mcast_frames = | ||
1182 | be64_to_cpu(stats.rx_mcast_frames); | ||
1183 | portstats->rx_ucast_bytes = | ||
1184 | be64_to_cpu(stats.rx_ucast_bytes); | ||
1185 | portstats->rx_ucast_frames = | ||
1186 | be64_to_cpu(stats.rx_ucast_frames); | ||
1187 | portstats->rx_err_frames = | ||
1188 | be64_to_cpu(stats.rx_err_frames); | ||
1189 | } | ||
1190 | } | ||
1191 | } | ||
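
/*
 * Usage sketch (assumed caller code): the firmware returns at most
 * CSIO_NUM_STATS_PER_MB (6) 64-bit stats per mailbox, so the full set of
 * port counters is gathered over three polled reads at indices 1, 7 and
 * 13 - exactly the indices the parser above checks for. portid, hw and
 * mbp are placeholders.
 *
 *	struct fw_fcoe_port_cmd_params pp;
 *	struct fw_fcoe_port_stats stats;
 *	enum fw_retval retval;
 *	int idx;
 *
 *	memset(&stats, 0, sizeof(stats));
 *	pp.portid = portid;
 *	for (idx = 1; idx <= 13; idx += CSIO_NUM_STATS_PER_MB) {
 *		pp.idx    = idx;
 *		pp.nstats = (idx == 13) ? 4 : CSIO_NUM_STATS_PER_MB;
 *		csio_fcoe_read_portparams_init_mb(hw, mbp,
 *					CSIO_MB_DEFAULT_TMO, &pp, NULL);
 *		if (csio_mb_issue(hw, mbp))
 *			break;
 *		csio_mb_process_portparams_rsp(hw, mbp, &retval,
 *					       &pp, &stats);
 *	}
 */
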
1192 | |||
1193 | /* Entry points/APIs for MB module */ | ||
1194 | /* | ||
1195 | * csio_mb_intr_enable - Enable Interrupts from mailboxes. | ||
1196 | * @hw: The HW structure | ||
1197 | * | ||
1198 | * Enables CIM interrupt bit in appropriate INT_ENABLE registers. | ||
1199 | */ | ||
1200 | void | ||
1201 | csio_mb_intr_enable(struct csio_hw *hw) | ||
1202 | { | ||
1203 | csio_wr_reg32(hw, MBMSGRDYINTEN(1), MYPF_REG(CIM_PF_HOST_INT_ENABLE)); | ||
1204 | csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE)); | ||
1205 | } | ||
1206 | |||
1207 | /* | ||
1208 | * csio_mb_intr_disable - Disable Interrupts from mailboxes. | ||
1209 | * @hw: The HW structure | ||
1210 | * | ||
1211 | * Disable bit in HostInterruptEnable CIM register. | ||
1212 | */ | ||
1213 | void | ||
1214 | csio_mb_intr_disable(struct csio_hw *hw) | ||
1215 | { | ||
1216 | csio_wr_reg32(hw, MBMSGRDYINTEN(0), MYPF_REG(CIM_PF_HOST_INT_ENABLE)); | ||
1217 | csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE)); | ||
1218 | } | ||
1219 | |||
1220 | static void | ||
1221 | csio_mb_dump_fw_dbg(struct csio_hw *hw, __be64 *cmd) | ||
1222 | { | ||
1223 | struct fw_debug_cmd *dbg = (struct fw_debug_cmd *)cmd; | ||
1224 | |||
1225 | if ((FW_DEBUG_CMD_TYPE_GET(ntohl(dbg->op_type))) == 1) { | ||
1226 | csio_info(hw, "FW print message:\n"); | ||
1227 | csio_info(hw, "\tdebug->dprtstridx = %d\n", | ||
1228 | ntohs(dbg->u.prt.dprtstridx)); | ||
1229 | csio_info(hw, "\tdebug->dprtstrparam0 = 0x%x\n", | ||
1230 | ntohl(dbg->u.prt.dprtstrparam0)); | ||
1231 | csio_info(hw, "\tdebug->dprtstrparam1 = 0x%x\n", | ||
1232 | ntohl(dbg->u.prt.dprtstrparam1)); | ||
1233 | csio_info(hw, "\tdebug->dprtstrparam2 = 0x%x\n", | ||
1234 | ntohl(dbg->u.prt.dprtstrparam2)); | ||
1235 | csio_info(hw, "\tdebug->dprtstrparam3 = 0x%x\n", | ||
1236 | ntohl(dbg->u.prt.dprtstrparam3)); | ||
1237 | } else { | ||
1238 | /* This is a FW assertion */ | ||
1239 | csio_fatal(hw, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n", | ||
1240 | dbg->u.assert.filename_0_7, | ||
1241 | ntohl(dbg->u.assert.line), | ||
1242 | ntohl(dbg->u.assert.x), | ||
1243 | ntohl(dbg->u.assert.y)); | ||
1244 | } | ||
1245 | } | ||
1246 | |||
1247 | static void | ||
1248 | csio_mb_debug_cmd_handler(struct csio_hw *hw) | ||
1249 | { | ||
1250 | int i; | ||
1251 | __be64 cmd[CSIO_MB_MAX_REGS]; | ||
1252 | uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL); | ||
1253 | uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA); | ||
1254 | int size = sizeof(struct fw_debug_cmd); | ||
1255 | |||
1256 | /* Copy mailbox data */ | ||
1257 | for (i = 0; i < size; i += 8) | ||
1258 | cmd[i / 8] = cpu_to_be64(csio_rd_reg64(hw, data_reg + i)); | ||
1259 | |||
1260 | csio_mb_dump_fw_dbg(hw, cmd); | ||
1261 | |||
1262 | /* Notify FW of mailbox by setting owner as UP */ | ||
1263 | csio_wr_reg32(hw, MBMSGVALID | MBINTREQ | MBOWNER(CSIO_MBOWNER_FW), | ||
1264 | ctl_reg); | ||
1265 | |||
1266 | csio_rd_reg32(hw, ctl_reg); | ||
1267 | wmb(); | ||
1268 | } | ||
1269 | |||
1270 | /* | ||
1271 | * csio_mb_issue - generic routine for issuing Mailbox commands. | ||
1272 | * @hw: The HW structure | ||
1273 | * @mbp: Mailbox command to issue | ||
1274 | * | ||
1275 | * Caller should hold hw lock across this call. | ||
1276 | */ | ||
1277 | int | ||
1278 | csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp) | ||
1279 | { | ||
1280 | uint32_t owner, ctl; | ||
1281 | int i; | ||
1282 | uint32_t ii; | ||
1283 | __be64 *cmd = mbp->mb; | ||
1284 | __be64 hdr; | ||
1285 | struct csio_mbm *mbm = &hw->mbm; | ||
1286 | uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL); | ||
1287 | uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA); | ||
1288 | int size = mbp->mb_size; | ||
1289 | int rv = -EINVAL; | ||
1290 | struct fw_cmd_hdr *fw_hdr; | ||
1291 | |||
1292 | /* Determine mode */ | ||
1293 | if (mbp->mb_cbfn == NULL) { | ||
1294 | /* Need to issue/get results in the same context */ | ||
1295 | if (mbp->tmo < CSIO_MB_POLL_FREQ) { | ||
1296 | csio_err(hw, "Invalid tmo: 0x%x\n", mbp->tmo); | ||
1297 | goto error_out; | ||
1298 | } | ||
1299 | } else if (!csio_is_host_intr_enabled(hw) || | ||
1300 | !csio_is_hw_intr_enabled(hw)) { | ||
1301 | csio_err(hw, "Cannot issue mailbox in interrupt mode 0x%x\n", | ||
1302 | *((uint8_t *)mbp->mb)); | ||
1303 | goto error_out; | ||
1304 | } | ||
1305 | |||
1306 | if (mbm->mcurrent != NULL) { | ||
1307 | /* Queue mbox cmd, if another mbox cmd is active */ | ||
1308 | if (mbp->mb_cbfn == NULL) { | ||
1309 | rv = -EBUSY; | ||
1310 | csio_dbg(hw, "Couldn't own Mailbox %x op:0x%x\n", | ||
1311 | hw->pfn, *((uint8_t *)mbp->mb)); | ||
1312 | |||
1313 | goto error_out; | ||
1314 | } else { | ||
1315 | list_add_tail(&mbp->list, &mbm->req_q); | ||
1316 | CSIO_INC_STATS(mbm, n_activeq); | ||
1317 | |||
1318 | return 0; | ||
1319 | } | ||
1320 | } | ||
1321 | |||
1322 | /* Now get ownership of mailbox */ | ||
1323 | owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg)); | ||
1324 | |||
1325 | if (!csio_mb_is_host_owner(owner)) { | ||
1326 | |||
1327 | for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++) | ||
1328 | owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg)); | ||
1329 | /* | ||
1330 | * Mailbox unavailable. In immediate mode, fail the command. | ||
1331 | * In other modes, enqueue the request. | ||
1332 | */ | ||
1333 | if (!csio_mb_is_host_owner(owner)) { | ||
1334 | if (mbp->mb_cbfn == NULL) { | ||
1335 | rv = owner ? -EBUSY : -ETIMEDOUT; | ||
1336 | |||
1337 | csio_dbg(hw, | ||
1338 | "Couldnt own Mailbox %x op:0x%x " | ||
1339 | "owner:%x\n", | ||
1340 | hw->pfn, *((uint8_t *)mbp->mb), owner); | ||
1341 | goto error_out; | ||
1342 | } else { | ||
1343 | if (mbm->mcurrent == NULL) { | ||
1344 | csio_err(hw, | ||
1345 | "Couldnt own Mailbox %x " | ||
1346 | "op:0x%x owner:%x\n", | ||
1347 | hw->pfn, *((uint8_t *)mbp->mb), | ||
1348 | owner); | ||
1349 | csio_err(hw, | ||
1350 | "No outstanding driver" | ||
1351 | " mailbox as well\n"); | ||
1352 | goto error_out; | ||
1353 | } | ||
1354 | } | ||
1355 | } | ||
1356 | } | ||
1357 | |||
1358 | /* Mailbox is available, copy mailbox data into it */ | ||
1359 | for (i = 0; i < size; i += 8) { | ||
1360 | csio_wr_reg64(hw, be64_to_cpu(*cmd), data_reg + i); | ||
1361 | cmd++; | ||
1362 | } | ||
1363 | |||
1364 | CSIO_DUMP_MB(hw, hw->pfn, data_reg); | ||
1365 | |||
1366 | /* Start completion timers in non-immediate modes and notify FW */ | ||
1367 | if (mbp->mb_cbfn != NULL) { | ||
1368 | mbm->mcurrent = mbp; | ||
1369 | mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo)); | ||
1370 | csio_wr_reg32(hw, MBMSGVALID | MBINTREQ | | ||
1371 | MBOWNER(CSIO_MBOWNER_FW), ctl_reg); | ||
1372 | } else | ||
1373 | csio_wr_reg32(hw, MBMSGVALID | MBOWNER(CSIO_MBOWNER_FW), | ||
1374 | ctl_reg); | ||
1375 | |||
1376 | /* Flush posted writes */ | ||
1377 | csio_rd_reg32(hw, ctl_reg); | ||
1378 | wmb(); | ||
1379 | |||
1380 | CSIO_INC_STATS(mbm, n_req); | ||
1381 | |||
1382 | if (mbp->mb_cbfn) | ||
1383 | return 0; | ||
1384 | |||
1385 | /* Poll for completion in immediate mode */ | ||
1386 | cmd = mbp->mb; | ||
1387 | |||
1388 | for (ii = 0; ii < mbp->tmo; ii += CSIO_MB_POLL_FREQ) { | ||
1389 | mdelay(CSIO_MB_POLL_FREQ); | ||
1390 | |||
1391 | /* Check for response */ | ||
1392 | ctl = csio_rd_reg32(hw, ctl_reg); | ||
1393 | if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) { | ||
1394 | |||
1395 | if (!(ctl & MBMSGVALID)) { | ||
1396 | csio_wr_reg32(hw, 0, ctl_reg); | ||
1397 | continue; | ||
1398 | } | ||
1399 | |||
1400 | CSIO_DUMP_MB(hw, hw->pfn, data_reg); | ||
1401 | |||
1402 | hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg)); | ||
1403 | fw_hdr = (struct fw_cmd_hdr *)&hdr; | ||
1404 | |||
1405 | switch (FW_CMD_OP_GET(ntohl(fw_hdr->hi))) { | ||
1406 | case FW_DEBUG_CMD: | ||
1407 | csio_mb_debug_cmd_handler(hw); | ||
1408 | continue; | ||
1409 | } | ||
1410 | |||
1411 | /* Copy response */ | ||
1412 | for (i = 0; i < size; i += 8) | ||
1413 | *cmd++ = cpu_to_be64(csio_rd_reg64 | ||
1414 | (hw, data_reg + i)); | ||
1415 | csio_wr_reg32(hw, 0, ctl_reg); | ||
1416 | |||
1417 | if (FW_CMD_RETVAL_GET(*(mbp->mb))) | ||
1418 | CSIO_INC_STATS(mbm, n_err); | ||
1419 | |||
1420 | CSIO_INC_STATS(mbm, n_rsp); | ||
1421 | return 0; | ||
1422 | } | ||
1423 | } | ||
1424 | |||
1425 | CSIO_INC_STATS(mbm, n_tmo); | ||
1426 | |||
1427 | csio_err(hw, "Mailbox %x op:0x%x timed out!\n", | ||
1428 | hw->pfn, *((uint8_t *)cmd)); | ||
1429 | |||
1430 | return -ETIMEDOUT; | ||
1431 | |||
1432 | error_out: | ||
1433 | CSIO_INC_STATS(mbm, n_err); | ||
1434 | return rv; | ||
1435 | } | ||
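
/*
 * Usage sketch (assumed caller code) of the two issue modes handled above.
 * Polled mode: a NULL callback and a timeout of at least CSIO_MB_POLL_FREQ;
 * csio_mb_issue() then busy-waits via mdelay() for the response. Interrupt
 * mode: a callback (my_cbfn below is a placeholder) that is invoked later
 * from csio_mb_completions() once the ISR queues the completed mailbox.
 *
 *	csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
 *	rv = csio_mb_issue(hw, mbp);
 *	(polled: rv is 0 on completion, or -EBUSY/-ETIMEDOUT/-EINVAL)
 *
 *	csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, my_cbfn);
 *	rv = csio_mb_issue(hw, mbp);
 *	(interrupt mode: rv of 0 means issued or queued; my_cbfn fires later)
 */
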
1436 | |||
1437 | /* | ||
1438 | * csio_mb_completions - Completion handler for Mailbox commands | ||
1439 | * @hw: The HW structure | ||
1440 | * @cbfn_q: Completion queue. | ||
1441 | * | ||
1442 | */ | ||
1443 | void | ||
1444 | csio_mb_completions(struct csio_hw *hw, struct list_head *cbfn_q) | ||
1445 | { | ||
1446 | struct csio_mb *mbp; | ||
1447 | struct csio_mbm *mbm = &hw->mbm; | ||
1448 | enum fw_retval rv; | ||
1449 | |||
1450 | while (!list_empty(cbfn_q)) { | ||
1451 | mbp = list_first_entry(cbfn_q, struct csio_mb, list); | ||
1452 | list_del_init(&mbp->list); | ||
1453 | |||
1454 | rv = csio_mb_fw_retval(mbp); | ||
1455 | if ((rv != FW_SUCCESS) && (rv != FW_HOSTERROR)) | ||
1456 | CSIO_INC_STATS(mbm, n_err); | ||
1457 | else if (rv != FW_HOSTERROR) | ||
1458 | CSIO_INC_STATS(mbm, n_rsp); | ||
1459 | |||
1460 | if (mbp->mb_cbfn) | ||
1461 | mbp->mb_cbfn(hw, mbp); | ||
1462 | } | ||
1463 | } | ||
1464 | |||
1465 | static void | ||
1466 | csio_mb_portmod_changed(struct csio_hw *hw, uint8_t port_id) | ||
1467 | { | ||
1468 | static char *mod_str[] = { | ||
1469 | NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" | ||
1470 | }; | ||
1471 | |||
1472 | struct csio_pport *port = &hw->pport[port_id]; | ||
1473 | |||
1474 | if (port->mod_type == FW_PORT_MOD_TYPE_NONE) | ||
1475 | csio_info(hw, "Port:%d - port module unplugged\n", port_id); | ||
1476 | else if (port->mod_type < ARRAY_SIZE(mod_str)) | ||
1477 | csio_info(hw, "Port:%d - %s port module inserted\n", port_id, | ||
1478 | mod_str[port->mod_type]); | ||
1479 | else if (port->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) | ||
1480 | csio_info(hw, | ||
1481 | "Port:%d - unsupported optical port module " | ||
1482 | "inserted\n", port_id); | ||
1483 | else if (port->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) | ||
1484 | csio_info(hw, | ||
1485 | "Port:%d - unknown port module inserted, forcing " | ||
1486 | "TWINAX\n", port_id); | ||
1487 | else if (port->mod_type == FW_PORT_MOD_TYPE_ERROR) | ||
1488 | csio_info(hw, "Port:%d - transceiver module error\n", port_id); | ||
1489 | else | ||
1490 | csio_info(hw, "Port:%d - unknown module type %d inserted\n", | ||
1491 | port_id, port->mod_type); | ||
1492 | } | ||
1493 | |||
1494 | int | ||
1495 | csio_mb_fwevt_handler(struct csio_hw *hw, __be64 *cmd) | ||
1496 | { | ||
1497 | uint8_t opcode = *(uint8_t *)cmd; | ||
1498 | struct fw_port_cmd *pcmd; | ||
1499 | uint8_t port_id; | ||
1500 | uint32_t link_status; | ||
1501 | uint16_t action; | ||
1502 | uint8_t mod_type; | ||
1503 | |||
1504 | if (opcode == FW_PORT_CMD) { | ||
1505 | pcmd = (struct fw_port_cmd *)cmd; | ||
1506 | port_id = FW_PORT_CMD_PORTID_GET( | ||
1507 | ntohl(pcmd->op_to_portid)); | ||
1508 | action = FW_PORT_CMD_ACTION_GET( | ||
1509 | ntohl(pcmd->action_to_len16)); | ||
1510 | if (action != FW_PORT_ACTION_GET_PORT_INFO) { | ||
1511 | csio_err(hw, "Unhandled FW_PORT_CMD action: %u\n", | ||
1512 | action); | ||
1513 | return -EINVAL; | ||
1514 | } | ||
1515 | |||
1516 | link_status = ntohl(pcmd->u.info.lstatus_to_modtype); | ||
1517 | mod_type = FW_PORT_CMD_MODTYPE_GET(link_status); | ||
1518 | |||
1519 | hw->pport[port_id].link_status = | ||
1520 | FW_PORT_CMD_LSTATUS_GET(link_status); | ||
1521 | hw->pport[port_id].link_speed = | ||
1522 | FW_PORT_CMD_LSPEED_GET(link_status); | ||
1523 | |||
1524 | csio_info(hw, "Port:%x - LINK %s\n", port_id, | ||
1525 | FW_PORT_CMD_LSTATUS_GET(link_status) ? "UP" : "DOWN"); | ||
1526 | |||
1527 | if (mod_type != hw->pport[port_id].mod_type) { | ||
1528 | hw->pport[port_id].mod_type = mod_type; | ||
1529 | csio_mb_portmod_changed(hw, port_id); | ||
1530 | } | ||
1531 | } else if (opcode == FW_DEBUG_CMD) { | ||
1532 | csio_mb_dump_fw_dbg(hw, cmd); | ||
1533 | } else { | ||
1534 | csio_dbg(hw, "Gen MB can't handle op:0x%x on evtq.\n", opcode); | ||
1535 | return -EINVAL; | ||
1536 | } | ||
1537 | |||
1538 | return 0; | ||
1539 | } | ||
1540 | |||
1541 | /* | ||
1542 | * csio_mb_isr_handler - Handle mailbox-related interrupts. | ||
1543 | * @hw: The HW structure | ||
1544 | * | ||
1545 | * Called from the ISR to handle Mailbox related interrupts. | ||
1546 | * HW Lock should be held across this call. | ||
1547 | */ | ||
1548 | int | ||
1549 | csio_mb_isr_handler(struct csio_hw *hw) | ||
1550 | { | ||
1551 | struct csio_mbm *mbm = &hw->mbm; | ||
1552 | struct csio_mb *mbp = mbm->mcurrent; | ||
1553 | __be64 *cmd; | ||
1554 | uint32_t ctl, cim_cause, pl_cause; | ||
1555 | int i; | ||
1556 | uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL); | ||
1557 | uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA); | ||
1558 | int size; | ||
1559 | __be64 hdr; | ||
1560 | struct fw_cmd_hdr *fw_hdr; | ||
1561 | |||
1562 | pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE)); | ||
1563 | cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE)); | ||
1564 | |||
1565 | if (!(pl_cause & PFCIM) || !(cim_cause & MBMSGRDYINT)) { | ||
1566 | CSIO_INC_STATS(hw, n_mbint_unexp); | ||
1567 | return -EINVAL; | ||
1568 | } | ||
1569 | |||
1570 | /* | ||
1571 | * The cause registers below HAVE to be cleared in the SAME | ||
1572 | * order: the low-level cause register first, followed by | ||
1573 | * the upper-level cause register. In other words, CIM-cause | ||
1574 | * first, then PL-cause. | ||
1575 | */ | ||
1576 | csio_wr_reg32(hw, MBMSGRDYINT, MYPF_REG(CIM_PF_HOST_INT_CAUSE)); | ||
1577 | csio_wr_reg32(hw, PFCIM, MYPF_REG(PL_PF_INT_CAUSE)); | ||
1578 | |||
1579 | ctl = csio_rd_reg32(hw, ctl_reg); | ||
1580 | |||
1581 | if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) { | ||
1582 | |||
1583 | CSIO_DUMP_MB(hw, hw->pfn, data_reg); | ||
1584 | |||
1585 | if (!(ctl & MBMSGVALID)) { | ||
1586 | csio_warn(hw, | ||
1587 | "Stray mailbox interrupt recvd," | ||
1588 | " mailbox data not valid\n"); | ||
1589 | csio_wr_reg32(hw, 0, ctl_reg); | ||
1590 | /* Flush */ | ||
1591 | csio_rd_reg32(hw, ctl_reg); | ||
1592 | return -EINVAL; | ||
1593 | } | ||
1594 | |||
1595 | hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg)); | ||
1596 | fw_hdr = (struct fw_cmd_hdr *)&hdr; | ||
1597 | |||
1598 | switch (FW_CMD_OP_GET(ntohl(fw_hdr->hi))) { | ||
1599 | case FW_DEBUG_CMD: | ||
1600 | csio_mb_debug_cmd_handler(hw); | ||
1601 | return -EINVAL; | ||
1602 | #if 0 | ||
1603 | case FW_ERROR_CMD: | ||
1604 | case FW_INITIALIZE_CMD: /* When we are not master */ | ||
1605 | #endif | ||
1606 | } | ||
1607 | |||
1608 | CSIO_ASSERT(mbp != NULL); | ||
1609 | |||
1610 | cmd = mbp->mb; | ||
1611 | size = mbp->mb_size; | ||
1612 | /* Get response */ | ||
1613 | for (i = 0; i < size; i += 8) | ||
1614 | *cmd++ = cpu_to_be64(csio_rd_reg64 | ||
1615 | (hw, data_reg + i)); | ||
1616 | |||
1617 | csio_wr_reg32(hw, 0, ctl_reg); | ||
1618 | /* Flush */ | ||
1619 | csio_rd_reg32(hw, ctl_reg); | ||
1620 | |||
1621 | mbm->mcurrent = NULL; | ||
1622 | |||
1623 | /* Add completion to tail of cbfn queue */ | ||
1624 | list_add_tail(&mbp->list, &mbm->cbfn_q); | ||
1625 | CSIO_INC_STATS(mbm, n_cbfnq); | ||
1626 | |||
1627 | /* | ||
1628 | * Enqueue event to EventQ. Events processing happens | ||
1629 | * in Event worker thread context | ||
1630 | */ | ||
1631 | if (csio_enqueue_evt(hw, CSIO_EVT_MBX, mbp, sizeof(mbp))) | ||
1632 | CSIO_INC_STATS(hw, n_evt_drop); | ||
1633 | |||
1634 | return 0; | ||
1635 | |||
1636 | } else { | ||
1637 | /* | ||
1638 | * We can get here if the mailbox MSI-X vector is shared, | ||
1639 | * in the INTx case, or on a stray interrupt. | ||
1640 | */ | ||
1641 | csio_dbg(hw, "Host not owner, no mailbox interrupt\n"); | ||
1642 | CSIO_INC_STATS(hw, n_int_stray); | ||
1643 | return -EINVAL; | ||
1644 | } | ||
1645 | } | ||
1646 | |||
1647 | /* | ||
1648 | * csio_mb_tmo_handler - Timeout handler | ||
1649 | * @hw: The HW structure | ||
1650 | * | ||
1651 | */ | ||
1652 | struct csio_mb * | ||
1653 | csio_mb_tmo_handler(struct csio_hw *hw) | ||
1654 | { | ||
1655 | struct csio_mbm *mbm = &hw->mbm; | ||
1656 | struct csio_mb *mbp = mbm->mcurrent; | ||
1657 | struct fw_cmd_hdr *fw_hdr; | ||
1658 | |||
1659 | /* | ||
1660 | * Could be a race between the completion handler and the timer, | ||
1661 | * in which case the completion handler won that race. | ||
1662 | */ | ||
1663 | if (mbp == NULL) { | ||
1664 | CSIO_DB_ASSERT(0); | ||
1665 | return NULL; | ||
1666 | } | ||
1667 | |||
1668 | fw_hdr = (struct fw_cmd_hdr *)(mbp->mb); | ||
1669 | |||
1670 | csio_dbg(hw, "Mailbox num:%x op:0x%x timed out\n", hw->pfn, | ||
1671 | FW_CMD_OP_GET(ntohl(fw_hdr->hi))); | ||
1672 | |||
1673 | mbm->mcurrent = NULL; | ||
1674 | CSIO_INC_STATS(mbm, n_tmo); | ||
1675 | fw_hdr->lo = htonl(FW_CMD_RETVAL(FW_ETIMEDOUT)); | ||
1676 | |||
1677 | return mbp; | ||
1678 | } | ||
1679 | |||
1680 | /* | ||
1681 | * csio_mb_cancel_all - Cancel all waiting commands. | ||
1682 | * @hw: The HW structure | ||
1683 | * @cbfn_q: The callback queue. | ||
1684 | * | ||
1685 | * Caller should hold hw lock across this call. | ||
1686 | */ | ||
1687 | void | ||
1688 | csio_mb_cancel_all(struct csio_hw *hw, struct list_head *cbfn_q) | ||
1689 | { | ||
1690 | struct csio_mb *mbp; | ||
1691 | struct csio_mbm *mbm = &hw->mbm; | ||
1692 | struct fw_cmd_hdr *hdr; | ||
1693 | struct list_head *tmp; | ||
1694 | |||
1695 | if (mbm->mcurrent) { | ||
1696 | mbp = mbm->mcurrent; | ||
1697 | |||
1698 | /* Stop mailbox completion timer */ | ||
1699 | del_timer_sync(&mbm->timer); | ||
1700 | |||
1701 | /* Add completion to tail of cbfn queue */ | ||
1702 | list_add_tail(&mbp->list, cbfn_q); | ||
1703 | mbm->mcurrent = NULL; | ||
1704 | } | ||
1705 | |||
1706 | if (!list_empty(&mbm->req_q)) { | ||
1707 | list_splice_tail_init(&mbm->req_q, cbfn_q); | ||
1708 | mbm->stats.n_activeq = 0; | ||
1709 | } | ||
1710 | |||
1711 | if (!list_empty(&mbm->cbfn_q)) { | ||
1712 | list_splice_tail_init(&mbm->cbfn_q, cbfn_q); | ||
1713 | mbm->stats.n_cbfnq = 0; | ||
1714 | } | ||
1715 | |||
1716 | if (list_empty(cbfn_q)) | ||
1717 | return; | ||
1718 | |||
1719 | list_for_each(tmp, cbfn_q) { | ||
1720 | mbp = list_entry(tmp, struct csio_mb, list); | ||
1721 | hdr = (struct fw_cmd_hdr *)(mbp->mb); | ||
1722 | |||
1723 | csio_dbg(hw, "Cancelling pending mailbox num %x op:%x\n", | ||
1724 | hw->pfn, FW_CMD_OP_GET(ntohl(hdr->hi))); | ||
1725 | |||
1726 | CSIO_INC_STATS(mbm, n_cancel); | ||
1727 | hdr->lo = htonl(FW_CMD_RETVAL(FW_HOSTERROR)); | ||
1728 | } | ||
1729 | } | ||
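
/*
 * Usage sketch (assumed caller code): the intended cancel-and-drain
 * pattern, taking the hw lock only around the cancel as the note above
 * requires (hw->lock is assumed to be the HW spinlock). Cancelled
 * commands then complete through csio_mb_completions() with FW_HOSTERROR.
 *
 *	LIST_HEAD(cbfn_q);
 *
 *	spin_lock_irq(&hw->lock);
 *	csio_mb_cancel_all(hw, &cbfn_q);
 *	spin_unlock_irq(&hw->lock);
 *
 *	csio_mb_completions(hw, &cbfn_q);
 */
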
1730 | |||
1731 | /* | ||
1732 | * csio_mbm_init - Initialize Mailbox module | ||
1733 | * @mbm: Mailbox module | ||
1734 | * @hw: The HW structure | ||
1735 | * @timer: Timing function for interrupting mailboxes | ||
1736 | * | ||
1737 | * Initialize timer and the request/response queues. | ||
1738 | */ | ||
1739 | int | ||
1740 | csio_mbm_init(struct csio_mbm *mbm, struct csio_hw *hw, | ||
1741 | void (*timer_fn)(uintptr_t)) | ||
1742 | { | ||
1743 | struct timer_list *timer = &mbm->timer; | ||
1744 | |||
1745 | init_timer(timer); | ||
1746 | timer->function = timer_fn; | ||
1747 | timer->data = (unsigned long)hw; | ||
1748 | |||
1749 | INIT_LIST_HEAD(&mbm->req_q); | ||
1750 | INIT_LIST_HEAD(&mbm->cbfn_q); | ||
1751 | csio_set_mb_intr_idx(mbm, -1); | ||
1752 | |||
1753 | return 0; | ||
1754 | } | ||
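
/*
 * Usage sketch (assumed caller code): a timer function of the shape
 * csio_mbm_init() expects, paired with csio_mb_tmo_handler() above.
 * my_mb_timer_fn is a placeholder name, and hw->lock is assumed to be
 * the HW spinlock.
 *
 *	static void my_mb_timer_fn(uintptr_t data)
 *	{
 *		struct csio_hw *hw = (struct csio_hw *)data;
 *		struct csio_mb *mbp;
 *
 *		spin_lock_irq(&hw->lock);
 *		mbp = csio_mb_tmo_handler(hw);
 *		spin_unlock_irq(&hw->lock);
 *
 *		if (mbp && mbp->mb_cbfn)
 *			mbp->mb_cbfn(hw, mbp);
 *	}
 *
 *	csio_mbm_init(&hw->mbm, hw, my_mb_timer_fn);
 */
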
1755 | |||
1756 | /* | ||
1757 | * csio_mbm_exit - Uninitialize mailbox module | ||
1758 | * @mbm: Mailbox module | ||
1759 | * | ||
1760 | * Stop timer. | ||
1761 | */ | ||
1762 | void | ||
1763 | csio_mbm_exit(struct csio_mbm *mbm) | ||
1764 | { | ||
1765 | del_timer_sync(&mbm->timer); | ||
1766 | |||
1767 | CSIO_DB_ASSERT(mbm->mcurrent == NULL); | ||
1768 | CSIO_DB_ASSERT(list_empty(&mbm->req_q)); | ||
1769 | CSIO_DB_ASSERT(list_empty(&mbm->cbfn_q)); | ||
1770 | } | ||
diff --git a/drivers/scsi/csiostor/csio_mb.h b/drivers/scsi/csiostor/csio_mb.h new file mode 100644 index 000000000000..1788ea506f39 --- /dev/null +++ b/drivers/scsi/csiostor/csio_mb.h | |||
@@ -0,0 +1,278 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio FCoE driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #ifndef __CSIO_MB_H__ | ||
36 | #define __CSIO_MB_H__ | ||
37 | |||
38 | #include <linux/timer.h> | ||
39 | #include <linux/completion.h> | ||
40 | |||
41 | #include "t4fw_api.h" | ||
42 | #include "t4fw_api_stor.h" | ||
43 | #include "csio_defs.h" | ||
44 | |||
45 | #define CSIO_STATS_OFFSET (2) | ||
46 | #define CSIO_NUM_STATS_PER_MB (6) | ||
47 | |||
48 | struct fw_fcoe_port_cmd_params { | ||
49 | uint8_t portid; | ||
50 | uint8_t idx; | ||
51 | uint8_t nstats; | ||
52 | }; | ||
53 | |||
54 | #define CSIO_DUMP_MB(__hw, __num, __mb) \ | ||
55 | csio_dbg(__hw, "\t%llx %llx %llx %llx %llx %llx %llx %llx\n", \ | ||
56 | (unsigned long long)csio_rd_reg64(__hw, __mb), \ | ||
57 | (unsigned long long)csio_rd_reg64(__hw, __mb + 8), \ | ||
58 | (unsigned long long)csio_rd_reg64(__hw, __mb + 16), \ | ||
59 | (unsigned long long)csio_rd_reg64(__hw, __mb + 24), \ | ||
60 | (unsigned long long)csio_rd_reg64(__hw, __mb + 32), \ | ||
61 | (unsigned long long)csio_rd_reg64(__hw, __mb + 40), \ | ||
62 | (unsigned long long)csio_rd_reg64(__hw, __mb + 48), \ | ||
63 | (unsigned long long)csio_rd_reg64(__hw, __mb + 56)) | ||
64 | |||
65 | #define CSIO_MB_MAX_REGS 8 | ||
66 | #define CSIO_MAX_MB_SIZE 64 | ||
67 | #define CSIO_MB_POLL_FREQ 5 /* 5 ms */ | ||
68 | #define CSIO_MB_DEFAULT_TMO FW_CMD_MAX_TIMEOUT | ||
69 | |||
70 | /* Device master in HELLO command */ | ||
71 | enum csio_dev_master { CSIO_MASTER_CANT, CSIO_MASTER_MAY, CSIO_MASTER_MUST }; | ||
72 | |||
73 | enum csio_mb_owner { CSIO_MBOWNER_NONE, CSIO_MBOWNER_FW, CSIO_MBOWNER_PL }; | ||
74 | |||
75 | enum csio_dev_state { | ||
76 | CSIO_DEV_STATE_UNINIT, | ||
77 | CSIO_DEV_STATE_INIT, | ||
78 | CSIO_DEV_STATE_ERR | ||
79 | }; | ||
80 | |||
81 | #define FW_PARAM_DEV(param) \ | ||
82 | (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ | ||
83 | FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) | ||
84 | |||
85 | #define FW_PARAM_PFVF(param) \ | ||
86 | (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ | ||
87 | FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \ | ||
88 | FW_PARAMS_PARAM_Y(0) | \ | ||
89 | FW_PARAMS_PARAM_Z(0)) | ||
90 | |||
91 | enum { | ||
92 | PAUSE_RX = 1 << 0, | ||
93 | PAUSE_TX = 1 << 1, | ||
94 | PAUSE_AUTONEG = 1 << 2 | ||
95 | }; | ||
96 | |||
97 | #define CSIO_INIT_MBP(__mbp, __cp, __tmo, __priv, __fn, __clear) \ | ||
98 | do { \ | ||
99 | if (__clear) \ | ||
100 | memset((__cp), 0, \ | ||
101 | CSIO_MB_MAX_REGS * sizeof(__be64)); \ | ||
102 | INIT_LIST_HEAD(&(__mbp)->list); \ | ||
103 | (__mbp)->tmo = (__tmo); \ | ||
104 | (__mbp)->priv = (void *)(__priv); \ | ||
105 | (__mbp)->mb_cbfn = (__fn); \ | ||
106 | (__mbp)->mb_size = sizeof(*(__cp)); \ | ||
107 | } while (0) | ||
108 | |||
109 | struct csio_mbm_stats { | ||
110 | uint32_t n_req; /* number of mbox req */ | ||
111 | uint32_t n_rsp; /* number of mbox rsp */ | ||
112 | uint32_t n_activeq; /* number of mbox req active Q */ | ||
113 | uint32_t n_cbfnq; /* number of mbox req cbfn Q */ | ||
114 | uint32_t n_tmo; /* number of mbox timeout */ | ||
115 | uint32_t n_cancel; /* number of mbox cancel */ | ||
116 | uint32_t n_err; /* number of mbox error */ | ||
117 | }; | ||
118 | |||
119 | /* Driver version of Mailbox */ | ||
120 | struct csio_mb { | ||
121 | struct list_head list; /* for req/resp */ | ||
122 | /* queue in driver */ | ||
123 | __be64 mb[CSIO_MB_MAX_REGS]; /* MB in HW format */ | ||
124 | int mb_size; /* Size of this | ||
125 | * mailbox. | ||
126 | */ | ||
127 | uint32_t tmo; /* Timeout */ | ||
128 | struct completion cmplobj; /* MB Completion | ||
129 | * object | ||
130 | */ | ||
131 | void (*mb_cbfn) (struct csio_hw *, struct csio_mb *); | ||
132 | /* Callback fn */ | ||
133 | void *priv; /* Owner private ptr */ | ||
134 | }; | ||
135 | |||
136 | struct csio_mbm { | ||
137 | uint32_t a_mbox; /* Async mbox num */ | ||
138 | uint32_t intr_idx; /* Interrupt index */ | ||
139 | struct timer_list timer; /* Mbox timer */ | ||
140 | struct list_head req_q; /* Mbox request queue */ | ||
141 | struct list_head cbfn_q; /* Mbox completion q */ | ||
142 | struct csio_mb *mcurrent; /* Current mailbox */ | ||
143 | uint32_t req_q_cnt; /* Outstanding mbox | ||
144 | * cmds | ||
145 | */ | ||
146 | struct csio_mbm_stats stats; /* Statistics */ | ||
147 | }; | ||
148 | |||
149 | #define csio_set_mb_intr_idx(_m, _i) ((_m)->intr_idx = (_i)) | ||
150 | #define csio_get_mb_intr_idx(_m) ((_m)->intr_idx) | ||
151 | |||
152 | struct csio_iq_params; | ||
153 | struct csio_eq_params; | ||
154 | |||
155 | enum fw_retval csio_mb_fw_retval(struct csio_mb *); | ||
156 | |||
157 | /* MB helpers */ | ||
158 | void csio_mb_hello(struct csio_hw *, struct csio_mb *, uint32_t, | ||
159 | uint32_t, uint32_t, enum csio_dev_master, | ||
160 | void (*)(struct csio_hw *, struct csio_mb *)); | ||
161 | |||
162 | void csio_mb_process_hello_rsp(struct csio_hw *, struct csio_mb *, | ||
163 | enum fw_retval *, enum csio_dev_state *, | ||
164 | uint8_t *); | ||
165 | |||
166 | void csio_mb_bye(struct csio_hw *, struct csio_mb *, uint32_t, | ||
167 | void (*)(struct csio_hw *, struct csio_mb *)); | ||
168 | |||
169 | void csio_mb_reset(struct csio_hw *, struct csio_mb *, uint32_t, int, int, | ||
170 | void (*)(struct csio_hw *, struct csio_mb *)); | ||
171 | |||
172 | void csio_mb_params(struct csio_hw *, struct csio_mb *, uint32_t, unsigned int, | ||
173 | unsigned int, unsigned int, const u32 *, u32 *, bool, | ||
174 | void (*)(struct csio_hw *, struct csio_mb *)); | ||
175 | |||
176 | void csio_mb_process_read_params_rsp(struct csio_hw *, struct csio_mb *, | ||
177 | enum fw_retval *, unsigned int , u32 *); | ||
178 | |||
179 | void csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, | ||
180 | int reg); | ||
181 | |||
182 | void csio_mb_caps_config(struct csio_hw *, struct csio_mb *, uint32_t, | ||
183 | bool, bool, bool, bool, | ||
184 | void (*)(struct csio_hw *, struct csio_mb *)); | ||
185 | |||
186 | void csio_rss_glb_config(struct csio_hw *, struct csio_mb *, | ||
187 | uint32_t, uint8_t, unsigned int, | ||
188 | void (*)(struct csio_hw *, struct csio_mb *)); | ||
189 | |||
190 | void csio_mb_pfvf(struct csio_hw *, struct csio_mb *, uint32_t, | ||
191 | unsigned int, unsigned int, unsigned int, | ||
192 | unsigned int, unsigned int, unsigned int, | ||
193 | unsigned int, unsigned int, unsigned int, | ||
194 | unsigned int, unsigned int, unsigned int, | ||
195 | unsigned int, void (*) (struct csio_hw *, struct csio_mb *)); | ||
196 | |||
197 | void csio_mb_port(struct csio_hw *, struct csio_mb *, uint32_t, | ||
198 | uint8_t, bool, uint32_t, uint16_t, | ||
199 | void (*) (struct csio_hw *, struct csio_mb *)); | ||
200 | |||
201 | void csio_mb_process_read_port_rsp(struct csio_hw *, struct csio_mb *, | ||
202 | enum fw_retval *, uint16_t *); | ||
203 | |||
204 | void csio_mb_initialize(struct csio_hw *, struct csio_mb *, uint32_t, | ||
205 | void (*)(struct csio_hw *, struct csio_mb *)); | ||
206 | |||
207 | void csio_mb_iq_alloc_write(struct csio_hw *, struct csio_mb *, void *, | ||
208 | uint32_t, struct csio_iq_params *, | ||
209 | void (*) (struct csio_hw *, struct csio_mb *)); | ||
210 | |||
211 | void csio_mb_iq_alloc_write_rsp(struct csio_hw *, struct csio_mb *, | ||
212 | enum fw_retval *, struct csio_iq_params *); | ||
213 | |||
214 | void csio_mb_iq_free(struct csio_hw *, struct csio_mb *, void *, | ||
215 | uint32_t, struct csio_iq_params *, | ||
216 | void (*) (struct csio_hw *, struct csio_mb *)); | ||
217 | |||
218 | void csio_mb_eq_ofld_alloc_write(struct csio_hw *, struct csio_mb *, void *, | ||
219 | uint32_t, struct csio_eq_params *, | ||
220 | void (*) (struct csio_hw *, struct csio_mb *)); | ||
221 | |||
222 | void csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *, struct csio_mb *, | ||
223 | enum fw_retval *, struct csio_eq_params *); | ||
224 | |||
225 | void csio_mb_eq_ofld_free(struct csio_hw *, struct csio_mb *, void *, | ||
226 | uint32_t, struct csio_eq_params *, | ||
227 | void (*) (struct csio_hw *, struct csio_mb *)); | ||
228 | |||
229 | void csio_fcoe_read_res_info_init_mb(struct csio_hw *, struct csio_mb *, | ||
230 | uint32_t, | ||
231 | void (*) (struct csio_hw *, struct csio_mb *)); | ||
232 | |||
233 | void csio_write_fcoe_link_cond_init_mb(struct csio_lnode *, struct csio_mb *, | ||
234 | uint32_t, uint8_t, uint32_t, uint8_t, bool, uint32_t, | ||
235 | void (*) (struct csio_hw *, struct csio_mb *)); | ||
236 | |||
237 | void csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *, struct csio_mb *, | ||
238 | uint32_t, uint32_t, uint32_t, uint16_t, | ||
239 | uint8_t [8], uint8_t [8], | ||
240 | void (*) (struct csio_hw *, struct csio_mb *)); | ||
241 | |||
242 | void csio_fcoe_vnp_read_init_mb(struct csio_lnode *, struct csio_mb *, | ||
243 | uint32_t, uint32_t, uint32_t, | ||
244 | void (*) (struct csio_hw *, struct csio_mb *)); | ||
245 | |||
246 | void csio_fcoe_vnp_free_init_mb(struct csio_lnode *, struct csio_mb *, | ||
247 | uint32_t, uint32_t, uint32_t, | ||
248 | void (*) (struct csio_hw *, struct csio_mb *)); | ||
249 | |||
250 | void csio_fcoe_read_fcf_init_mb(struct csio_lnode *, struct csio_mb *, | ||
251 | uint32_t, uint32_t, uint32_t, | ||
252 | void (*cbfn) (struct csio_hw *, struct csio_mb *)); | ||
253 | |||
254 | void csio_fcoe_read_portparams_init_mb(struct csio_hw *hw, | ||
255 | struct csio_mb *mbp, uint32_t mb_tmo, | ||
256 | struct fw_fcoe_port_cmd_params *portparams, | ||
257 | void (*cbfn)(struct csio_hw *, struct csio_mb *)); | ||
258 | |||
259 | void csio_mb_process_portparams_rsp(struct csio_hw *hw, struct csio_mb *mbp, | ||
260 | enum fw_retval *retval, | ||
261 | struct fw_fcoe_port_cmd_params *portparams, | ||
262 | struct fw_fcoe_port_stats *portstats); | ||
263 | |||
264 | /* MB module functions */ | ||
265 | int csio_mbm_init(struct csio_mbm *, struct csio_hw *, | ||
266 | void (*)(uintptr_t)); | ||
267 | void csio_mbm_exit(struct csio_mbm *); | ||
268 | void csio_mb_intr_enable(struct csio_hw *); | ||
269 | void csio_mb_intr_disable(struct csio_hw *); | ||
270 | |||
271 | int csio_mb_issue(struct csio_hw *, struct csio_mb *); | ||
272 | void csio_mb_completions(struct csio_hw *, struct list_head *); | ||
273 | int csio_mb_fwevt_handler(struct csio_hw *, __be64 *); | ||
274 | int csio_mb_isr_handler(struct csio_hw *); | ||
275 | struct csio_mb *csio_mb_tmo_handler(struct csio_hw *); | ||
276 | void csio_mb_cancel_all(struct csio_hw *, struct list_head *); | ||
277 | |||
278 | #endif /* ifndef __CSIO_MB_H__ */ | ||
diff --git a/drivers/scsi/csiostor/csio_rnode.c b/drivers/scsi/csiostor/csio_rnode.c new file mode 100644 index 000000000000..b0ae430e436a --- /dev/null +++ b/drivers/scsi/csiostor/csio_rnode.c | |||
@@ -0,0 +1,912 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio FCoE driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/string.h> | ||
36 | #include <scsi/scsi_device.h> | ||
37 | #include <scsi/scsi_transport_fc.h> | ||
38 | #include <scsi/fc/fc_els.h> | ||
39 | #include <scsi/fc/fc_fs.h> | ||
40 | |||
41 | #include "csio_hw.h" | ||
42 | #include "csio_lnode.h" | ||
43 | #include "csio_rnode.h" | ||
44 | |||
45 | static int csio_rnode_init(struct csio_rnode *, struct csio_lnode *); | ||
46 | static void csio_rnode_exit(struct csio_rnode *); | ||
47 | |||
48 | /* Static machine forward declarations */ | ||
49 | static void csio_rns_uninit(struct csio_rnode *, enum csio_rn_ev); | ||
50 | static void csio_rns_ready(struct csio_rnode *, enum csio_rn_ev); | ||
51 | static void csio_rns_offline(struct csio_rnode *, enum csio_rn_ev); | ||
52 | static void csio_rns_disappeared(struct csio_rnode *, enum csio_rn_ev); | ||
53 | |||
54 | /* RNF event mapping */ | ||
55 | static enum csio_rn_ev fwevt_to_rnevt[] = { | ||
56 | CSIO_RNFE_NONE, /* None */ | ||
57 | CSIO_RNFE_LOGGED_IN, /* PLOGI_ACC_RCVD */ | ||
58 | CSIO_RNFE_NONE, /* PLOGI_RJT_RCVD */ | ||
59 | CSIO_RNFE_PLOGI_RECV, /* PLOGI_RCVD */ | ||
60 | CSIO_RNFE_LOGO_RECV, /* PLOGO_RCVD */ | ||
61 | CSIO_RNFE_PRLI_DONE, /* PRLI_ACC_RCVD */ | ||
62 | CSIO_RNFE_NONE, /* PRLI_RJT_RCVD */ | ||
63 | CSIO_RNFE_PRLI_RECV, /* PRLI_RCVD */ | ||
64 | CSIO_RNFE_PRLO_RECV, /* PRLO_RCVD */ | ||
65 | CSIO_RNFE_NONE, /* NPORT_ID_CHGD */ | ||
66 | CSIO_RNFE_LOGO_RECV, /* FLOGO_RCVD */ | ||
67 | CSIO_RNFE_NONE, /* CLR_VIRT_LNK_RCVD */ | ||
68 | CSIO_RNFE_LOGGED_IN, /* FLOGI_ACC_RCVD */ | ||
69 | CSIO_RNFE_NONE, /* FLOGI_RJT_RCVD */ | ||
70 | CSIO_RNFE_LOGGED_IN, /* FDISC_ACC_RCVD */ | ||
71 | CSIO_RNFE_NONE, /* FDISC_RJT_RCVD */ | ||
72 | CSIO_RNFE_NONE, /* FLOGI_TMO_MAX_RETRY */ | ||
73 | CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_ACC */ | ||
74 | CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_RJT */ | ||
75 | CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_CNFLT */ | ||
76 | CSIO_RNFE_NONE, /* PRLI_TMO */ | ||
77 | CSIO_RNFE_NONE, /* ADISC_TMO */ | ||
78 | CSIO_RNFE_NAME_MISSING, /* RSCN_DEV_LOST */ | ||
79 | CSIO_RNFE_NONE, /* SCR_ACC_RCVD */ | ||
80 | CSIO_RNFE_NONE, /* ADISC_RJT_RCVD */ | ||
81 | CSIO_RNFE_NONE, /* LOGO_SNT */ | ||
82 | CSIO_RNFE_LOGO_RECV, /* PROTO_ERR_IMPL_LOGO */ | ||
83 | }; | ||
84 | |||
85 | #define CSIO_FWE_TO_RNFE(_evt) ((_evt > PROTO_ERR_IMPL_LOGO) ? \ | ||
86 | CSIO_RNFE_NONE : \ | ||
87 | fwevt_to_rnevt[_evt]) | ||
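The CSIO_FWE_TO_RNFE() macro above is a bounds-checked table translation: any firmware event code past the last known entry degrades to CSIO_RNFE_NONE instead of indexing off the end of fwevt_to_rnevt[]. A minimal standalone C sketch of the same pattern (all names here are illustrative, not driver code):

#include <stdio.h>

enum fw_evt { FW_NONE, FW_PLOGI_ACC, FW_PLOGI_RJT, FW_EVT_LAST = FW_PLOGI_RJT };
enum sm_evt { SM_NONE, SM_LOGGED_IN };

static const enum sm_evt fw_to_sm[] = {
	[FW_NONE]      = SM_NONE,
	[FW_PLOGI_ACC] = SM_LOGGED_IN,
	[FW_PLOGI_RJT] = SM_NONE,
};

/* Out-of-range firmware codes fall back to SM_NONE, mirroring CSIO_FWE_TO_RNFE */
#define FW_TO_SM(_e) (((_e) > FW_EVT_LAST) ? SM_NONE : fw_to_sm[(_e)])

int main(void)
{
	printf("%d %d\n", FW_TO_SM(FW_PLOGI_ACC), FW_TO_SM(42)); /* prints: 1 0 */
	return 0;
}
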
88 | int | ||
89 | csio_is_rnode_ready(struct csio_rnode *rn) | ||
90 | { | ||
91 | return csio_match_state(rn, csio_rns_ready); | ||
92 | } | ||
93 | |||
94 | static int | ||
95 | csio_is_rnode_uninit(struct csio_rnode *rn) | ||
96 | { | ||
97 | return csio_match_state(rn, csio_rns_uninit); | ||
98 | } | ||
99 | |||
100 | static int | ||
101 | csio_is_rnode_wka(uint8_t rport_type) | ||
102 | { | ||
103 | if ((rport_type == FLOGI_VFPORT) || | ||
104 | (rport_type == FDISC_VFPORT) || | ||
105 | (rport_type == NS_VNPORT) || | ||
106 | (rport_type == FDMI_VNPORT)) | ||
107 | return 1; | ||
108 | |||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | /* | ||
113 | * csio_rn_lookup - Finds the rnode with the given flowid | ||
114 | * @ln: lnode | ||
115 | * @flowid: flowid. | ||
116 | * | ||
117 | * Does the rnode lookup on the given lnode and flowid. If no matching entry | ||
118 | * is found, NULL is returned. | ||
119 | */ | ||
120 | static struct csio_rnode * | ||
121 | csio_rn_lookup(struct csio_lnode *ln, uint32_t flowid) | ||
122 | { | ||
123 | struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead; | ||
124 | struct list_head *tmp; | ||
125 | struct csio_rnode *rn; | ||
126 | |||
127 | list_for_each(tmp, &rnhead->sm.sm_list) { | ||
128 | rn = (struct csio_rnode *) tmp; | ||
129 | if (rn->flowid == flowid) | ||
130 | return rn; | ||
131 | } | ||
132 | |||
133 | return NULL; | ||
134 | } | ||
135 | |||
136 | /* | ||
137 | * csio_rn_lookup_wwpn - Finds the rnode with the given wwpn | ||
138 | * @ln: lnode | ||
139 | * @wwpn: wwpn | ||
140 | * | ||
141 | * Does the rnode lookup on the given lnode and wwpn. If no matching entry | ||
142 | * found, NULL is returned. | ||
143 | */ | ||
144 | static struct csio_rnode * | ||
145 | csio_rn_lookup_wwpn(struct csio_lnode *ln, uint8_t *wwpn) | ||
146 | { | ||
147 | struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead; | ||
148 | struct list_head *tmp; | ||
149 | struct csio_rnode *rn; | ||
150 | |||
151 | list_for_each(tmp, &rnhead->sm.sm_list) { | ||
152 | rn = (struct csio_rnode *) tmp; | ||
153 | if (!memcmp(csio_rn_wwpn(rn), wwpn, 8)) | ||
154 | return rn; | ||
155 | } | ||
156 | |||
157 | return NULL; | ||
158 | } | ||
159 | |||
160 | /** | ||
161 | * csio_rnode_lookup_portid - Finds the rnode with the given portid | ||
162 | * @ln: lnode | ||
163 | * @portid: port id | ||
164 | * | ||
165 | * Lookup the rnode list for a given portid. If no matching entry | ||
166 | * found, NULL is returned. | ||
167 | */ | ||
168 | struct csio_rnode * | ||
169 | csio_rnode_lookup_portid(struct csio_lnode *ln, uint32_t portid) | ||
170 | { | ||
171 | struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead; | ||
172 | struct list_head *tmp; | ||
173 | struct csio_rnode *rn; | ||
174 | |||
175 | list_for_each(tmp, &rnhead->sm.sm_list) { | ||
176 | rn = (struct csio_rnode *) tmp; | ||
177 | if (rn->nport_id == portid) | ||
178 | return rn; | ||
179 | } | ||
180 | |||
181 | return NULL; | ||
182 | } | ||
183 | |||
184 | static int | ||
185 | csio_rn_dup_flowid(struct csio_lnode *ln, uint32_t rdev_flowid, | ||
186 | uint32_t *vnp_flowid) | ||
187 | { | ||
188 | struct csio_rnode *rnhead; | ||
189 | struct list_head *tmp, *tmp1; | ||
190 | struct csio_rnode *rn; | ||
191 | struct csio_lnode *ln_tmp; | ||
192 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
193 | |||
194 | list_for_each(tmp1, &hw->sln_head) { | ||
195 | ln_tmp = (struct csio_lnode *) tmp1; | ||
196 | if (ln_tmp == ln) | ||
197 | continue; | ||
198 | |||
199 | rnhead = (struct csio_rnode *)&ln_tmp->rnhead; | ||
200 | list_for_each(tmp, &rnhead->sm.sm_list) { | ||
201 | |||
202 | rn = (struct csio_rnode *) tmp; | ||
203 | if (csio_is_rnode_ready(rn)) { | ||
204 | if (rn->flowid == rdev_flowid) { | ||
205 | *vnp_flowid = csio_ln_flowid(ln_tmp); | ||
206 | return 1; | ||
207 | } | ||
208 | } | ||
209 | } | ||
210 | } | ||
211 | |||
212 | return 0; | ||
213 | } | ||
214 | |||
215 | static struct csio_rnode * | ||
216 | csio_alloc_rnode(struct csio_lnode *ln) | ||
217 | { | ||
218 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
219 | |||
220 | struct csio_rnode *rn = mempool_alloc(hw->rnode_mempool, GFP_ATOMIC); | ||
221 | if (!rn) | ||
222 | goto err; | ||
223 | |||
224 | memset(rn, 0, sizeof(struct csio_rnode)); | ||
225 | if (csio_rnode_init(rn, ln)) | ||
226 | goto err_free; | ||
227 | |||
228 | CSIO_INC_STATS(ln, n_rnode_alloc); | ||
229 | |||
230 | return rn; | ||
231 | |||
232 | err_free: | ||
233 | mempool_free(rn, hw->rnode_mempool); | ||
234 | err: | ||
235 | CSIO_INC_STATS(ln, n_rnode_nomem); | ||
236 | return NULL; | ||
237 | } | ||
238 | |||
239 | static void | ||
240 | csio_free_rnode(struct csio_rnode *rn) | ||
241 | { | ||
242 | struct csio_hw *hw = csio_lnode_to_hw(csio_rnode_to_lnode(rn)); | ||
243 | |||
244 | csio_rnode_exit(rn); | ||
245 | CSIO_INC_STATS(rn->lnp, n_rnode_free); | ||
246 | mempool_free(rn, hw->rnode_mempool); | ||
247 | } | ||
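csio_alloc_rnode()/csio_free_rnode() above lean on a mempool so rnode allocation can proceed in atomic context against a bounded reserve; a failed allocation is only counted (n_rnode_nomem), never retried here. A userspace sketch of the same reserve-pool idea, with illustrative names and sizes (this is not the kernel mempool API):

#include <stdio.h>

#define POOL_SZ 4

struct rnode_stub { struct rnode_stub *next; };

static struct rnode_stub slab[POOL_SZ], *freelist;

static void pool_init(void)
{
	for (int i = 0; i < POOL_SZ; i++) {
		slab[i].next = freelist;	/* thread every slot onto the freelist */
		freelist = &slab[i];
	}
}

static struct rnode_stub *pool_alloc(void)
{
	struct rnode_stub *n = freelist;
	if (n)
		freelist = n->next;		/* NULL here maps to the n_rnode_nomem path */
	return n;
}

static void pool_free(struct rnode_stub *n)
{
	n->next = freelist;			/* return the slot to the reserve */
	freelist = n;
}

int main(void)
{
	pool_init();
	struct rnode_stub *rn = pool_alloc();
	printf("alloc %s\n", rn ? "ok" : "failed");
	if (rn)
		pool_free(rn);
	return 0;
}
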
248 | |||
249 | /* | ||
250 | * csio_get_rnode - Gets rnode with the given flowid | ||
251 | * @ln: lnode | ||
252 | * @flowid: flow id. | ||
253 | * | ||
254 | * Does the rnode lookup on the given lnode and flowid. If no matching | ||
255 | * rnode is found, a new rnode is allocated with the given flowid and returned. | ||
256 | */ | ||
257 | static struct csio_rnode * | ||
258 | csio_get_rnode(struct csio_lnode *ln, uint32_t flowid) | ||
259 | { | ||
260 | struct csio_rnode *rn; | ||
261 | |||
262 | rn = csio_rn_lookup(ln, flowid); | ||
263 | if (!rn) { | ||
264 | rn = csio_alloc_rnode(ln); | ||
265 | if (!rn) | ||
266 | return NULL; | ||
267 | |||
268 | rn->flowid = flowid; | ||
269 | } | ||
270 | |||
271 | return rn; | ||
272 | } | ||
273 | |||
274 | /* | ||
275 | * csio_put_rnode - Frees the given rnode | ||
276 | * @ln: lnode | ||
277 | * @rn: rnode to be freed. | ||
278 | * | ||
279 | * Returns the rnode to the mempool. The rnode must be in the | ||
280 | * uninit state before it is freed. | ||
281 | */ | ||
282 | void | ||
283 | csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn) | ||
284 | { | ||
285 | CSIO_DB_ASSERT(csio_is_rnode_uninit(rn) != 0); | ||
286 | csio_free_rnode(rn); | ||
287 | } | ||
288 | |||
289 | /* | ||
290 | * csio_confirm_rnode - confirms rnode based on wwpn. | ||
291 | * @ln: lnode | ||
292 | * @rdev_flowid: remote device flowid | ||
293 | * @rdevp: remote device params | ||
294 | * This routine searches the rnode list for another rnode with the same wwpn | ||
295 | * as the new rnode. If there is a match, the matched rnode is returned; | ||
296 | * otherwise a new rnode is returned. | ||
297 | * Returns the rnode. | ||
298 | */ | ||
299 | struct csio_rnode * | ||
300 | csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid, | ||
301 | struct fcoe_rdev_entry *rdevp) | ||
302 | { | ||
303 | uint8_t rport_type; | ||
304 | struct csio_rnode *rn, *match_rn; | ||
305 | uint32_t vnp_flowid; | ||
306 | uint32_t *port_id; | ||
307 | |||
308 | port_id = (uint32_t *)&rdevp->r_id[0]; | ||
309 | rport_type = | ||
310 | FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type); | ||
311 | |||
312 | /* Drop rdev event for cntrl port */ | ||
313 | if (rport_type == FAB_CTLR_VNPORT) { | ||
314 | csio_ln_dbg(ln, | ||
315 | "Unhandled rport_type:%d recv in rdev evt " | ||
316 | "ssni:x%x\n", rport_type, rdev_flowid); | ||
317 | return NULL; | ||
318 | } | ||
319 | |||
320 | /* Lookup on flowid */ | ||
321 | rn = csio_rn_lookup(ln, rdev_flowid); | ||
322 | if (!rn) { | ||
323 | |||
324 | /* Drop events with duplicate flowid */ | ||
325 | if (csio_rn_dup_flowid(ln, rdev_flowid, &vnp_flowid)) { | ||
326 | csio_ln_warn(ln, | ||
327 | "ssni:%x already active on vnpi:%x", | ||
328 | rdev_flowid, vnp_flowid); | ||
329 | return NULL; | ||
330 | } | ||
331 | |||
332 | /* Lookup on wwpn for NPORTs */ | ||
333 | rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn); | ||
334 | if (!rn) | ||
335 | goto alloc_rnode; | ||
336 | |||
337 | } else { | ||
338 | /* Lookup well-known ports with nport id */ | ||
339 | if (csio_is_rnode_wka(rport_type)) { | ||
340 | match_rn = csio_rnode_lookup_portid(ln, | ||
341 | ((ntohl(*port_id) >> 8) & CSIO_DID_MASK)); | ||
342 | if (match_rn == NULL) { | ||
343 | csio_rn_flowid(rn) = CSIO_INVALID_IDX; | ||
344 | goto alloc_rnode; | ||
345 | } | ||
346 | |||
347 | /* | ||
348 | * Now compare the wwpn to confirm that | ||
349 | * same port relogged in. If so update the matched rn. | ||
350 | * Else, go ahead and alloc a new rnode. | ||
351 | */ | ||
352 | if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) { | ||
353 | if (csio_is_rnode_ready(rn)) { | ||
354 | csio_ln_warn(ln, | ||
355 | "rnode is already " | ||
356 | "active ssni:x%x\n", | ||
357 | rdev_flowid); | ||
358 | CSIO_ASSERT(0); | ||
359 | } | ||
360 | csio_rn_flowid(rn) = CSIO_INVALID_IDX; | ||
361 | rn = match_rn; | ||
362 | |||
363 | /* Update rn */ | ||
364 | goto found_rnode; | ||
365 | } | ||
366 | csio_rn_flowid(rn) = CSIO_INVALID_IDX; | ||
367 | goto alloc_rnode; | ||
368 | } | ||
369 | |||
370 | /* wwpn match */ | ||
371 | if (!memcmp(csio_rn_wwpn(rn), rdevp->wwpn, 8)) | ||
372 | goto found_rnode; | ||
373 | |||
374 | /* Search for rnode that have same wwpn */ | ||
375 | match_rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn); | ||
376 | if (match_rn != NULL) { | ||
377 | csio_ln_dbg(ln, | ||
378 | "ssni:x%x changed for rport name(wwpn):%llx " | ||
379 | "did:x%x\n", rdev_flowid, | ||
380 | wwn_to_u64(rdevp->wwpn), | ||
381 | match_rn->nport_id); | ||
382 | csio_rn_flowid(rn) = CSIO_INVALID_IDX; | ||
383 | rn = match_rn; | ||
384 | } else { | ||
385 | csio_ln_dbg(ln, | ||
386 | "rnode wwpn mismatch found ssni:x%x " | ||
387 | "name(wwpn):%llx\n", | ||
388 | rdev_flowid, | ||
389 | wwn_to_u64(csio_rn_wwpn(rn))); | ||
390 | if (csio_is_rnode_ready(rn)) { | ||
391 | csio_ln_warn(ln, | ||
392 | "rnode is already active " | ||
393 | "wwpn:%llx ssni:x%x\n", | ||
394 | wwn_to_u64(csio_rn_wwpn(rn)), | ||
395 | rdev_flowid); | ||
396 | CSIO_ASSERT(0); | ||
397 | } | ||
398 | csio_rn_flowid(rn) = CSIO_INVALID_IDX; | ||
399 | goto alloc_rnode; | ||
400 | } | ||
401 | } | ||
402 | |||
403 | found_rnode: | ||
404 | csio_ln_dbg(ln, "found rnode:%p ssni:x%x name(wwpn):%llx\n", | ||
405 | rn, rdev_flowid, wwn_to_u64(rdevp->wwpn)); | ||
406 | |||
407 | /* Update flowid */ | ||
408 | csio_rn_flowid(rn) = rdev_flowid; | ||
409 | |||
410 | /* update rdev entry */ | ||
411 | rn->rdev_entry = rdevp; | ||
412 | CSIO_INC_STATS(ln, n_rnode_match); | ||
413 | return rn; | ||
414 | |||
415 | alloc_rnode: | ||
416 | rn = csio_get_rnode(ln, rdev_flowid); | ||
417 | if (!rn) | ||
418 | return NULL; | ||
419 | |||
420 | csio_ln_dbg(ln, "alloc rnode:%p ssni:x%x name(wwpn):%llx\n", | ||
421 | rn, rdev_flowid, wwn_to_u64(rdevp->wwpn)); | ||
422 | |||
423 | /* update rdev entry */ | ||
424 | rn->rdev_entry = rdevp; | ||
425 | return rn; | ||
426 | } | ||
427 | |||
428 | /* | ||
429 | * csio_rn_verify_rparams - verify rparams. | ||
430 | * @ln: lnode | ||
431 | * @rn: rnode | ||
432 | * @rdevp: remote device params | ||
433 | * returns success if rparams are verified. | ||
434 | */ | ||
435 | static int | ||
436 | csio_rn_verify_rparams(struct csio_lnode *ln, struct csio_rnode *rn, | ||
437 | struct fcoe_rdev_entry *rdevp) | ||
438 | { | ||
439 | uint8_t null[8]; | ||
440 | uint8_t rport_type; | ||
441 | uint8_t fc_class; | ||
442 | uint32_t *did; | ||
443 | |||
444 | did = (uint32_t *) &rdevp->r_id[0]; | ||
445 | rport_type = | ||
446 | FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type); | ||
447 | switch (rport_type) { | ||
448 | case FLOGI_VFPORT: | ||
449 | rn->role = CSIO_RNFR_FABRIC; | ||
450 | if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_FLOGI) { | ||
451 | csio_ln_err(ln, "ssni:x%x invalid fabric portid\n", | ||
452 | csio_rn_flowid(rn)); | ||
453 | return -EINVAL; | ||
454 | } | ||
455 | /* NPIV support */ | ||
456 | if (FW_RDEV_WR_NPIV_GET(rdevp->vft_to_qos)) | ||
457 | ln->flags |= CSIO_LNF_NPIVSUPP; | ||
458 | |||
459 | break; | ||
460 | |||
461 | case NS_VNPORT: | ||
462 | rn->role = CSIO_RNFR_NS; | ||
463 | if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_DIR_SERV) { | ||
464 | csio_ln_err(ln, "ssni:x%x invalid fabric portid\n", | ||
465 | csio_rn_flowid(rn)); | ||
466 | return -EINVAL; | ||
467 | } | ||
468 | break; | ||
469 | |||
470 | case REG_FC4_VNPORT: | ||
471 | case REG_VNPORT: | ||
472 | rn->role = CSIO_RNFR_NPORT; | ||
473 | if (rdevp->event_cause == PRLI_ACC_RCVD || | ||
474 | rdevp->event_cause == PRLI_RCVD) { | ||
475 | if (FW_RDEV_WR_TASK_RETRY_ID_GET( | ||
476 | rdevp->enh_disc_to_tgt)) | ||
477 | rn->fcp_flags |= FCP_SPPF_OVLY_ALLOW; | ||
478 | |||
479 | if (FW_RDEV_WR_RETRY_GET(rdevp->enh_disc_to_tgt)) | ||
480 | rn->fcp_flags |= FCP_SPPF_RETRY; | ||
481 | |||
482 | if (FW_RDEV_WR_CONF_CMPL_GET(rdevp->enh_disc_to_tgt)) | ||
483 | rn->fcp_flags |= FCP_SPPF_CONF_COMPL; | ||
484 | |||
485 | if (FW_RDEV_WR_TGT_GET(rdevp->enh_disc_to_tgt)) | ||
486 | rn->role |= CSIO_RNFR_TARGET; | ||
487 | |||
488 | if (FW_RDEV_WR_INI_GET(rdevp->enh_disc_to_tgt)) | ||
489 | rn->role |= CSIO_RNFR_INITIATOR; | ||
490 | } | ||
491 | |||
492 | break; | ||
493 | |||
494 | case FDMI_VNPORT: | ||
495 | case FAB_CTLR_VNPORT: | ||
496 | rn->role = 0; | ||
497 | break; | ||
498 | |||
499 | default: | ||
500 | csio_ln_err(ln, "ssni:x%x invalid rport type recv x%x\n", | ||
501 | csio_rn_flowid(rn), rport_type); | ||
502 | return -EINVAL; | ||
503 | } | ||
504 | |||
505 | /* validate wwpn/wwnn for Name server/remote port */ | ||
506 | if (rport_type == REG_VNPORT || rport_type == NS_VNPORT) { | ||
507 | memset(null, 0, 8); | ||
508 | if (!memcmp(rdevp->wwnn, null, 8)) { | ||
509 | csio_ln_err(ln, | ||
510 | "ssni:x%x invalid wwnn received from" | ||
511 | " rport did:x%x\n", | ||
512 | csio_rn_flowid(rn), | ||
513 | (ntohl(*did) & CSIO_DID_MASK)); | ||
514 | return -EINVAL; | ||
515 | } | ||
516 | |||
517 | if (!memcmp(rdevp->wwpn, null, 8)) { | ||
518 | csio_ln_err(ln, | ||
519 | "ssni:x%x invalid wwpn received from" | ||
520 | " rport did:x%x\n", | ||
521 | csio_rn_flowid(rn), | ||
522 | (ntohl(*did) & CSIO_DID_MASK)); | ||
523 | return -EINVAL; | ||
524 | } | ||
525 | |||
526 | } | ||
527 | |||
528 | /* Copy wwnn, wwpn and nport id */ | ||
529 | rn->nport_id = (ntohl(*did) >> 8) & CSIO_DID_MASK; | ||
530 | memcpy(csio_rn_wwnn(rn), rdevp->wwnn, 8); | ||
531 | memcpy(csio_rn_wwpn(rn), rdevp->wwpn, 8); | ||
532 | rn->rn_sparm.csp.sp_bb_data = ntohs(rdevp->rcv_fr_sz); | ||
533 | fc_class = FW_RDEV_WR_CLASS_GET(rdevp->vft_to_qos); | ||
534 | rn->rn_sparm.clsp[fc_class - 1].cp_class = htons(FC_CPC_VALID); | ||
535 | return 0; | ||
536 | } | ||
537 | |||
538 | static void | ||
539 | __csio_reg_rnode(struct csio_rnode *rn) | ||
540 | { | ||
541 | struct csio_lnode *ln = csio_rnode_to_lnode(rn); | ||
542 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
543 | |||
544 | spin_unlock_irq(&hw->lock); | ||
545 | csio_reg_rnode(rn); | ||
546 | spin_lock_irq(&hw->lock); | ||
547 | |||
548 | if (rn->role & CSIO_RNFR_TARGET) | ||
549 | ln->n_scsi_tgts++; | ||
550 | |||
551 | if (rn->nport_id == FC_FID_MGMT_SERV) | ||
552 | csio_ln_fdmi_start(ln, (void *) rn); | ||
553 | } | ||
554 | |||
555 | static void | ||
556 | __csio_unreg_rnode(struct csio_rnode *rn) | ||
557 | { | ||
558 | struct csio_lnode *ln = csio_rnode_to_lnode(rn); | ||
559 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
560 | LIST_HEAD(tmp_q); | ||
561 | int cmpl = 0; | ||
562 | |||
563 | if (!list_empty(&rn->host_cmpl_q)) { | ||
564 | csio_dbg(hw, "Returning completion queue I/Os\n"); | ||
565 | list_splice_tail_init(&rn->host_cmpl_q, &tmp_q); | ||
566 | cmpl = 1; | ||
567 | } | ||
568 | |||
569 | if (rn->role & CSIO_RNFR_TARGET) { | ||
570 | ln->n_scsi_tgts--; | ||
571 | ln->last_scan_ntgts--; | ||
572 | } | ||
573 | |||
574 | spin_unlock_irq(&hw->lock); | ||
575 | csio_unreg_rnode(rn); | ||
576 | spin_lock_irq(&hw->lock); | ||
577 | |||
578 | /* Cleanup I/Os that were waiting for rnode to unregister */ | ||
579 | if (cmpl) | ||
580 | csio_scsi_cleanup_io_q(csio_hw_to_scsim(hw), &tmp_q); | ||
581 | |||
582 | } | ||
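Both __csio_reg_rnode() and __csio_unreg_rnode() above drop hw->lock around the FC transport calls, because csio_reg_rnode()/csio_unreg_rnode() may sleep, and retake it before touching driver state again. A compilable userspace sketch of that lock-drop pattern, with a pthread mutex standing in for the spinlock (illustrative only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

static void transport_call_may_sleep(void)
{
	puts("transport work done outside the lock");
}

static void reg_node(void)	/* caller holds hw_lock, like __csio_reg_rnode */
{
	pthread_mutex_unlock(&hw_lock);	/* must not hold a spinlock across a sleep */
	transport_call_may_sleep();
	pthread_mutex_lock(&hw_lock);	/* shared state may be touched again here */
}

int main(void)
{
	pthread_mutex_lock(&hw_lock);
	reg_node();
	pthread_mutex_unlock(&hw_lock);
	return 0;
}

The same reason explains the splice in __csio_unreg_rnode(): pending I/Os are moved to a private list under the lock, and cleaned up only after the transport call, outside any window where the list could be mutated concurrently.
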
583 | |||
584 | /*****************************************************************************/ | ||
585 | /* START: Rnode SM */ | ||
586 | /*****************************************************************************/ | ||
587 | |||
588 | /* | ||
589 | * csio_rns_uninit - SM handler for the rnode UNINIT state. | ||
590 | * @rn: rnode | ||
591 | * @evt: SM event. | ||
592 | * | ||
593 | */ | ||
594 | static void | ||
595 | csio_rns_uninit(struct csio_rnode *rn, enum csio_rn_ev evt) | ||
596 | { | ||
597 | struct csio_lnode *ln = csio_rnode_to_lnode(rn); | ||
598 | int ret = 0; | ||
599 | |||
600 | CSIO_INC_STATS(rn, n_evt_sm[evt]); | ||
601 | |||
602 | switch (evt) { | ||
603 | case CSIO_RNFE_LOGGED_IN: | ||
604 | case CSIO_RNFE_PLOGI_RECV: | ||
605 | ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry); | ||
606 | if (!ret) { | ||
607 | csio_set_state(&rn->sm, csio_rns_ready); | ||
608 | __csio_reg_rnode(rn); | ||
609 | } else { | ||
610 | CSIO_INC_STATS(rn, n_err_inval); | ||
611 | } | ||
612 | break; | ||
613 | case CSIO_RNFE_LOGO_RECV: | ||
614 | csio_ln_dbg(ln, | ||
615 | "ssni:x%x Ignoring event %d recv " | ||
616 | "in rn state[uninit]\n", csio_rn_flowid(rn), evt); | ||
617 | CSIO_INC_STATS(rn, n_evt_drop); | ||
618 | break; | ||
619 | default: | ||
620 | csio_ln_dbg(ln, | ||
621 | "ssni:x%x unexp event %d recv " | ||
622 | "in rn state[uninit]\n", csio_rn_flowid(rn), evt); | ||
623 | CSIO_INC_STATS(rn, n_evt_unexp); | ||
624 | break; | ||
625 | } | ||
626 | } | ||
627 | |||
628 | /* | ||
629 | * csio_rns_ready - SM handler for the rnode READY state. | ||
630 | * @rn: rnode | ||
631 | * @evt: SM event. | ||
632 | * | ||
633 | */ | ||
634 | static void | ||
635 | csio_rns_ready(struct csio_rnode *rn, enum csio_rn_ev evt) | ||
636 | { | ||
637 | struct csio_lnode *ln = csio_rnode_to_lnode(rn); | ||
638 | int ret = 0; | ||
639 | |||
640 | CSIO_INC_STATS(rn, n_evt_sm[evt]); | ||
641 | |||
642 | switch (evt) { | ||
643 | case CSIO_RNFE_LOGGED_IN: | ||
644 | case CSIO_RNFE_PLOGI_RECV: | ||
645 | csio_ln_dbg(ln, | ||
646 | "ssni:x%x Ignoring event %d recv from did:x%x " | ||
647 | "in rn state[ready]\n", csio_rn_flowid(rn), evt, | ||
648 | rn->nport_id); | ||
649 | CSIO_INC_STATS(rn, n_evt_drop); | ||
650 | break; | ||
651 | |||
652 | case CSIO_RNFE_PRLI_DONE: | ||
653 | case CSIO_RNFE_PRLI_RECV: | ||
654 | ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry); | ||
655 | if (!ret) | ||
656 | __csio_reg_rnode(rn); | ||
657 | else | ||
658 | CSIO_INC_STATS(rn, n_err_inval); | ||
659 | |||
660 | break; | ||
661 | case CSIO_RNFE_DOWN: | ||
662 | csio_set_state(&rn->sm, csio_rns_offline); | ||
663 | __csio_unreg_rnode(rn); | ||
664 | |||
665 | /* FW is expected to internally abort outstanding SCSI WRs | ||
666 | * and return all SCSI WRs to the host with status "ABORTED". | ||
667 | */ | ||
668 | break; | ||
669 | |||
670 | case CSIO_RNFE_LOGO_RECV: | ||
671 | csio_set_state(&rn->sm, csio_rns_offline); | ||
672 | |||
673 | __csio_unreg_rnode(rn); | ||
674 | |||
675 | /* FW is expected to internally abort outstanding SCSI WRs | ||
676 | * and return all SCSI WRs to the host with status "ABORTED". | ||
677 | */ | ||
678 | break; | ||
679 | |||
680 | case CSIO_RNFE_CLOSE: | ||
681 | /* | ||
682 | * Each rnode receives a CLOSE event when the driver is removed | ||
683 | * or the device is reset. | ||
684 | * Note: All outstanding I/Os on the remote port need to be | ||
685 | * returned to the upper layer with an appropriate error before | ||
686 | * sending the CLOSE event. | ||
687 | */ | ||
688 | csio_set_state(&rn->sm, csio_rns_uninit); | ||
689 | __csio_unreg_rnode(rn); | ||
690 | break; | ||
691 | |||
692 | case CSIO_RNFE_NAME_MISSING: | ||
693 | csio_set_state(&rn->sm, csio_rns_disappeared); | ||
694 | __csio_unreg_rnode(rn); | ||
695 | |||
696 | /* | ||
697 | * FW is expected to internally abort outstanding SCSI WRs | ||
698 | * and return all SCSI WRs to the host with status "ABORTED". | ||
699 | */ | ||
700 | |||
701 | break; | ||
702 | |||
703 | default: | ||
704 | csio_ln_dbg(ln, | ||
705 | "ssni:x%x unexp event %d recv from did:x%x " | ||
706 | "in rn state[ready]\n", csio_rn_flowid(rn), evt, | ||
707 | rn->nport_id); | ||
708 | CSIO_INC_STATS(rn, n_evt_unexp); | ||
709 | break; | ||
710 | } | ||
711 | } | ||
712 | |||
713 | /* | ||
714 | * csio_rns_offline - SM handler for the rnode OFFLINE state. | ||
715 | * @rn: rnode | ||
716 | * @evt: SM event. | ||
717 | * | ||
718 | */ | ||
719 | static void | ||
720 | csio_rns_offline(struct csio_rnode *rn, enum csio_rn_ev evt) | ||
721 | { | ||
722 | struct csio_lnode *ln = csio_rnode_to_lnode(rn); | ||
723 | int ret = 0; | ||
724 | |||
725 | CSIO_INC_STATS(rn, n_evt_sm[evt]); | ||
726 | |||
727 | switch (evt) { | ||
728 | case CSIO_RNFE_LOGGED_IN: | ||
729 | case CSIO_RNFE_PLOGI_RECV: | ||
730 | ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry); | ||
731 | if (!ret) { | ||
732 | csio_set_state(&rn->sm, csio_rns_ready); | ||
733 | __csio_reg_rnode(rn); | ||
734 | } else { | ||
735 | CSIO_INC_STATS(rn, n_err_inval); | ||
736 | csio_post_event(&rn->sm, CSIO_RNFE_CLOSE); | ||
737 | } | ||
738 | break; | ||
739 | |||
740 | case CSIO_RNFE_DOWN: | ||
741 | csio_ln_dbg(ln, | ||
742 | "ssni:x%x Ignoring event %d recv from did:x%x " | ||
743 | "in rn state[offline]\n", csio_rn_flowid(rn), evt, | ||
744 | rn->nport_id); | ||
745 | CSIO_INC_STATS(rn, n_evt_drop); | ||
746 | break; | ||
747 | |||
748 | case CSIO_RNFE_CLOSE: | ||
749 | /* Each rnode receives a CLOSE event when the driver is removed | ||
750 | * or the device is reset. | ||
751 | * Note: All outstanding I/Os on the remote port need to be | ||
752 | * returned to the upper layer with an appropriate error before | ||
753 | * sending the CLOSE event. | ||
754 | */ | ||
755 | csio_set_state(&rn->sm, csio_rns_uninit); | ||
756 | break; | ||
757 | |||
758 | case CSIO_RNFE_NAME_MISSING: | ||
759 | csio_set_state(&rn->sm, csio_rns_disappeared); | ||
760 | break; | ||
761 | |||
762 | default: | ||
763 | csio_ln_dbg(ln, | ||
764 | "ssni:x%x unexp event %d recv from did:x%x " | ||
765 | "in rn state[offline]\n", csio_rn_flowid(rn), evt, | ||
766 | rn->nport_id); | ||
767 | CSIO_INC_STATS(rn, n_evt_unexp); | ||
768 | break; | ||
769 | } | ||
770 | } | ||
771 | |||
772 | /* | ||
773 | * csio_rns_disappeared - SM handler for the rnode DISAPPEARED state. | ||
774 | * @rn: rnode | ||
775 | * @evt: SM event. | ||
776 | * | ||
777 | */ | ||
778 | static void | ||
779 | csio_rns_disappeared(struct csio_rnode *rn, enum csio_rn_ev evt) | ||
780 | { | ||
781 | struct csio_lnode *ln = csio_rnode_to_lnode(rn); | ||
782 | int ret = 0; | ||
783 | |||
784 | CSIO_INC_STATS(rn, n_evt_sm[evt]); | ||
785 | |||
786 | switch (evt) { | ||
787 | case CSIO_RNFE_LOGGED_IN: | ||
788 | case CSIO_RNFE_PLOGI_RECV: | ||
789 | ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry); | ||
790 | if (!ret) { | ||
791 | csio_set_state(&rn->sm, csio_rns_ready); | ||
792 | __csio_reg_rnode(rn); | ||
793 | } else { | ||
794 | CSIO_INC_STATS(rn, n_err_inval); | ||
795 | csio_post_event(&rn->sm, CSIO_RNFE_CLOSE); | ||
796 | } | ||
797 | break; | ||
798 | |||
799 | case CSIO_RNFE_CLOSE: | ||
800 | /* Each rnode receives a CLOSE event when the driver is removed | ||
801 | * or the device is reset. | ||
802 | * Note: All outstanding I/Os on the remote port need to be | ||
803 | * returned to the upper layer with an appropriate error before | ||
804 | * sending the CLOSE event. | ||
805 | */ | ||
806 | csio_set_state(&rn->sm, csio_rns_uninit); | ||
807 | break; | ||
808 | |||
809 | case CSIO_RNFE_DOWN: | ||
810 | case CSIO_RNFE_NAME_MISSING: | ||
811 | csio_ln_dbg(ln, | ||
812 | "ssni:x%x Ignoring event %d recv from did x%x " | ||
813 | "in rn state[disappeared]\n", csio_rn_flowid(rn), | ||
814 | evt, rn->nport_id); | ||
815 | break; | ||
816 | |||
817 | default: | ||
818 | csio_ln_dbg(ln, | ||
819 | "ssni:x%x unexp event %d recv from did x%x " | ||
820 | "in rn state[disappeared]\n", csio_rn_flowid(rn), | ||
821 | evt, rn->nport_id); | ||
822 | CSIO_INC_STATS(rn, n_evt_unexp); | ||
823 | break; | ||
824 | } | ||
825 | } | ||
826 | |||
827 | /*****************************************************************************/ | ||
828 | /* END: Rnode SM */ | ||
829 | /*****************************************************************************/ | ||
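The rnode state machine above is a function-pointer SM: the current state is the handler function itself, csio_set_state() swaps the pointer, csio_post_event() invokes it, and csio_match_state() compares against a handler to test the state. A minimal standalone sketch of that shape (names illustrative, not the csio_defs.h implementation):

#include <stdio.h>

struct sm;
typedef void (*sm_state_t)(struct sm *, int evt);
struct sm { sm_state_t state; };	/* state == current handler */

static void st_ready(struct sm *s, int evt);

static void st_uninit(struct sm *s, int evt)
{
	if (evt == 1)
		s->state = st_ready;	/* csio_set_state() analogue */
}

static void st_ready(struct sm *s, int evt)
{
	if (evt == 2)
		s->state = st_uninit;
}

int main(void)
{
	struct sm s = { st_uninit };
	s.state(&s, 1);				/* csio_post_event() analogue */
	printf("ready? %d\n", s.state == st_ready); /* csio_match_state() analogue */
	return 0;
}
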
830 | |||
831 | /* | ||
832 | * csio_rnode_devloss_handler - Device loss event handler | ||
833 | * @rn: rnode | ||
834 | * | ||
835 | * Post event to close rnode SM and free rnode. | ||
836 | */ | ||
837 | void | ||
838 | csio_rnode_devloss_handler(struct csio_rnode *rn) | ||
839 | { | ||
840 | struct csio_lnode *ln = csio_rnode_to_lnode(rn); | ||
841 | |||
842 | /* ignore if same rnode came back as online */ | ||
843 | if (csio_is_rnode_ready(rn)) | ||
844 | return; | ||
845 | |||
846 | csio_post_event(&rn->sm, CSIO_RNFE_CLOSE); | ||
847 | |||
848 | /* Free rn if in uninit state */ | ||
849 | if (csio_is_rnode_uninit(rn)) | ||
850 | csio_put_rnode(ln, rn); | ||
851 | } | ||
852 | |||
853 | /** | ||
854 | * csio_rnode_fwevt_handler - Event handler for firmware rnode events. | ||
855 | * @rn: rnode | ||
856 | * @fwevt: firmware rdev event | ||
857 | */ | ||
858 | void | ||
859 | csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt) | ||
860 | { | ||
861 | struct csio_lnode *ln = csio_rnode_to_lnode(rn); | ||
862 | enum csio_rn_ev evt; | ||
863 | |||
864 | evt = CSIO_FWE_TO_RNFE(fwevt); | ||
865 | if (!evt) { | ||
866 | csio_ln_err(ln, "ssni:x%x Unhandled FW Rdev event: %d\n", | ||
867 | csio_rn_flowid(rn), fwevt); | ||
868 | CSIO_INC_STATS(rn, n_evt_unexp); | ||
869 | return; | ||
870 | } | ||
871 | CSIO_INC_STATS(rn, n_evt_fw[fwevt]); | ||
872 | |||
873 | /* Track previous & current events for debugging */ | ||
874 | rn->prev_evt = rn->cur_evt; | ||
875 | rn->cur_evt = fwevt; | ||
876 | |||
877 | /* Post event to rnode SM */ | ||
878 | csio_post_event(&rn->sm, evt); | ||
879 | |||
880 | /* Free rn if in uninit state */ | ||
881 | if (csio_is_rnode_uninit(rn)) | ||
882 | csio_put_rnode(ln, rn); | ||
883 | } | ||
884 | |||
885 | /* | ||
886 | * csio_rnode_init - Initialize rnode. | ||
887 | * @rn: RNode | ||
888 | * @ln: Associated lnode | ||
889 | * | ||
890 | * Caller is responsible for holding the lock. The lock is required | ||
891 | * to be held for inserting the rnode in ln->rnhead list. | ||
892 | */ | ||
893 | static int | ||
894 | csio_rnode_init(struct csio_rnode *rn, struct csio_lnode *ln) | ||
895 | { | ||
896 | csio_rnode_to_lnode(rn) = ln; | ||
897 | csio_init_state(&rn->sm, csio_rns_uninit); | ||
898 | INIT_LIST_HEAD(&rn->host_cmpl_q); | ||
899 | csio_rn_flowid(rn) = CSIO_INVALID_IDX; | ||
900 | |||
901 | /* Add rnode to list of lnodes->rnhead */ | ||
902 | list_add_tail(&rn->sm.sm_list, &ln->rnhead); | ||
903 | |||
904 | return 0; | ||
905 | } | ||
906 | |||
907 | static void | ||
908 | csio_rnode_exit(struct csio_rnode *rn) | ||
909 | { | ||
910 | list_del_init(&rn->sm.sm_list); | ||
911 | CSIO_DB_ASSERT(list_empty(&rn->host_cmpl_q)); | ||
912 | } | ||
diff --git a/drivers/scsi/csiostor/csio_rnode.h b/drivers/scsi/csiostor/csio_rnode.h new file mode 100644 index 000000000000..a3b434c801da --- /dev/null +++ b/drivers/scsi/csiostor/csio_rnode.h | |||
@@ -0,0 +1,141 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio FCoE driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #ifndef __CSIO_RNODE_H__ | ||
36 | #define __CSIO_RNODE_H__ | ||
37 | |||
38 | #include "csio_defs.h" | ||
39 | |||
40 | /* State machine events */ | ||
41 | enum csio_rn_ev { | ||
42 | CSIO_RNFE_NONE = (uint32_t)0, /* None */ | ||
43 | CSIO_RNFE_LOGGED_IN, /* [N/F]Port login | ||
44 | * complete. | ||
45 | */ | ||
46 | CSIO_RNFE_PRLI_DONE, /* PRLI completed */ | ||
47 | CSIO_RNFE_PLOGI_RECV, /* Received PLOGI */ | ||
48 | CSIO_RNFE_PRLI_RECV, /* Received PRLI */ | ||
49 | CSIO_RNFE_LOGO_RECV, /* Received LOGO */ | ||
50 | CSIO_RNFE_PRLO_RECV, /* Received PRLO */ | ||
51 | CSIO_RNFE_DOWN, /* Rnode is down */ | ||
52 | CSIO_RNFE_CLOSE, /* Close rnode */ | ||
53 | CSIO_RNFE_NAME_MISSING, /* Rnode name missing | ||
54 | * in name server. | ||
55 | */ | ||
56 | CSIO_RNFE_MAX_EVENT, | ||
57 | }; | ||
58 | |||
59 | /* rnode stats */ | ||
60 | struct csio_rnode_stats { | ||
61 | uint32_t n_err; /* error */ | ||
62 | uint32_t n_err_inval; /* invalid parameter */ | ||
63 | uint32_t n_err_nomem; /* error nomem */ | ||
64 | uint32_t n_evt_unexp; /* unexpected event */ | ||
65 | uint32_t n_evt_drop; /* dropped event */ | ||
66 | uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */ | ||
67 | enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */ | ||
68 | uint32_t n_lun_rst; /* Number of resets | ||
69 | * of LUNs under this | ||
70 | * target | ||
71 | */ | ||
72 | uint32_t n_lun_rst_fail; /* Number of LUN reset | ||
73 | * failures. | ||
74 | */ | ||
75 | uint32_t n_tgt_rst; /* Number of target resets */ | ||
76 | uint32_t n_tgt_rst_fail; /* Number of target reset | ||
77 | * failures. | ||
78 | */ | ||
79 | }; | ||
80 | |||
81 | /* Defines for rnode role */ | ||
82 | #define CSIO_RNFR_INITIATOR 0x1 | ||
83 | #define CSIO_RNFR_TARGET 0x2 | ||
84 | #define CSIO_RNFR_FABRIC 0x4 | ||
85 | #define CSIO_RNFR_NS 0x8 | ||
86 | #define CSIO_RNFR_NPORT 0x10 | ||
87 | |||
88 | struct csio_rnode { | ||
89 | struct csio_sm sm; /* State machine - | ||
90 | * should be the | ||
91 | * 1st member | ||
92 | */ | ||
93 | struct csio_lnode *lnp; /* Pointer to owning | ||
94 | * Lnode */ | ||
95 | uint32_t flowid; /* Firmware ID */ | ||
96 | struct list_head host_cmpl_q; /* SCSI IOs | ||
97 | * pending completion | ||
98 | * to the mid-layer. | ||
99 | */ | ||
100 | /* FC identifiers for remote node */ | ||
101 | uint32_t nport_id; | ||
102 | uint16_t fcp_flags; /* FCP Flags */ | ||
103 | uint8_t cur_evt; /* Current event */ | ||
104 | uint8_t prev_evt; /* Previous event */ | ||
105 | uint32_t role; /* Fabric/Target/ | ||
106 | * Initiator/NS | ||
107 | */ | ||
108 | struct fcoe_rdev_entry *rdev_entry; /* Rdev entry */ | ||
109 | struct csio_service_parms rn_sparm; | ||
110 | |||
111 | /* FC transport attributes */ | ||
112 | struct fc_rport *rport; /* FC transport rport */ | ||
113 | uint32_t supp_classes; /* Supported FC classes */ | ||
114 | uint32_t maxframe_size; /* Max Frame size */ | ||
115 | uint32_t scsi_id; /* Transport given SCSI id */ | ||
116 | |||
117 | struct csio_rnode_stats stats; /* Common rnode stats */ | ||
118 | }; | ||
119 | |||
120 | #define csio_rn_flowid(rn) ((rn)->flowid) | ||
121 | #define csio_rn_wwpn(rn) ((rn)->rn_sparm.wwpn) | ||
122 | #define csio_rn_wwnn(rn) ((rn)->rn_sparm.wwnn) | ||
123 | #define csio_rnode_to_lnode(rn) ((rn)->lnp) | ||
124 | |||
125 | int csio_is_rnode_ready(struct csio_rnode *rn); | ||
126 | void csio_rnode_state_to_str(struct csio_rnode *rn, int8_t *str); | ||
127 | |||
128 | struct csio_rnode *csio_rnode_lookup_portid(struct csio_lnode *, uint32_t); | ||
129 | struct csio_rnode *csio_confirm_rnode(struct csio_lnode *, | ||
130 | uint32_t, struct fcoe_rdev_entry *); | ||
131 | |||
132 | void csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt); | ||
133 | |||
134 | void csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn); | ||
135 | |||
136 | void csio_reg_rnode(struct csio_rnode *); | ||
137 | void csio_unreg_rnode(struct csio_rnode *); | ||
138 | |||
139 | void csio_rnode_devloss_handler(struct csio_rnode *); | ||
140 | |||
141 | #endif /* ifndef __CSIO_RNODE_H__ */ | ||
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c new file mode 100644 index 000000000000..fdbd7daf01f2 --- /dev/null +++ b/drivers/scsi/csiostor/csio_scsi.c | |||
@@ -0,0 +1,2555 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio FCoE driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/device.h> | ||
36 | #include <linux/delay.h> | ||
37 | #include <linux/ctype.h> | ||
38 | #include <linux/kernel.h> | ||
39 | #include <linux/slab.h> | ||
40 | #include <linux/string.h> | ||
41 | #include <linux/compiler.h> | ||
42 | #include <linux/export.h> | ||
43 | #include <linux/module.h> | ||
44 | #include <asm/unaligned.h> | ||
45 | #include <asm/page.h> | ||
46 | #include <scsi/scsi.h> | ||
47 | #include <scsi/scsi_device.h> | ||
48 | #include <scsi/scsi_transport_fc.h> | ||
49 | |||
50 | #include "csio_hw.h" | ||
51 | #include "csio_lnode.h" | ||
52 | #include "csio_rnode.h" | ||
53 | #include "csio_scsi.h" | ||
54 | #include "csio_init.h" | ||
55 | |||
56 | int csio_scsi_eqsize = 65536; | ||
57 | int csio_scsi_iqlen = 128; | ||
58 | int csio_scsi_ioreqs = 2048; | ||
59 | uint32_t csio_max_scan_tmo; | ||
60 | uint32_t csio_delta_scan_tmo = 5; | ||
61 | int csio_lun_qdepth = 32; | ||
62 | |||
63 | static int csio_ddp_descs = 128; | ||
64 | |||
65 | static int csio_do_abrt_cls(struct csio_hw *, | ||
66 | struct csio_ioreq *, bool); | ||
67 | |||
68 | static void csio_scsis_uninit(struct csio_ioreq *, enum csio_scsi_ev); | ||
69 | static void csio_scsis_io_active(struct csio_ioreq *, enum csio_scsi_ev); | ||
70 | static void csio_scsis_tm_active(struct csio_ioreq *, enum csio_scsi_ev); | ||
71 | static void csio_scsis_aborting(struct csio_ioreq *, enum csio_scsi_ev); | ||
72 | static void csio_scsis_closing(struct csio_ioreq *, enum csio_scsi_ev); | ||
73 | static void csio_scsis_shost_cmpl_await(struct csio_ioreq *, enum csio_scsi_ev); | ||
74 | |||
75 | /* | ||
76 | * csio_scsi_match_io - Match an ioreq with the given SCSI level data. | ||
77 | * @ioreq: The I/O request | ||
78 | * @sld: Level information | ||
79 | * | ||
80 | * Should be called with lock held. | ||
81 | * | ||
82 | */ | ||
83 | static bool | ||
84 | csio_scsi_match_io(struct csio_ioreq *ioreq, struct csio_scsi_level_data *sld) | ||
85 | { | ||
86 | struct scsi_cmnd *scmnd = csio_scsi_cmnd(ioreq); | ||
87 | |||
88 | switch (sld->level) { | ||
89 | case CSIO_LEV_LUN: | ||
90 | if (scmnd == NULL) | ||
91 | return false; | ||
92 | |||
93 | return ((ioreq->lnode == sld->lnode) && | ||
94 | (ioreq->rnode == sld->rnode) && | ||
95 | ((uint64_t)scmnd->device->lun == sld->oslun)); | ||
96 | |||
97 | case CSIO_LEV_RNODE: | ||
98 | return ((ioreq->lnode == sld->lnode) && | ||
99 | (ioreq->rnode == sld->rnode)); | ||
100 | case CSIO_LEV_LNODE: | ||
101 | return (ioreq->lnode == sld->lnode); | ||
102 | case CSIO_LEV_ALL: | ||
103 | return true; | ||
104 | default: | ||
105 | return false; | ||
106 | } | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * csio_scsi_gather_active_ios - Gather active I/Os based on level | ||
111 | * @scm: SCSI module | ||
112 | * @sld: Level information | ||
113 | * @dest: The queue where these I/Os have to be gathered. | ||
114 | * | ||
115 | * Should be called with lock held. | ||
116 | */ | ||
117 | static void | ||
118 | csio_scsi_gather_active_ios(struct csio_scsim *scm, | ||
119 | struct csio_scsi_level_data *sld, | ||
120 | struct list_head *dest) | ||
121 | { | ||
122 | struct list_head *tmp, *next; | ||
123 | |||
124 | if (list_empty(&scm->active_q)) | ||
125 | return; | ||
126 | |||
127 | /* Just splice the entire active_q into dest */ | ||
128 | if (sld->level == CSIO_LEV_ALL) { | ||
129 | list_splice_tail_init(&scm->active_q, dest); | ||
130 | return; | ||
131 | } | ||
132 | |||
133 | list_for_each_safe(tmp, next, &scm->active_q) { | ||
134 | if (csio_scsi_match_io((struct csio_ioreq *)tmp, sld)) { | ||
135 | list_del_init(tmp); | ||
136 | list_add_tail(tmp, dest); | ||
137 | } | ||
138 | } | ||
139 | } | ||
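csio_scsi_gather_active_ios() above walks the active queue with list_for_each_safe() so that matching requests can be unlinked and appended to the destination list mid-iteration (the next pointer is saved before any unlink). A small standalone sketch of the same move-while-iterating idea on a singly linked list (illustrative only):

#include <stdio.h>

struct req { int lun; struct req *next; };

/* Move every node matching lun from *src to *dst, preserving order. */
static void gather(struct req **src, struct req **dst, int lun)
{
	struct req **pp = src, **tail = dst, *n, *next;

	while (*tail)
		tail = &(*tail)->next;		/* find end of destination */
	for (n = *src; n; n = next) {
		next = n->next;			/* saved before any unlink */
		if (n->lun == lun) {
			*pp = next;		/* unlink from source */
			n->next = NULL;
			*tail = n;		/* append to destination */
			tail = &n->next;
		} else {
			pp = &n->next;
		}
	}
}

int main(void)
{
	struct req a = {1, NULL}, b = {2, &a}, c = {1, &b};
	struct req *active = &c, *dest = NULL;

	gather(&active, &dest, 1);
	printf("moved: %d %d, left: %d\n",
	       dest->lun, dest->next->lun, active->lun); /* moved: 1 1, left: 2 */
	return 0;
}
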
140 | |||
141 | static inline bool | ||
142 | csio_scsi_itnexus_loss_error(uint16_t error) | ||
143 | { | ||
144 | switch (error) { | ||
145 | case FW_ERR_LINK_DOWN: | ||
146 | case FW_RDEV_NOT_READY: | ||
147 | case FW_ERR_RDEV_LOST: | ||
148 | case FW_ERR_RDEV_LOGO: | ||
149 | case FW_ERR_RDEV_IMPL_LOGO: | ||
150 | return 1; | ||
151 | } | ||
152 | return 0; | ||
153 | } | ||
154 | |||
155 | static inline void | ||
156 | csio_scsi_tag(struct scsi_cmnd *scmnd, uint8_t *tag, uint8_t hq, | ||
157 | uint8_t oq, uint8_t sq) | ||
158 | { | ||
159 | char stag[2]; | ||
160 | |||
161 | if (scsi_populate_tag_msg(scmnd, stag)) { | ||
162 | switch (stag[0]) { | ||
163 | case HEAD_OF_QUEUE_TAG: | ||
164 | *tag = hq; | ||
165 | break; | ||
166 | case ORDERED_QUEUE_TAG: | ||
167 | *tag = oq; | ||
168 | break; | ||
169 | default: | ||
170 | *tag = sq; | ||
171 | break; | ||
172 | } | ||
173 | } else | ||
174 | *tag = 0; | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * csio_scsi_fcp_cmnd - Frame the SCSI FCP command payload. | ||
179 | * @req: IO req structure. | ||
180 | * @addr: DMA location to place the payload. | ||
181 | * | ||
182 | * This routine is shared between FCP_WRITE, FCP_READ and FCP_CMD requests. | ||
183 | */ | ||
184 | static inline void | ||
185 | csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr) | ||
186 | { | ||
187 | struct fcp_cmnd *fcp_cmnd = (struct fcp_cmnd *)addr; | ||
188 | struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); | ||
189 | |||
190 | /* Check for Task Management */ | ||
191 | if (likely(scmnd->SCp.Message == 0)) { | ||
192 | int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun); | ||
193 | fcp_cmnd->fc_tm_flags = 0; | ||
194 | fcp_cmnd->fc_cmdref = 0; | ||
195 | fcp_cmnd->fc_pri_ta = 0; | ||
196 | |||
197 | memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16); | ||
198 | csio_scsi_tag(scmnd, &fcp_cmnd->fc_pri_ta, | ||
199 | FCP_PTA_HEADQ, FCP_PTA_ORDERED, FCP_PTA_SIMPLE); | ||
200 | fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd)); | ||
201 | |||
202 | if (req->nsge) { | ||
203 | if (req->datadir == DMA_TO_DEVICE) | ||
204 | fcp_cmnd->fc_flags = FCP_CFL_WRDATA; | ||
205 | else | ||
206 | fcp_cmnd->fc_flags = FCP_CFL_RDDATA; | ||
207 | } else | ||
208 | fcp_cmnd->fc_flags = 0; | ||
209 | } else { | ||
210 | memset(fcp_cmnd, 0, sizeof(*fcp_cmnd)); | ||
211 | int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun); | ||
212 | fcp_cmnd->fc_tm_flags = (uint8_t)scmnd->SCp.Message; | ||
213 | } | ||
214 | } | ||
215 | |||
216 | /* | ||
217 | * csio_scsi_init_cmd_wr - Initialize the SCSI CMD WR. | ||
218 | * @req: IO req structure. | ||
219 | * @addr: DMA location to place the payload. | ||
220 | * @size: Size of WR (including FW WR + immed data + rsp SG entry) | ||
221 | * | ||
222 | * Wrapper for populating fw_scsi_cmd_wr. | ||
223 | */ | ||
224 | static inline void | ||
225 | csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size) | ||
226 | { | ||
227 | struct csio_hw *hw = req->lnode->hwp; | ||
228 | struct csio_rnode *rn = req->rnode; | ||
229 | struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr; | ||
230 | struct csio_dma_buf *dma_buf; | ||
231 | uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len; | ||
232 | |||
233 | wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_CMD_WR) | | ||
234 | FW_SCSI_CMD_WR_IMMDLEN(imm)); | ||
235 | wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) | | ||
236 | FW_WR_LEN16( | ||
237 | DIV_ROUND_UP(size, 16))); | ||
238 | |||
239 | wr->cookie = (uintptr_t) req; | ||
240 | wr->iqid = (uint16_t)cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); | ||
241 | wr->tmo_val = (uint8_t) req->tmo; | ||
242 | wr->r3 = 0; | ||
243 | memset(&wr->r5, 0, 8); | ||
244 | |||
245 | /* Get RSP DMA buffer */ | ||
246 | dma_buf = &req->dma_buf; | ||
247 | |||
248 | /* Prepare RSP SGL */ | ||
249 | wr->rsp_dmalen = cpu_to_be32(dma_buf->len); | ||
250 | wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr); | ||
251 | |||
252 | wr->r6 = 0; | ||
253 | |||
254 | wr->u.fcoe.ctl_pri = 0; | ||
255 | wr->u.fcoe.cp_en_class = 0; | ||
256 | wr->u.fcoe.r4_lo[0] = 0; | ||
257 | wr->u.fcoe.r4_lo[1] = 0; | ||
258 | |||
259 | /* Frame a FCP command */ | ||
260 | csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)addr + | ||
261 | sizeof(struct fw_scsi_cmd_wr))); | ||
262 | } | ||
263 | |||
264 | #define CSIO_SCSI_CMD_WR_SZ(_imm) \ | ||
265 | (sizeof(struct fw_scsi_cmd_wr) + /* WR size */ \ | ||
266 | ALIGN((_imm), 16)) /* Immed data */ | ||
267 | |||
268 | #define CSIO_SCSI_CMD_WR_SZ_16(_imm) \ | ||
269 | (ALIGN(CSIO_SCSI_CMD_WR_SZ((_imm)), 16)) | ||
270 | |||
271 | /* | ||
272 | * csio_scsi_cmd - Create a SCSI CMD WR. | ||
273 | * @req: IO req structure. | ||
274 | * | ||
275 | * Gets a WR slot in the egress queue and initializes it with SCSI CMD WR. | ||
276 | * | ||
277 | */ | ||
278 | static inline void | ||
279 | csio_scsi_cmd(struct csio_ioreq *req) | ||
280 | { | ||
281 | struct csio_wr_pair wrp; | ||
282 | struct csio_hw *hw = req->lnode->hwp; | ||
283 | struct csio_scsim *scsim = csio_hw_to_scsim(hw); | ||
284 | uint32_t size = CSIO_SCSI_CMD_WR_SZ_16(scsim->proto_cmd_len); | ||
285 | |||
286 | req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp); | ||
287 | if (unlikely(req->drv_status != 0)) | ||
288 | return; | ||
289 | |||
290 | if (wrp.size1 >= size) { | ||
291 | /* Initialize WR in one shot */ | ||
292 | csio_scsi_init_cmd_wr(req, wrp.addr1, size); | ||
293 | } else { | ||
294 | uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx); | ||
295 | |||
296 | /* | ||
297 | * Make a temporary copy of the WR and write back | ||
298 | * the copy into the WR pair. | ||
299 | */ | ||
300 | csio_scsi_init_cmd_wr(req, (void *)tmpwr, size); | ||
301 | memcpy(wrp.addr1, tmpwr, wrp.size1); | ||
302 | memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1); | ||
303 | } | ||
304 | } | ||
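When the WR does not fit contiguously (wrp.size1 < size), csio_scsi_cmd() above builds the WR in a scratch buffer and copies it out in two pieces: one up to the end of the ring, one from the start. A standalone sketch of that wrap-around copy, with illustrative sizes:

#include <stdio.h>
#include <string.h>

#define RING_SZ 16

static unsigned char ring[RING_SZ];

static void post(unsigned tail, const void *wr, size_t len)
{
	size_t first = RING_SZ - tail;		/* contiguous bytes before wrap */

	if (len <= first) {
		memcpy(&ring[tail], wr, len);	/* fits: one-shot copy */
	} else {
		memcpy(&ring[tail], wr, first);	/* ...up to the end of the ring */
		memcpy(&ring[0], (const unsigned char *)wr + first,
		       len - first);		/* ...then from the start */
	}
}

int main(void)
{
	unsigned char wr[8] = "ABCDEFG";

	post(12, wr, 8);			/* wraps after 4 bytes */
	printf("%c%c\n", ring[15], ring[0]);	/* prints: DE */
	return 0;
}
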
305 | |||
306 | /* | ||
307 | * csio_scsi_init_ultptx_dsgl - Fill in a ULP_TX_SC_DSGL | ||
308 | * @hw: HW module | ||
309 | * @req: IO request | ||
310 | * @sgl: ULP TX SGL pointer. | ||
311 | * | ||
312 | */ | ||
313 | static inline void | ||
314 | csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req, | ||
315 | struct ulptx_sgl *sgl) | ||
316 | { | ||
317 | struct ulptx_sge_pair *sge_pair = NULL; | ||
318 | struct scatterlist *sgel; | ||
319 | uint32_t i = 0; | ||
320 | uint32_t xfer_len; | ||
321 | struct list_head *tmp; | ||
322 | struct csio_dma_buf *dma_buf; | ||
323 | struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); | ||
324 | |||
325 | sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_MORE | | ||
326 | ULPTX_NSGE(req->nsge)); | ||
327 | /* Now add the data SGLs */ | ||
328 | if (likely(!req->dcopy)) { | ||
329 | scsi_for_each_sg(scmnd, sgel, req->nsge, i) { | ||
330 | if (i == 0) { | ||
331 | sgl->addr0 = cpu_to_be64(sg_dma_address(sgel)); | ||
332 | sgl->len0 = cpu_to_be32(sg_dma_len(sgel)); | ||
333 | sge_pair = (struct ulptx_sge_pair *)(sgl + 1); | ||
334 | continue; | ||
335 | } | ||
336 | if ((i - 1) & 0x1) { | ||
337 | sge_pair->addr[1] = cpu_to_be64( | ||
338 | sg_dma_address(sgel)); | ||
339 | sge_pair->len[1] = cpu_to_be32( | ||
340 | sg_dma_len(sgel)); | ||
341 | sge_pair++; | ||
342 | } else { | ||
343 | sge_pair->addr[0] = cpu_to_be64( | ||
344 | sg_dma_address(sgel)); | ||
345 | sge_pair->len[0] = cpu_to_be32( | ||
346 | sg_dma_len(sgel)); | ||
347 | } | ||
348 | } | ||
349 | } else { | ||
350 | /* Program sg elements with driver's DDP buffer */ | ||
351 | xfer_len = scsi_bufflen(scmnd); | ||
352 | list_for_each(tmp, &req->gen_list) { | ||
353 | dma_buf = (struct csio_dma_buf *)tmp; | ||
354 | if (i == 0) { | ||
355 | sgl->addr0 = cpu_to_be64(dma_buf->paddr); | ||
356 | sgl->len0 = cpu_to_be32( | ||
357 | min(xfer_len, dma_buf->len)); | ||
358 | sge_pair = (struct ulptx_sge_pair *)(sgl + 1); | ||
359 | } else if ((i - 1) & 0x1) { | ||
360 | sge_pair->addr[1] = cpu_to_be64(dma_buf->paddr); | ||
361 | sge_pair->len[1] = cpu_to_be32( | ||
362 | min(xfer_len, dma_buf->len)); | ||
363 | sge_pair++; | ||
364 | } else { | ||
365 | sge_pair->addr[0] = cpu_to_be64(dma_buf->paddr); | ||
366 | sge_pair->len[0] = cpu_to_be32( | ||
367 | min(xfer_len, dma_buf->len)); | ||
368 | } | ||
369 | xfer_len -= min(xfer_len, dma_buf->len); | ||
370 | i++; | ||
371 | } | ||
372 | } | ||
373 | } | ||
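In the DSGL builder above, data SGE 0 occupies the inline addr0/len0 fields of struct ulptx_sgl, and every later SGE i lands in pair (i - 1) / 2, slot (i - 1) % 2; that is what the (i - 1) & 0x1 test selects. A quick standalone check of that placement arithmetic:

#include <stdio.h>

int main(void)
{
	for (int i = 0; i < 6; i++) {
		if (i == 0)
			printf("sge %d -> inline addr0/len0\n", i);
		else
			printf("sge %d -> pair %d slot %d\n",
			       i, (i - 1) / 2, (i - 1) % 2);
	}
	return 0;
}
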
374 | |||
375 | /* | ||
376 | * csio_scsi_init_read_wr - Initialize the READ SCSI WR. | ||
377 | * @req: IO req structure. | ||
378 | * @wrp: DMA location to place the payload. | ||
379 | * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL) | ||
380 | * | ||
381 | * Wrapper for populating fw_scsi_read_wr. | ||
382 | */ | ||
383 | static inline void | ||
384 | csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size) | ||
385 | { | ||
386 | struct csio_hw *hw = req->lnode->hwp; | ||
387 | struct csio_rnode *rn = req->rnode; | ||
388 | struct fw_scsi_read_wr *wr = (struct fw_scsi_read_wr *)wrp; | ||
389 | struct ulptx_sgl *sgl; | ||
390 | struct csio_dma_buf *dma_buf; | ||
391 | uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len; | ||
392 | struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); | ||
393 | |||
394 | wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_READ_WR) | | ||
395 | FW_SCSI_READ_WR_IMMDLEN(imm)); | ||
396 | wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) | | ||
397 | FW_WR_LEN16(DIV_ROUND_UP(size, 16))); | ||
398 | wr->cookie = (uintptr_t)req; | ||
399 | wr->iqid = (uint16_t)cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); | ||
400 | wr->tmo_val = (uint8_t)(req->tmo); | ||
401 | wr->use_xfer_cnt = 1; | ||
402 | wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd)); | ||
403 | wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd)); | ||
404 | /* Get RSP DMA buffer */ | ||
405 | dma_buf = &req->dma_buf; | ||
406 | |||
407 | /* Prepare RSP SGL */ | ||
408 | wr->rsp_dmalen = cpu_to_be32(dma_buf->len); | ||
409 | wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr); | ||
410 | |||
411 | wr->r4 = 0; | ||
412 | |||
413 | wr->u.fcoe.ctl_pri = 0; | ||
414 | wr->u.fcoe.cp_en_class = 0; | ||
415 | wr->u.fcoe.r3_lo[0] = 0; | ||
416 | wr->u.fcoe.r3_lo[1] = 0; | ||
417 | csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp + | ||
418 | sizeof(struct fw_scsi_read_wr))); | ||
419 | |||
420 | /* Move WR pointer past command and immediate data */ | ||
421 | sgl = (struct ulptx_sgl *)((uintptr_t)wrp + | ||
422 | sizeof(struct fw_scsi_read_wr) + ALIGN(imm, 16)); | ||
423 | |||
424 | /* Fill in the DSGL */ | ||
425 | csio_scsi_init_ultptx_dsgl(hw, req, sgl); | ||
426 | } | ||
427 | |||
428 | /* | ||
429 | * csio_scsi_init_write_wr - Initialize the WRITE SCSI WR. | ||
430 | * @req: IO req structure. | ||
431 | * @wrp: DMA location to place the payload. | ||
432 | * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL) | ||
433 | * | ||
434 | * Wrapper for populating fw_scsi_write_wr. | ||
435 | */ | ||
436 | static inline void | ||
437 | csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size) | ||
438 | { | ||
439 | struct csio_hw *hw = req->lnode->hwp; | ||
440 | struct csio_rnode *rn = req->rnode; | ||
441 | struct fw_scsi_write_wr *wr = (struct fw_scsi_write_wr *)wrp; | ||
442 | struct ulptx_sgl *sgl; | ||
443 | struct csio_dma_buf *dma_buf; | ||
444 | uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len; | ||
445 | struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); | ||
446 | |||
447 | wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_WRITE_WR) | | ||
448 | FW_SCSI_WRITE_WR_IMMDLEN(imm)); | ||
449 | wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) | | ||
450 | FW_WR_LEN16(DIV_ROUND_UP(size, 16))); | ||
451 | wr->cookie = (uintptr_t)req; | ||
452 | wr->iqid = (uint16_t)cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); | ||
453 | wr->tmo_val = (uint8_t)(req->tmo); | ||
454 | wr->use_xfer_cnt = 1; | ||
455 | wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd)); | ||
456 | wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd)); | ||
457 | /* Get RSP DMA buffer */ | ||
458 | dma_buf = &req->dma_buf; | ||
459 | |||
460 | /* Prepare RSP SGL */ | ||
461 | wr->rsp_dmalen = cpu_to_be32(dma_buf->len); | ||
462 | wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr); | ||
463 | |||
464 | wr->r4 = 0; | ||
465 | |||
466 | wr->u.fcoe.ctl_pri = 0; | ||
467 | wr->u.fcoe.cp_en_class = 0; | ||
468 | wr->u.fcoe.r3_lo[0] = 0; | ||
469 | wr->u.fcoe.r3_lo[1] = 0; | ||
470 | csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp + | ||
471 | sizeof(struct fw_scsi_write_wr))); | ||
472 | |||
473 | /* Move WR pointer past command and immediate data */ | ||
474 | sgl = (struct ulptx_sgl *)((uintptr_t)wrp + | ||
475 | sizeof(struct fw_scsi_write_wr) + ALIGN(imm, 16)); | ||
476 | |||
477 | /* Fill in the DSGL */ | ||
478 | csio_scsi_init_ultptx_dsgl(hw, req, sgl); | ||
479 | } | ||
480 | |||
481 | /* Calculate WR size needed for fw_scsi_read_wr/fw_scsi_write_wr */ | ||
482 | #define CSIO_SCSI_DATA_WRSZ(req, oper, sz, imm) \ | ||
483 | do { \ | ||
484 | (sz) = sizeof(struct fw_scsi_##oper##_wr) + /* WR size */ \ | ||
485 | ALIGN((imm), 16) + /* Immed data */ \ | ||
486 | sizeof(struct ulptx_sgl); /* ulptx_sgl */ \ | ||
487 | \ | ||
488 | if (unlikely((req)->nsge > 1)) \ | ||
489 | (sz) += (sizeof(struct ulptx_sge_pair) * \ | ||
490 | (ALIGN(((req)->nsge - 1), 2) / 2)); \ | ||
491 | /* Data SGE */ \ | ||
492 | } while (0) | ||
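
/*
 * Worked example (illustrative numbers): with proto_cmd_len = 32 and
 * req->nsge = 5, the macro yields sizeof(struct fw_scsi_read_wr) +
 * ALIGN(32, 16) + sizeof(struct ulptx_sgl) + (ALIGN(4, 2) / 2) *
 * sizeof(struct ulptx_sge_pair): the WR header, the immediate FCP
 * command, the inlined first SGE, and two SGE pairs for the remaining
 * four SGEs. Callers then round the total up to a 16-byte multiple
 * before asking for EQ space.
 */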
493 | |||
494 | /* | ||
495 | * csio_scsi_read - Create a SCSI READ WR. | ||
496 | * @req: IO req structure. | ||
497 | * | ||
498 | * Gets a WR slot in the egress queue and initializes it with | ||
499 | * SCSI READ WR. | ||
500 | * | ||
501 | */ | ||
502 | static inline void | ||
503 | csio_scsi_read(struct csio_ioreq *req) | ||
504 | { | ||
505 | struct csio_wr_pair wrp; | ||
506 | uint32_t size; | ||
507 | struct csio_hw *hw = req->lnode->hwp; | ||
508 | struct csio_scsim *scsim = csio_hw_to_scsim(hw); | ||
509 | |||
510 | CSIO_SCSI_DATA_WRSZ(req, read, size, scsim->proto_cmd_len); | ||
511 | size = ALIGN(size, 16); | ||
512 | |||
513 | req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp); | ||
514 | if (likely(req->drv_status == 0)) { | ||
515 | if (likely(wrp.size1 >= size)) { | ||
516 | /* Initialize WR in one shot */ | ||
517 | csio_scsi_init_read_wr(req, wrp.addr1, size); | ||
518 | } else { | ||
519 | uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx); | ||
520 | /* | ||
521 | * Make a temporary copy of the WR and write back | ||
522 | * the copy into the WR pair. | ||
523 | */ | ||
524 | csio_scsi_init_read_wr(req, (void *)tmpwr, size); | ||
525 | memcpy(wrp.addr1, tmpwr, wrp.size1); | ||
526 | memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1); | ||
527 | } | ||
528 | } | ||
529 | } | ||
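
/*
 * Note on the else-branch above (repeated in the write and abort/close
 * variants below): csio_wr_get() may hand back a slot that wraps around
 * the end of the circular egress queue, described as two fragments
 * (addr1/size1 plus addr2). The WR is therefore built contiguously in
 * the csio_q_eq_wrap() area first and then copied out in two memcpy()s,
 * so the WR initializers never see a split destination.
 */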
530 | |||
531 | /* | ||
532 | * csio_scsi_write - Create a SCSI WRITE WR. | ||
533 | * @req: IO req structure. | ||
534 | * | ||
535 | * Gets a WR slot in the egress queue and initializes it with | ||
536 | * SCSI WRITE WR. | ||
537 | * | ||
538 | */ | ||
539 | static inline void | ||
540 | csio_scsi_write(struct csio_ioreq *req) | ||
541 | { | ||
542 | struct csio_wr_pair wrp; | ||
543 | uint32_t size; | ||
544 | struct csio_hw *hw = req->lnode->hwp; | ||
545 | struct csio_scsim *scsim = csio_hw_to_scsim(hw); | ||
546 | |||
547 | CSIO_SCSI_DATA_WRSZ(req, write, size, scsim->proto_cmd_len); | ||
548 | size = ALIGN(size, 16); | ||
549 | |||
550 | req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp); | ||
551 | if (likely(req->drv_status == 0)) { | ||
552 | if (likely(wrp.size1 >= size)) { | ||
553 | /* Initialize WR in one shot */ | ||
554 | csio_scsi_init_write_wr(req, wrp.addr1, size); | ||
555 | } else { | ||
556 | uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx); | ||
557 | /* | ||
558 | * Make a temporary copy of the WR and write back | ||
559 | * the copy into the WR pair. | ||
560 | */ | ||
561 | csio_scsi_init_write_wr(req, (void *)tmpwr, size); | ||
562 | memcpy(wrp.addr1, tmpwr, wrp.size1); | ||
563 | memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1); | ||
564 | } | ||
565 | } | ||
566 | } | ||
567 | |||
568 | /* | ||
569 | * csio_setup_ddp - Setup DDP buffers for Read request. | ||
570 | * @req: IO req structure. | ||
571 | * | ||
572 | * Checks whether the SGLs/data buffers are virtually contiguous, as | ||
573 | * required for DDP. If contiguous, the driver posts the SGLs in the WR; | ||
574 | * otherwise it posts internal DDP buffers for the request. | ||
575 | */ | ||
576 | static inline void | ||
577 | csio_setup_ddp(struct csio_scsim *scsim, struct csio_ioreq *req) | ||
578 | { | ||
579 | #ifdef __CSIO_DEBUG__ | ||
580 | struct csio_hw *hw = req->lnode->hwp; | ||
581 | #endif | ||
582 | struct scatterlist *sgel = NULL; | ||
583 | struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); | ||
584 | uint64_t sg_addr = 0; | ||
585 | uint32_t ddp_pagesz = 4096; | ||
586 | uint32_t buf_off; | ||
587 | struct csio_dma_buf *dma_buf = NULL; | ||
588 | uint32_t alloc_len = 0; | ||
589 | uint32_t xfer_len = 0; | ||
590 | uint32_t sg_len = 0; | ||
591 | uint32_t i; | ||
592 | |||
593 | scsi_for_each_sg(scmnd, sgel, req->nsge, i) { | ||
594 | sg_addr = sg_dma_address(sgel); | ||
595 | sg_len = sg_dma_len(sgel); | ||
596 | |||
597 | buf_off = sg_addr & (ddp_pagesz - 1); | ||
598 | |||
599 | /* Except the 1st buffer, all buffer addrs have to be page aligned */ | ||
600 | if (i != 0 && buf_off) { | ||
601 | csio_dbg(hw, "SGL addr not DDP aligned (%llx:%d)\n", | ||
602 | sg_addr, sg_len); | ||
603 | goto unaligned; | ||
604 | } | ||
605 | |||
606 | /* Except the last buffer, all buffers must end on a page boundary */ | ||
607 | if ((i != (req->nsge - 1)) && | ||
608 | ((buf_off + sg_len) & (ddp_pagesz - 1))) { | ||
609 | csio_dbg(hw, | ||
610 | "SGL addr not ending on page boundary" | ||
611 | "(%llx:%d)\n", sg_addr, sg_len); | ||
612 | goto unaligned; | ||
613 | } | ||
614 | } | ||
615 | |||
616 | /* SGL's are virtually contiguous. HW will DDP to SGLs */ | ||
617 | req->dcopy = 0; | ||
618 | csio_scsi_read(req); | ||
619 | |||
620 | return; | ||
621 | |||
622 | unaligned: | ||
623 | CSIO_INC_STATS(scsim, n_unaligned); | ||
624 | /* | ||
625 | * For unaligned SGLs, the driver allocates internal DDP buffers. Once | ||
626 | * the command completes, data from the DDP buffers is copied to the SGLs. | ||
627 | */ | ||
628 | req->dcopy = 1; | ||
629 | |||
630 | /* Use gen_list to store the DDP buffers */ | ||
631 | INIT_LIST_HEAD(&req->gen_list); | ||
632 | xfer_len = scsi_bufflen(scmnd); | ||
633 | |||
634 | i = 0; | ||
635 | /* Allocate ddp buffers for this request */ | ||
636 | while (alloc_len < xfer_len) { | ||
637 | dma_buf = csio_get_scsi_ddp(scsim); | ||
638 | if (dma_buf == NULL || i > scsim->max_sge) { | ||
639 | req->drv_status = -EBUSY; | ||
640 | break; | ||
641 | } | ||
642 | alloc_len += dma_buf->len; | ||
643 | /* Added to IO req */ | ||
644 | list_add_tail(&dma_buf->list, &req->gen_list); | ||
645 | i++; | ||
646 | } | ||
647 | |||
648 | if (!req->drv_status) { | ||
649 | /* set number of ddp bufs used */ | ||
650 | req->nsge = i; | ||
651 | csio_scsi_read(req); | ||
652 | return; | ||
653 | } | ||
654 | |||
655 | /* release dma descs */ | ||
656 | if (i > 0) | ||
657 | csio_put_scsi_ddp_list(scsim, &req->gen_list, i); | ||
658 | } | ||
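
/*
 * Illustrative DDP-eligible layout (using the 4KB ddp_pagesz assumed
 * above):
 *
 *   SGE 0: addr = X + 0xe00, len = 0x200   (may start unaligned, but
 *                                            must end on a 4KB boundary)
 *   SGE 1: addr = Y,         len = 0x2000  (4KB-aligned start and end)
 *   SGE 2: addr = Z,         len = 0x133   (aligned start, any length)
 *
 * Such an SGL passes both checks and is DDPed directly; any middle SGE
 * starting or ending off a 4KB boundary routes the request through the
 * internal bounce buffers instead.
 */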
659 | |||
660 | /* | ||
661 | * csio_scsi_init_abrt_cls_wr - Initialize an ABORT/CLOSE WR. | ||
662 | * @req: IO req structure. | ||
663 | * @addr: DMA location to place the payload. | ||
664 | * @size: Size of WR | ||
665 | * @abort: abort OR close | ||
666 | * | ||
667 | * Wrapper for populating fw_scsi_cmd_wr. | ||
668 | */ | ||
669 | static inline void | ||
670 | csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size, | ||
671 | bool abort) | ||
672 | { | ||
673 | struct csio_hw *hw = req->lnode->hwp; | ||
674 | struct csio_rnode *rn = req->rnode; | ||
675 | struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr; | ||
676 | |||
677 | wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_ABRT_CLS_WR)); | ||
678 | wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) | | ||
679 | FW_WR_LEN16( | ||
680 | DIV_ROUND_UP(size, 16))); | ||
681 | |||
682 | wr->cookie = (uintptr_t) req; | ||
683 | wr->iqid = (uint16_t)cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); | ||
684 | wr->tmo_val = (uint8_t) req->tmo; | ||
685 | /* 0 for CHK_ALL_IO tells FW to look up t_cookie */ | ||
686 | wr->sub_opcode_to_chk_all_io = | ||
687 | (FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(abort) | | ||
688 | FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(0)); | ||
689 | wr->r3[0] = 0; | ||
690 | wr->r3[1] = 0; | ||
691 | wr->r3[2] = 0; | ||
692 | wr->r3[3] = 0; | ||
693 | /* Since we re-use the same ioreq for abort as well */ | ||
694 | wr->t_cookie = (uintptr_t) req; | ||
695 | } | ||
696 | |||
697 | static inline void | ||
698 | csio_scsi_abrt_cls(struct csio_ioreq *req, bool abort) | ||
699 | { | ||
700 | struct csio_wr_pair wrp; | ||
701 | struct csio_hw *hw = req->lnode->hwp; | ||
702 | uint32_t size = ALIGN(sizeof(struct fw_scsi_abrt_cls_wr), 16); | ||
703 | |||
704 | req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp); | ||
705 | if (req->drv_status != 0) | ||
706 | return; | ||
707 | |||
708 | if (wrp.size1 >= size) { | ||
709 | /* Initialize WR in one shot */ | ||
710 | csio_scsi_init_abrt_cls_wr(req, wrp.addr1, size, abort); | ||
711 | } else { | ||
712 | uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx); | ||
713 | /* | ||
714 | * Make a temporary copy of the WR and write back | ||
715 | * the copy into the WR pair. | ||
716 | */ | ||
717 | csio_scsi_init_abrt_cls_wr(req, (void *)tmpwr, size, abort); | ||
718 | memcpy(wrp.addr1, tmpwr, wrp.size1); | ||
719 | memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1); | ||
720 | } | ||
721 | } | ||
722 | |||
723 | /*****************************************************************************/ | ||
724 | /* START: SCSI SM */ | ||
725 | /*****************************************************************************/ | ||
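/*
 * State overview (summarized from the handlers below):
 *
 *   uninit --START_IO/START_TM--> io_active/tm_active
 *   io_active/tm_active --ABORT--> aborting --ABORTED--> uninit
 *   io_active/tm_active --CLOSE--> closing  --CLOSED---> uninit
 *   io_active --COMPLETED w/ I-T nexus loss--> shost_cmpl_await
 *
 * Every state accepts DRVCLEANUP, which forces the request back to
 * uninit (setting wr_status = FW_HOSTERROR in the active states).
 */
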
726 | static void | ||
727 | csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt) | ||
728 | { | ||
729 | struct csio_hw *hw = req->lnode->hwp; | ||
730 | struct csio_scsim *scsim = csio_hw_to_scsim(hw); | ||
731 | |||
732 | switch (evt) { | ||
733 | case CSIO_SCSIE_START_IO: | ||
734 | |||
735 | if (req->nsge) { | ||
736 | if (req->datadir == DMA_TO_DEVICE) { | ||
737 | req->dcopy = 0; | ||
738 | csio_scsi_write(req); | ||
739 | } else | ||
740 | csio_setup_ddp(scsim, req); | ||
741 | } else { | ||
742 | csio_scsi_cmd(req); | ||
743 | } | ||
744 | |||
745 | if (likely(req->drv_status == 0)) { | ||
746 | /* change state and enqueue on active_q */ | ||
747 | csio_set_state(&req->sm, csio_scsis_io_active); | ||
748 | list_add_tail(&req->sm.sm_list, &scsim->active_q); | ||
749 | csio_wr_issue(hw, req->eq_idx, false); | ||
750 | CSIO_INC_STATS(scsim, n_active); | ||
751 | |||
752 | return; | ||
753 | } | ||
754 | break; | ||
755 | |||
756 | case CSIO_SCSIE_START_TM: | ||
757 | csio_scsi_cmd(req); | ||
758 | if (req->drv_status == 0) { | ||
759 | /* | ||
760 | * NOTE: We collect the affected I/Os prior to issuing | ||
761 | * LUN reset, and not after it. This is to prevent | ||
762 | * aborting I/Os that get issued after the LUN reset, | ||
763 | * but prior to LUN reset completion (in the event that | ||
764 | * the host stack has not blocked I/Os to a LUN that is | ||
765 | * being reset). | ||
766 | */ | ||
767 | csio_set_state(&req->sm, csio_scsis_tm_active); | ||
768 | list_add_tail(&req->sm.sm_list, &scsim->active_q); | ||
769 | csio_wr_issue(hw, req->eq_idx, false); | ||
770 | CSIO_INC_STATS(scsim, n_tm_active); | ||
771 | } | ||
772 | return; | ||
773 | |||
774 | case CSIO_SCSIE_ABORT: | ||
775 | case CSIO_SCSIE_CLOSE: | ||
776 | /* | ||
777 | * NOTE: | ||
778 | * We could get here due to: | ||
779 | * - a window in the cleanup path of the SCSI module | ||
780 | * (csio_scsi_abort_io()). Please see NOTE in this function. | ||
781 | * - a window between the time we tried to issue an abort/close | ||
782 | * of a request to FW, and the time the FW completed the request | ||
783 | * itself. | ||
784 | * Print a message for now, and return INVAL either way. | ||
785 | */ | ||
786 | req->drv_status = -EINVAL; | ||
787 | csio_warn(hw, "Trying to abort/close completed IO:%p!\n", req); | ||
788 | break; | ||
789 | |||
790 | default: | ||
791 | csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req); | ||
792 | CSIO_DB_ASSERT(0); | ||
793 | } | ||
794 | } | ||
795 | |||
796 | static void | ||
797 | csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt) | ||
798 | { | ||
799 | struct csio_hw *hw = req->lnode->hwp; | ||
800 | struct csio_scsim *scm = csio_hw_to_scsim(hw); | ||
801 | struct csio_rnode *rn; | ||
802 | |||
803 | switch (evt) { | ||
804 | case CSIO_SCSIE_COMPLETED: | ||
805 | CSIO_DEC_STATS(scm, n_active); | ||
806 | list_del_init(&req->sm.sm_list); | ||
807 | csio_set_state(&req->sm, csio_scsis_uninit); | ||
808 | /* | ||
809 | * In MSIX mode, with multiple queues, the SCSI completions | ||
810 | * could reach us sooner than the FW events sent to indicate | ||
811 | * I-T nexus loss (link down, remote device logo etc.). We | ||
812 | * don't want to be returning such I/Os to the upper layer | ||
813 | * immediately, since we wouldn't have reported the I-T nexus | ||
814 | * loss itself. This forces us to serialize such completions | ||
815 | * with the reporting of the I-T nexus loss. Therefore, we | ||
816 | * internally queue up such completions in the rnode. | ||
817 | * The reporting of I-T nexus loss to the upper layer is then | ||
818 | * followed by the returning of I/Os in this internal queue. | ||
819 | * Having another state along with another queue helps us take | ||
820 | * actions for events such as ABORT received while we are | ||
821 | * in this rnode queue. | ||
822 | */ | ||
823 | if (unlikely(req->wr_status != FW_SUCCESS)) { | ||
824 | rn = req->rnode; | ||
825 | /* | ||
826 | * FW says remote device is lost, but rnode | ||
827 | * doesn't reflect it. | ||
828 | */ | ||
829 | if (csio_scsi_itnexus_loss_error(req->wr_status) && | ||
830 | csio_is_rnode_ready(rn)) { | ||
831 | csio_set_state(&req->sm, | ||
832 | csio_scsis_shost_cmpl_await); | ||
833 | list_add_tail(&req->sm.sm_list, | ||
834 | &rn->host_cmpl_q); | ||
835 | } | ||
836 | } | ||
837 | |||
838 | break; | ||
839 | |||
840 | case CSIO_SCSIE_ABORT: | ||
841 | csio_scsi_abrt_cls(req, SCSI_ABORT); | ||
842 | if (req->drv_status == 0) { | ||
843 | csio_wr_issue(hw, req->eq_idx, false); | ||
844 | csio_set_state(&req->sm, csio_scsis_aborting); | ||
845 | } | ||
846 | break; | ||
847 | |||
848 | case CSIO_SCSIE_CLOSE: | ||
849 | csio_scsi_abrt_cls(req, SCSI_CLOSE); | ||
850 | if (req->drv_status == 0) { | ||
851 | csio_wr_issue(hw, req->eq_idx, false); | ||
852 | csio_set_state(&req->sm, csio_scsis_closing); | ||
853 | } | ||
854 | break; | ||
855 | |||
856 | case CSIO_SCSIE_DRVCLEANUP: | ||
857 | req->wr_status = FW_HOSTERROR; | ||
858 | CSIO_DEC_STATS(scm, n_active); | ||
859 | csio_set_state(&req->sm, csio_scsis_uninit); | ||
860 | break; | ||
861 | |||
862 | default: | ||
863 | csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req); | ||
864 | CSIO_DB_ASSERT(0); | ||
865 | } | ||
866 | } | ||
867 | |||
868 | static void | ||
869 | csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt) | ||
870 | { | ||
871 | struct csio_hw *hw = req->lnode->hwp; | ||
872 | struct csio_scsim *scm = csio_hw_to_scsim(hw); | ||
873 | |||
874 | switch (evt) { | ||
875 | case CSIO_SCSIE_COMPLETED: | ||
876 | CSIO_DEC_STATS(scm, n_tm_active); | ||
877 | list_del_init(&req->sm.sm_list); | ||
878 | csio_set_state(&req->sm, csio_scsis_uninit); | ||
879 | |||
880 | break; | ||
881 | |||
882 | case CSIO_SCSIE_ABORT: | ||
883 | csio_scsi_abrt_cls(req, SCSI_ABORT); | ||
884 | if (req->drv_status == 0) { | ||
885 | csio_wr_issue(hw, req->eq_idx, false); | ||
886 | csio_set_state(&req->sm, csio_scsis_aborting); | ||
887 | } | ||
888 | break; | ||
889 | |||
890 | |||
891 | case CSIO_SCSIE_CLOSE: | ||
892 | csio_scsi_abrt_cls(req, SCSI_CLOSE); | ||
893 | if (req->drv_status == 0) { | ||
894 | csio_wr_issue(hw, req->eq_idx, false); | ||
895 | csio_set_state(&req->sm, csio_scsis_closing); | ||
896 | } | ||
897 | break; | ||
898 | |||
899 | case CSIO_SCSIE_DRVCLEANUP: | ||
900 | req->wr_status = FW_HOSTERROR; | ||
901 | CSIO_DEC_STATS(scm, n_tm_active); | ||
902 | csio_set_state(&req->sm, csio_scsis_uninit); | ||
903 | break; | ||
904 | |||
905 | default: | ||
906 | csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req); | ||
907 | CSIO_DB_ASSERT(0); | ||
908 | } | ||
909 | } | ||
910 | |||
911 | static void | ||
912 | csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt) | ||
913 | { | ||
914 | struct csio_hw *hw = req->lnode->hwp; | ||
915 | struct csio_scsim *scm = csio_hw_to_scsim(hw); | ||
916 | |||
917 | switch (evt) { | ||
918 | case CSIO_SCSIE_COMPLETED: | ||
919 | csio_dbg(hw, | ||
920 | "ioreq %p recvd cmpltd (wr_status:%d) " | ||
921 | "in aborting st\n", req, req->wr_status); | ||
922 | /* | ||
923 | * Use -ECANCELED to explicitly tell the ABORTED event that | ||
924 | * the original I/O was returned to the driver by FW. | ||
925 | * We don't really care if the I/O was returned with success by | ||
926 | * FW (because the ABORT and completion of the I/O crossed each | ||
927 | * other), or any other return value. Once we are in aborting | ||
928 | * state, the success or failure of the I/O is unimportant to | ||
929 | * us. | ||
930 | */ | ||
931 | req->drv_status = -ECANCELED; | ||
932 | break; | ||
933 | |||
934 | case CSIO_SCSIE_ABORT: | ||
935 | CSIO_INC_STATS(scm, n_abrt_dups); | ||
936 | break; | ||
937 | |||
938 | case CSIO_SCSIE_ABORTED: | ||
939 | |||
940 | csio_dbg(hw, "abort of %p return status:0x%x drv_status:%x\n", | ||
941 | req, req->wr_status, req->drv_status); | ||
942 | /* | ||
943 | * Check if original I/O WR completed before the Abort | ||
944 | * completion. | ||
945 | */ | ||
946 | if (req->drv_status != -ECANCELED) { | ||
947 | csio_warn(hw, | ||
948 | "Abort completed before original I/O," | ||
949 | " req:%p\n", req); | ||
950 | CSIO_DB_ASSERT(0); | ||
951 | } | ||
952 | |||
953 | /* | ||
954 | * There are the following possible scenarios: | ||
955 | * 1. The abort completed successfully, FW returned FW_SUCCESS. | ||
956 | * 2. The completion of an I/O and the receipt of | ||
957 | * abort for that I/O by the FW crossed each other. | ||
958 | * The FW returned FW_EINVAL. The original I/O would have | ||
959 | * returned with FW_SUCCESS or any other SCSI error. | ||
960 | * 3. The FW couldn't send the abort out on the wire, as there | ||
961 | * was an I-T nexus loss (link down, remote device logged | ||
962 | * out etc). FW sent back an appropriate IT nexus loss status | ||
963 | * for the abort. | ||
964 | * 4. FW sent an abort, but abort timed out (remote device | ||
965 | * didn't respond). FW replied back with | ||
966 | * FW_SCSI_ABORT_TIMEDOUT. | ||
967 | * 5. FW couldn't genuinely abort the request for some reason, | ||
968 | * and sent us an error. | ||
969 | * | ||
970 | * The first 3 scenarios are treated as successful abort | ||
971 | * operations by the host, while the last 2 are failed attempts | ||
972 | * to abort. Manipulate the return value of the request | ||
973 | * appropriately, so that host can convey these results | ||
974 | * back to the upper layer. | ||
975 | */ | ||
976 | if ((req->wr_status == FW_SUCCESS) || | ||
977 | (req->wr_status == FW_EINVAL) || | ||
978 | csio_scsi_itnexus_loss_error(req->wr_status)) | ||
979 | req->wr_status = FW_SCSI_ABORT_REQUESTED; | ||
980 | |||
981 | CSIO_DEC_STATS(scm, n_active); | ||
982 | list_del_init(&req->sm.sm_list); | ||
983 | csio_set_state(&req->sm, csio_scsis_uninit); | ||
984 | break; | ||
985 | |||
986 | case CSIO_SCSIE_DRVCLEANUP: | ||
987 | req->wr_status = FW_HOSTERROR; | ||
988 | CSIO_DEC_STATS(scm, n_active); | ||
989 | csio_set_state(&req->sm, csio_scsis_uninit); | ||
990 | break; | ||
991 | |||
992 | case CSIO_SCSIE_CLOSE: | ||
993 | /* | ||
994 | * We can receive this event from the module | ||
995 | * cleanup paths, if the FW forgot to reply to the ABORT WR | ||
996 | * and left this ioreq in this state. For now, just ignore | ||
997 | * the event. The CLOSE event is sent to this state, as | ||
998 | * the LINK may have already gone down. | ||
999 | */ | ||
1000 | break; | ||
1001 | |||
1002 | default: | ||
1003 | csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req); | ||
1004 | CSIO_DB_ASSERT(0); | ||
1005 | } | ||
1006 | } | ||
1007 | |||
1008 | static void | ||
1009 | csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt) | ||
1010 | { | ||
1011 | struct csio_hw *hw = req->lnode->hwp; | ||
1012 | struct csio_scsim *scm = csio_hw_to_scsim(hw); | ||
1013 | |||
1014 | switch (evt) { | ||
1015 | case CSIO_SCSIE_COMPLETED: | ||
1016 | csio_dbg(hw, | ||
1017 | "ioreq %p recvd cmpltd (wr_status:%d) " | ||
1018 | "in closing st\n", req, req->wr_status); | ||
1019 | /* | ||
1020 | * Use -ECANCELED to explicitly tell the CLOSED event that | ||
1021 | * the original I/O was returned to the driver by FW. | ||
1022 | * We don't really care if the I/O was returned with success by | ||
1023 | * FW (because the CLOSE and completion of the I/O crossed each | ||
1024 | * other), or any other return value. Once we are in the closing | ||
1025 | * state, the success or failure of the I/O is unimportant to | ||
1026 | * us. | ||
1027 | */ | ||
1028 | req->drv_status = -ECANCELED; | ||
1029 | break; | ||
1030 | |||
1031 | case CSIO_SCSIE_CLOSED: | ||
1032 | /* | ||
1033 | * Check if original I/O WR completed before the Close | ||
1034 | * completion. | ||
1035 | */ | ||
1036 | if (req->drv_status != -ECANCELED) { | ||
1037 | csio_fatal(hw, | ||
1038 | "Close completed before original I/O," | ||
1039 | " req:%p\n", req); | ||
1040 | CSIO_DB_ASSERT(0); | ||
1041 | } | ||
1042 | |||
1043 | /* | ||
1044 | * Either close succeeded, or we issued close to FW at the | ||
1045 | * same time FW completed it to us. Either way, the I/O | ||
1046 | * is closed. | ||
1047 | */ | ||
1048 | CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) || | ||
1049 | (req->wr_status == FW_EINVAL)); | ||
1050 | req->wr_status = FW_SCSI_CLOSE_REQUESTED; | ||
1051 | |||
1052 | CSIO_DEC_STATS(scm, n_active); | ||
1053 | list_del_init(&req->sm.sm_list); | ||
1054 | csio_set_state(&req->sm, csio_scsis_uninit); | ||
1055 | break; | ||
1056 | |||
1057 | case CSIO_SCSIE_CLOSE: | ||
1058 | break; | ||
1059 | |||
1060 | case CSIO_SCSIE_DRVCLEANUP: | ||
1061 | req->wr_status = FW_HOSTERROR; | ||
1062 | CSIO_DEC_STATS(scm, n_active); | ||
1063 | csio_set_state(&req->sm, csio_scsis_uninit); | ||
1064 | break; | ||
1065 | |||
1066 | default: | ||
1067 | csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req); | ||
1068 | CSIO_DB_ASSERT(0); | ||
1069 | } | ||
1070 | } | ||
1071 | |||
1072 | static void | ||
1073 | csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt) | ||
1074 | { | ||
1075 | switch (evt) { | ||
1076 | case CSIO_SCSIE_ABORT: | ||
1077 | case CSIO_SCSIE_CLOSE: | ||
1078 | /* | ||
1079 | * Just succeed the abort request, and hope that | ||
1080 | * the remote device unregister path will clean up | ||
1081 | * this I/O to the upper layer within a sane | ||
1082 | * amount of time. | ||
1083 | */ | ||
1084 | /* | ||
1085 | * A close can come in during a LINK DOWN. The FW would have | ||
1086 | * returned us the I/O back, but not the remote device lost | ||
1087 | * FW event. In this interval, if the I/O times out at the upper | ||
1088 | * layer, a close can come in. Take the same action as abort: | ||
1089 | * return success, and hope that the remote device unregister | ||
1090 | * path will clean up this I/O. If the FW still doesn't send | ||
1091 | * the msg, the close times out, and the upper layer resorts | ||
1092 | * to the next level of error recovery. | ||
1093 | */ | ||
1094 | req->drv_status = 0; | ||
1095 | break; | ||
1096 | case CSIO_SCSIE_DRVCLEANUP: | ||
1097 | csio_set_state(&req->sm, csio_scsis_uninit); | ||
1098 | break; | ||
1099 | default: | ||
1100 | csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n", | ||
1101 | evt, req); | ||
1102 | CSIO_DB_ASSERT(0); | ||
1103 | } | ||
1104 | } | ||
1105 | |||
1106 | /* | ||
1107 | * csio_scsi_cmpl_handler - WR completion handler for SCSI. | ||
1108 | * @hw: HW module. | ||
1109 | * @wr: The completed WR from the ingress queue. | ||
1110 | * @len: Length of the WR. | ||
1111 | * @flb: Freelist buffer array. | ||
1112 | * @priv: Private object | ||
1113 | * @scsiwr: Pointer to SCSI WR. | ||
1114 | * | ||
1115 | * This is the WR completion handler called per completion from the | ||
1116 | * ISR. It is called with lock held. It walks past the RSS and CPL message | ||
1117 | * header where the actual WR is present. | ||
1118 | * It then gets the status, WR handle (ioreq pointer) and the len of | ||
1119 | * the WR, based on WR opcode. Only on a non-good status is the entire | ||
1120 | * WR copied into the WR cache (ioreq->fw_wr). | ||
1121 | * The ioreq corresponding to the WR is returned to the caller. | ||
1122 | * NOTE: The SCSI queue doesn't allocate a freelist today, hence | ||
1123 | * no freelist buffer is expected. | ||
1124 | */ | ||
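/*
 * Ingress message layout consumed below (a sketch; the authoritative
 * definitions are struct cpl_fw6_msg and the fw_scsi_*_wr structures):
 *
 *   [ RSS header, 8 bytes ][ struct cpl_fw6_msg ]
 *                                  |
 *                                  +-- cpl->data: the SCSI WR, whose
 *                                      first byte is the WR opcode and
 *                                      whose cookie field carries the
 *                                      ioreq pointer posted at submit.
 */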
1125 | struct csio_ioreq * | ||
1126 | csio_scsi_cmpl_handler(struct csio_hw *hw, void *wr, uint32_t len, | ||
1127 | struct csio_fl_dma_buf *flb, void *priv, uint8_t **scsiwr) | ||
1128 | { | ||
1129 | struct csio_ioreq *ioreq = NULL; | ||
1130 | struct cpl_fw6_msg *cpl; | ||
1131 | uint8_t *tempwr; | ||
1132 | uint8_t status; | ||
1133 | struct csio_scsim *scm = csio_hw_to_scsim(hw); | ||
1134 | |||
1135 | /* skip RSS header */ | ||
1136 | cpl = (struct cpl_fw6_msg *)((uintptr_t)wr + sizeof(__be64)); | ||
1137 | |||
1138 | if (unlikely(cpl->opcode != CPL_FW6_MSG)) { | ||
1139 | csio_warn(hw, "Error: Invalid CPL msg %x recvd on SCSI q\n", | ||
1140 | cpl->opcode); | ||
1141 | CSIO_INC_STATS(scm, n_inval_cplop); | ||
1142 | return NULL; | ||
1143 | } | ||
1144 | |||
1145 | tempwr = (uint8_t *)(cpl->data); | ||
1146 | status = csio_wr_status(tempwr); | ||
1147 | *scsiwr = tempwr; | ||
1148 | |||
1149 | if (likely((*tempwr == FW_SCSI_READ_WR) || | ||
1150 | (*tempwr == FW_SCSI_WRITE_WR) || | ||
1151 | (*tempwr == FW_SCSI_CMD_WR))) { | ||
1152 | ioreq = (struct csio_ioreq *)((uintptr_t) | ||
1153 | (((struct fw_scsi_read_wr *)tempwr)->cookie)); | ||
1154 | CSIO_DB_ASSERT(virt_addr_valid(ioreq)); | ||
1155 | |||
1156 | ioreq->wr_status = status; | ||
1157 | |||
1158 | return ioreq; | ||
1159 | } | ||
1160 | |||
1161 | if (*tempwr == FW_SCSI_ABRT_CLS_WR) { | ||
1162 | ioreq = (struct csio_ioreq *)((uintptr_t) | ||
1163 | (((struct fw_scsi_abrt_cls_wr *)tempwr)->cookie)); | ||
1164 | CSIO_DB_ASSERT(virt_addr_valid(ioreq)); | ||
1165 | |||
1166 | ioreq->wr_status = status; | ||
1167 | return ioreq; | ||
1168 | } | ||
1169 | |||
1170 | csio_warn(hw, "WR with invalid opcode in SCSI IQ: %x\n", *tempwr); | ||
1171 | CSIO_INC_STATS(scm, n_inval_scsiop); | ||
1172 | return NULL; | ||
1173 | } | ||
1174 | |||
1175 | /* | ||
1176 | * csio_scsi_cleanup_io_q - Cleanup the given queue. | ||
1177 | * @scm: SCSI module. | ||
1178 | * @q: Queue to be cleaned up. | ||
1179 | * | ||
1180 | * Called with lock held. Has to exit with lock held. | ||
1181 | */ | ||
1182 | void | ||
1183 | csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q) | ||
1184 | { | ||
1185 | struct csio_hw *hw = scm->hw; | ||
1186 | struct csio_ioreq *ioreq; | ||
1187 | struct list_head *tmp, *next; | ||
1188 | struct scsi_cmnd *scmnd; | ||
1189 | |||
1190 | /* Call back the completion routines of the active_q */ | ||
1191 | list_for_each_safe(tmp, next, q) { | ||
1192 | ioreq = (struct csio_ioreq *)tmp; | ||
1193 | csio_scsi_drvcleanup(ioreq); | ||
1194 | list_del_init(&ioreq->sm.sm_list); | ||
1195 | scmnd = csio_scsi_cmnd(ioreq); | ||
1196 | spin_unlock_irq(&hw->lock); | ||
1197 | |||
1198 | /* | ||
1199 | * Upper layers may have cleared this command, hence this | ||
1200 | * check to avoid accessing stale references. | ||
1201 | */ | ||
1202 | if (scmnd != NULL) | ||
1203 | ioreq->io_cbfn(hw, ioreq); | ||
1204 | |||
1205 | spin_lock_irq(&scm->freelist_lock); | ||
1206 | csio_put_scsi_ioreq(scm, ioreq); | ||
1207 | spin_unlock_irq(&scm->freelist_lock); | ||
1208 | |||
1209 | spin_lock_irq(&hw->lock); | ||
1210 | } | ||
1211 | } | ||
1212 | |||
1213 | #define CSIO_SCSI_ABORT_Q_POLL_MS 2000 | ||
1214 | |||
1215 | static void | ||
1216 | csio_abrt_cls(struct csio_ioreq *ioreq, struct scsi_cmnd *scmnd) | ||
1217 | { | ||
1218 | struct csio_lnode *ln = ioreq->lnode; | ||
1219 | struct csio_hw *hw = ln->hwp; | ||
1220 | int ready = 0; | ||
1221 | struct csio_scsim *scsim = csio_hw_to_scsim(hw); | ||
1222 | int rv; | ||
1223 | |||
1224 | if (csio_scsi_cmnd(ioreq) != scmnd) { | ||
1225 | CSIO_INC_STATS(scsim, n_abrt_race_comp); | ||
1226 | return; | ||
1227 | } | ||
1228 | |||
1229 | ready = csio_is_lnode_ready(ln); | ||
1230 | |||
1231 | rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE)); | ||
1232 | if (rv != 0) { | ||
1233 | if (ready) | ||
1234 | CSIO_INC_STATS(scsim, n_abrt_busy_error); | ||
1235 | else | ||
1236 | CSIO_INC_STATS(scsim, n_cls_busy_error); | ||
1237 | } | ||
1238 | } | ||
1239 | |||
1240 | /* | ||
1241 | * csio_scsi_abort_io_q - Abort all I/Os on given queue | ||
1242 | * @scm: SCSI module. | ||
1243 | * @q: Queue to abort. | ||
1244 | * @tmo: Timeout in ms | ||
1245 | * | ||
1246 | * Attempt to abort all I/Os on given queue, and wait for a max | ||
1247 | * of tmo milliseconds for them to complete. Returns success | ||
1248 | * if all I/Os are aborted. Else returns -ETIMEDOUT. | ||
1249 | * Should be entered with lock held. Exits with lock held. | ||
1250 | * NOTE: | ||
1251 | * Lock has to be held across the loop that aborts I/Os, since dropping the lock | ||
1252 | * in between can cause the list to be corrupted. As a result, the caller | ||
1253 | * of this function has to ensure that the number of I/Os to be aborted | ||
1254 | * is small enough not to cause lock-held-for-too-long issues. | ||
1255 | */ | ||
1256 | static int | ||
1257 | csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo) | ||
1258 | { | ||
1259 | struct csio_hw *hw = scm->hw; | ||
1260 | struct list_head *tmp, *next; | ||
1261 | int count = DIV_ROUND_UP(tmo, CSIO_SCSI_ABORT_Q_POLL_MS); | ||
1262 | struct scsi_cmnd *scmnd; | ||
1263 | |||
1264 | if (list_empty(q)) | ||
1265 | return 0; | ||
1266 | |||
1267 | csio_dbg(hw, "Aborting SCSI I/Os\n"); | ||
1268 | |||
1269 | /* Now abort/close I/Os in the queue passed */ | ||
1270 | list_for_each_safe(tmp, next, q) { | ||
1271 | scmnd = csio_scsi_cmnd((struct csio_ioreq *)tmp); | ||
1272 | csio_abrt_cls((struct csio_ioreq *)tmp, scmnd); | ||
1273 | } | ||
1274 | |||
1275 | /* Wait till all active I/Os are completed/aborted/closed */ | ||
1276 | while (!list_empty(q) && count--) { | ||
1277 | spin_unlock_irq(&hw->lock); | ||
1278 | msleep(CSIO_SCSI_ABORT_Q_POLL_MS); | ||
1279 | spin_lock_irq(&hw->lock); | ||
1280 | } | ||
1281 | |||
1282 | /* all aborts completed */ | ||
1283 | if (list_empty(q)) | ||
1284 | return 0; | ||
1285 | |||
1286 | return -ETIMEDOUT; | ||
1287 | } | ||
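
/*
 * Polling arithmetic (illustrative): with tmo = 30000ms and
 * CSIO_SCSI_ABORT_Q_POLL_MS = 2000, count = DIV_ROUND_UP(30000, 2000) =
 * 15, i.e. at most 15 two-second sleeps, with hw->lock dropped across
 * each msleep() so that completions can drain the queue in the interim.
 */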
1288 | |||
1289 | /* | ||
1290 | * csio_scsim_cleanup_io - Cleanup all I/Os in SCSI module. | ||
1291 | * @scm: SCSI module. | ||
1292 | * @abort: abort required. | ||
1293 | * Called with lock held, should exit with lock held. | ||
1294 | * Can sleep when waiting for I/Os to complete. | ||
1295 | */ | ||
1296 | int | ||
1297 | csio_scsim_cleanup_io(struct csio_scsim *scm, bool abort) | ||
1298 | { | ||
1299 | struct csio_hw *hw = scm->hw; | ||
1300 | int rv = 0; | ||
1301 | int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS); | ||
1302 | |||
1303 | /* No I/Os pending */ | ||
1304 | if (list_empty(&scm->active_q)) | ||
1305 | return 0; | ||
1306 | |||
1307 | /* Wait until all active I/Os are completed */ | ||
1308 | while (!list_empty(&scm->active_q) && count--) { | ||
1309 | spin_unlock_irq(&hw->lock); | ||
1310 | msleep(CSIO_SCSI_ABORT_Q_POLL_MS); | ||
1311 | spin_lock_irq(&hw->lock); | ||
1312 | } | ||
1313 | |||
1314 | /* all I/Os completed */ | ||
1315 | if (list_empty(&scm->active_q)) | ||
1316 | return 0; | ||
1317 | |||
1318 | /* Else abort */ | ||
1319 | if (abort) { | ||
1320 | rv = csio_scsi_abort_io_q(scm, &scm->active_q, 30000); | ||
1321 | if (rv == 0) | ||
1322 | return rv; | ||
1323 | csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n"); | ||
1324 | } | ||
1325 | |||
1326 | csio_scsi_cleanup_io_q(scm, &scm->active_q); | ||
1327 | |||
1328 | CSIO_DB_ASSERT(list_empty(&scm->active_q)); | ||
1329 | |||
1330 | return rv; | ||
1331 | } | ||
1332 | |||
1333 | /* | ||
1334 | * csio_scsim_cleanup_io_lnode - Cleanup all I/Os of given lnode. | ||
1335 | * @scm: SCSI module. | ||
1336 | * @lnode: lnode | ||
1337 | * | ||
1338 | * Called with lock held, should exit with lock held. | ||
1339 | * Can sleep (with dropped lock) when waiting for I/Os to complete. | ||
1340 | */ | ||
1341 | int | ||
1342 | csio_scsim_cleanup_io_lnode(struct csio_scsim *scm, struct csio_lnode *ln) | ||
1343 | { | ||
1344 | struct csio_hw *hw = scm->hw; | ||
1345 | struct csio_scsi_level_data sld; | ||
1346 | int rv; | ||
1347 | int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS); | ||
1348 | |||
1349 | csio_dbg(hw, "Gathering all SCSI I/Os on lnode %p\n", ln); | ||
1350 | |||
1351 | sld.level = CSIO_LEV_LNODE; | ||
1352 | sld.lnode = ln; | ||
1353 | INIT_LIST_HEAD(&ln->cmpl_q); | ||
1354 | csio_scsi_gather_active_ios(scm, &sld, &ln->cmpl_q); | ||
1355 | |||
1356 | /* No I/Os pending on this lnode */ | ||
1357 | if (list_empty(&ln->cmpl_q)) | ||
1358 | return 0; | ||
1359 | |||
1360 | /* Wait until all active I/Os on this lnode are completed */ | ||
1361 | while (!list_empty(&ln->cmpl_q) && count--) { | ||
1362 | spin_unlock_irq(&hw->lock); | ||
1363 | msleep(CSIO_SCSI_ABORT_Q_POLL_MS); | ||
1364 | spin_lock_irq(&hw->lock); | ||
1365 | } | ||
1366 | |||
1367 | /* all I/Os completed */ | ||
1368 | if (list_empty(&ln->cmpl_q)) | ||
1369 | return 0; | ||
1370 | |||
1371 | csio_dbg(hw, "Some I/Os pending on ln:%p, aborting them..\n", ln); | ||
1372 | |||
1373 | /* I/Os are pending, abort them */ | ||
1374 | rv = csio_scsi_abort_io_q(scm, &ln->cmpl_q, 30000); | ||
1375 | if (rv != 0) { | ||
1376 | csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n"); | ||
1377 | csio_scsi_cleanup_io_q(scm, &ln->cmpl_q); | ||
1378 | } | ||
1379 | |||
1380 | CSIO_DB_ASSERT(list_empty(&ln->cmpl_q)); | ||
1381 | |||
1382 | return rv; | ||
1383 | } | ||
1384 | |||
1385 | static ssize_t | ||
1386 | csio_show_hw_state(struct device *dev, | ||
1387 | struct device_attribute *attr, char *buf) | ||
1388 | { | ||
1389 | struct csio_lnode *ln = shost_priv(class_to_shost(dev)); | ||
1390 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
1391 | |||
1392 | if (csio_is_hw_ready(hw)) | ||
1393 | return snprintf(buf, PAGE_SIZE, "ready\n"); | ||
1394 | else | ||
1395 | return snprintf(buf, PAGE_SIZE, "not ready\n"); | ||
1396 | } | ||
1397 | |||
1398 | /* Device reset */ | ||
1399 | static ssize_t | ||
1400 | csio_device_reset(struct device *dev, | ||
1401 | struct device_attribute *attr, const char *buf, size_t count) | ||
1402 | { | ||
1403 | struct csio_lnode *ln = shost_priv(class_to_shost(dev)); | ||
1404 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
1405 | |||
1406 | if (*buf != '1') | ||
1407 | return -EINVAL; | ||
1408 | |||
1409 | /* Delete NPIV lnodes */ | ||
1410 | csio_lnodes_exit(hw, 1); | ||
1411 | |||
1412 | /* Block upper IOs */ | ||
1413 | csio_lnodes_block_request(hw); | ||
1414 | |||
1415 | spin_lock_irq(&hw->lock); | ||
1416 | csio_hw_reset(hw); | ||
1417 | spin_unlock_irq(&hw->lock); | ||
1418 | |||
1419 | /* Unblock upper IOs */ | ||
1420 | csio_lnodes_unblock_request(hw); | ||
1421 | return count; | ||
1422 | } | ||
1423 | |||
1424 | /* Disable port */ | ||
1425 | static ssize_t | ||
1426 | csio_disable_port(struct device *dev, | ||
1427 | struct device_attribute *attr, const char *buf, size_t count) | ||
1428 | { | ||
1429 | struct csio_lnode *ln = shost_priv(class_to_shost(dev)); | ||
1430 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
1431 | bool disable; | ||
1432 | |||
1433 | if (*buf == '1' || *buf == '0') | ||
1434 | disable = (*buf == '1') ? true : false; | ||
1435 | else | ||
1436 | return -EINVAL; | ||
1437 | |||
1438 | /* Block upper IOs */ | ||
1439 | csio_lnodes_block_by_port(hw, ln->portid); | ||
1440 | |||
1441 | spin_lock_irq(&hw->lock); | ||
1442 | csio_disable_lnodes(hw, ln->portid, disable); | ||
1443 | spin_unlock_irq(&hw->lock); | ||
1444 | |||
1445 | /* Unblock upper IOs */ | ||
1446 | csio_lnodes_unblock_by_port(hw, ln->portid); | ||
1447 | return count; | ||
1448 | } | ||
1449 | |||
1450 | /* Show debug level */ | ||
1451 | static ssize_t | ||
1452 | csio_show_dbg_level(struct device *dev, | ||
1453 | struct device_attribute *attr, char *buf) | ||
1454 | { | ||
1455 | struct csio_lnode *ln = shost_priv(class_to_shost(dev)); | ||
1456 | |||
1457 | return snprintf(buf, PAGE_SIZE, "%x\n", ln->params.log_level); | ||
1458 | } | ||
1459 | |||
1460 | /* Store debug level */ | ||
1461 | static ssize_t | ||
1462 | csio_store_dbg_level(struct device *dev, | ||
1463 | struct device_attribute *attr, const char *buf, size_t count) | ||
1464 | { | ||
1465 | struct csio_lnode *ln = shost_priv(class_to_shost(dev)); | ||
1466 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
1467 | uint32_t dbg_level = 0; | ||
1468 | |||
1469 | if (!isdigit(buf[0])) | ||
1470 | return -EINVAL; | ||
1471 | |||
1472 | if (sscanf(buf, "%i", &dbg_level) != 1) | ||
1473 | return -EINVAL; | ||
1474 | |||
1475 | ln->params.log_level = dbg_level; | ||
1476 | hw->params.log_level = dbg_level; | ||
1477 | |||
1478 | return count; | ||
1479 | } | ||
1480 | |||
1481 | static DEVICE_ATTR(hw_state, S_IRUGO, csio_show_hw_state, NULL); | ||
1482 | static DEVICE_ATTR(device_reset, S_IRUGO | S_IWUSR, NULL, csio_device_reset); | ||
1483 | static DEVICE_ATTR(disable_port, S_IRUGO | S_IWUSR, NULL, csio_disable_port); | ||
1484 | static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level, | ||
1485 | csio_store_dbg_level); | ||
1486 | |||
1487 | static struct device_attribute *csio_fcoe_lport_attrs[] = { | ||
1488 | &dev_attr_hw_state, | ||
1489 | &dev_attr_device_reset, | ||
1490 | &dev_attr_disable_port, | ||
1491 | &dev_attr_dbg_level, | ||
1492 | NULL, | ||
1493 | }; | ||
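
/*
 * These attributes appear under the Scsi_Host sysfs directory
 * (presumably /sys/class/scsi_host/host<N>/ once the lport is
 * registered with the midlayer), e.g.:
 *
 *   cat /sys/class/scsi_host/host7/hw_state       # "ready"/"not ready"
 *   echo 1 > /sys/class/scsi_host/host7/device_reset
 *   echo 1 > /sys/class/scsi_host/host7/disable_port
 *
 * where host7 is a made-up host number assigned at probe time.
 */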
1494 | |||
1495 | static ssize_t | ||
1496 | csio_show_num_reg_rnodes(struct device *dev, | ||
1497 | struct device_attribute *attr, char *buf) | ||
1498 | { | ||
1499 | struct csio_lnode *ln = shost_priv(class_to_shost(dev)); | ||
1500 | |||
1501 | return snprintf(buf, PAGE_SIZE, "%d\n", ln->num_reg_rnodes); | ||
1502 | } | ||
1503 | |||
1504 | static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL); | ||
1505 | |||
1506 | static struct device_attribute *csio_fcoe_vport_attrs[] = { | ||
1507 | &dev_attr_num_reg_rnodes, | ||
1508 | &dev_attr_dbg_level, | ||
1509 | NULL, | ||
1510 | }; | ||
1511 | |||
1512 | static inline uint32_t | ||
1513 | csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req) | ||
1514 | { | ||
1515 | struct scsi_cmnd *scmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); | ||
1516 | struct scatterlist *sg; | ||
1517 | uint32_t bytes_left; | ||
1518 | uint32_t bytes_copy; | ||
1519 | uint32_t buf_off = 0; | ||
1520 | uint32_t start_off = 0; | ||
1521 | uint32_t sg_off = 0; | ||
1522 | void *sg_addr; | ||
1523 | void *buf_addr; | ||
1524 | struct csio_dma_buf *dma_buf; | ||
1525 | |||
1526 | bytes_left = scsi_bufflen(scmnd); | ||
1527 | sg = scsi_sglist(scmnd); | ||
1528 | dma_buf = (struct csio_dma_buf *)csio_list_next(&req->gen_list); | ||
1529 | |||
1530 | /* Copy data from driver buffer to SGs of SCSI CMD */ | ||
1531 | while (bytes_left > 0 && sg && dma_buf) { | ||
1532 | if (buf_off >= dma_buf->len) { | ||
1533 | buf_off = 0; | ||
1534 | dma_buf = (struct csio_dma_buf *) | ||
1535 | csio_list_next(dma_buf); | ||
1536 | continue; | ||
1537 | } | ||
1538 | |||
1539 | if (start_off >= sg->length) { | ||
1540 | start_off -= sg->length; | ||
1541 | sg = sg_next(sg); | ||
1542 | continue; | ||
1543 | } | ||
1544 | |||
1545 | buf_addr = dma_buf->vaddr + buf_off; | ||
1546 | sg_off = sg->offset + start_off; | ||
1547 | bytes_copy = min((dma_buf->len - buf_off), | ||
1548 | sg->length - start_off); | ||
1549 | bytes_copy = min((uint32_t)(PAGE_SIZE - (sg_off & ~PAGE_MASK)), | ||
1550 | bytes_copy); | ||
1551 | |||
1552 | sg_addr = kmap_atomic(sg_page(sg) + (sg_off >> PAGE_SHIFT)); | ||
1553 | if (!sg_addr) { | ||
1554 | csio_err(hw, "failed to kmap sg:%p of ioreq:%p\n", | ||
1555 | sg, req); | ||
1556 | break; | ||
1557 | } | ||
1558 | |||
1559 | csio_dbg(hw, "copy_to_sgl:sg_addr %p sg_off %d buf %p len %d\n", | ||
1560 | sg_addr, sg_off, buf_addr, bytes_copy); | ||
1561 | memcpy(sg_addr + (sg_off & ~PAGE_MASK), buf_addr, bytes_copy); | ||
1562 | kunmap_atomic(sg_addr); | ||
1563 | |||
1564 | start_off += bytes_copy; | ||
1565 | buf_off += bytes_copy; | ||
1566 | bytes_left -= bytes_copy; | ||
1567 | } | ||
1568 | |||
1569 | if (bytes_left > 0) | ||
1570 | return DID_ERROR; | ||
1571 | else | ||
1572 | return DID_OK; | ||
1573 | } | ||
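
/*
 * The copy loop above advances three cursors independently: buf_off
 * within the current DDP buffer, start_off within the current SGE, and
 * an implicit page cursor via kmap_atomic(); bytes_copy is clamped so
 * that a single memcpy() never crosses a destination page boundary.
 * A non-zero bytes_left at exit is reported as DID_ERROR (short copy).
 */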
1574 | |||
1575 | /* | ||
1576 | * csio_scsi_err_handler - SCSI error handler. | ||
1577 | * @hw: HW module. | ||
1578 | * @req: IO request. | ||
1579 | * | ||
1580 | */ | ||
1581 | static inline void | ||
1582 | csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req) | ||
1583 | { | ||
1584 | struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); | ||
1585 | struct csio_scsim *scm = csio_hw_to_scsim(hw); | ||
1586 | struct fcp_resp_with_ext *fcp_resp; | ||
1587 | struct fcp_resp_rsp_info *rsp_info; | ||
1588 | struct csio_dma_buf *dma_buf; | ||
1589 | uint8_t flags, scsi_status = 0; | ||
1590 | uint32_t host_status = DID_OK; | ||
1591 | uint32_t rsp_len = 0, sns_len = 0; | ||
1592 | struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata); | ||
1593 | |||
1594 | |||
1595 | switch (req->wr_status) { | ||
1596 | case FW_HOSTERROR: | ||
1597 | if (unlikely(!csio_is_hw_ready(hw))) | ||
1598 | return; | ||
1599 | |||
1600 | host_status = DID_ERROR; | ||
1601 | CSIO_INC_STATS(scm, n_hosterror); | ||
1602 | |||
1603 | break; | ||
1604 | case FW_SCSI_RSP_ERR: | ||
1605 | dma_buf = &req->dma_buf; | ||
1606 | fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr; | ||
1607 | rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1); | ||
1608 | flags = fcp_resp->resp.fr_flags; | ||
1609 | scsi_status = fcp_resp->resp.fr_status; | ||
1610 | |||
1611 | if (flags & FCP_RSP_LEN_VAL) { | ||
1612 | rsp_len = be32_to_cpu(fcp_resp->ext.fr_rsp_len); | ||
1613 | if ((rsp_len != 0 && rsp_len != 4 && rsp_len != 8) || | ||
1614 | (rsp_info->rsp_code != FCP_TMF_CMPL)) { | ||
1615 | host_status = DID_ERROR; | ||
1616 | goto out; | ||
1617 | } | ||
1618 | } | ||
1619 | |||
1620 | if ((flags & FCP_SNS_LEN_VAL) && fcp_resp->ext.fr_sns_len) { | ||
1621 | sns_len = be32_to_cpu(fcp_resp->ext.fr_sns_len); | ||
1622 | if (sns_len > SCSI_SENSE_BUFFERSIZE) | ||
1623 | sns_len = SCSI_SENSE_BUFFERSIZE; | ||
1624 | |||
1625 | memcpy(cmnd->sense_buffer, | ||
1626 | &rsp_info->_fr_resvd[0] + rsp_len, sns_len); | ||
1627 | CSIO_INC_STATS(scm, n_autosense); | ||
1628 | } | ||
1629 | |||
1630 | scsi_set_resid(cmnd, 0); | ||
1631 | |||
1632 | /* Under run */ | ||
1633 | if (flags & FCP_RESID_UNDER) { | ||
1634 | scsi_set_resid(cmnd, | ||
1635 | be32_to_cpu(fcp_resp->ext.fr_resid)); | ||
1636 | |||
1637 | if (!(flags & FCP_SNS_LEN_VAL) && | ||
1638 | (scsi_status == SAM_STAT_GOOD) && | ||
1639 | ((scsi_bufflen(cmnd) - scsi_get_resid(cmnd)) | ||
1640 | < cmnd->underflow)) | ||
1641 | host_status = DID_ERROR; | ||
1642 | } else if (flags & FCP_RESID_OVER) | ||
1643 | host_status = DID_ERROR; | ||
1644 | |||
1645 | CSIO_INC_STATS(scm, n_rsperror); | ||
1646 | break; | ||
1647 | |||
1648 | case FW_SCSI_OVER_FLOW_ERR: | ||
1649 | csio_warn(hw, | ||
1650 | "Over-flow error,cmnd:0x%x expected len:0x%x" | ||
1651 | " resid:0x%x\n", cmnd->cmnd[0], | ||
1652 | scsi_bufflen(cmnd), scsi_get_resid(cmnd)); | ||
1653 | host_status = DID_ERROR; | ||
1654 | CSIO_INC_STATS(scm, n_ovflerror); | ||
1655 | break; | ||
1656 | |||
1657 | case FW_SCSI_UNDER_FLOW_ERR: | ||
1658 | csio_warn(hw, | ||
1659 | "Under-flow error,cmnd:0x%x expected" | ||
1660 | " len:0x%x resid:0x%x lun:0x%x ssn:0x%x\n", | ||
1661 | cmnd->cmnd[0], scsi_bufflen(cmnd), | ||
1662 | scsi_get_resid(cmnd), cmnd->device->lun, | ||
1663 | rn->flowid); | ||
1664 | host_status = DID_ERROR; | ||
1665 | CSIO_INC_STATS(scm, n_unflerror); | ||
1666 | break; | ||
1667 | |||
1668 | case FW_SCSI_ABORT_REQUESTED: | ||
1669 | case FW_SCSI_ABORTED: | ||
1670 | case FW_SCSI_CLOSE_REQUESTED: | ||
1671 | csio_dbg(hw, "Req %p cmd:%p op:%x %s\n", req, cmnd, | ||
1672 | cmnd->cmnd[0], | ||
1673 | (req->wr_status == FW_SCSI_CLOSE_REQUESTED) ? | ||
1674 | "closed" : "aborted"); | ||
1675 | /* | ||
1676 | * csio_eh_abort_handler checks this value to | ||
1677 | * succeed or fail the abort request. | ||
1678 | */ | ||
1679 | host_status = DID_REQUEUE; | ||
1680 | if (req->wr_status == FW_SCSI_CLOSE_REQUESTED) | ||
1681 | CSIO_INC_STATS(scm, n_closed); | ||
1682 | else | ||
1683 | CSIO_INC_STATS(scm, n_aborted); | ||
1684 | break; | ||
1685 | |||
1686 | case FW_SCSI_ABORT_TIMEDOUT: | ||
1687 | /* FW timed out the abort itself */ | ||
1688 | csio_dbg(hw, "FW timed out abort req:%p cmnd:%p status:%x\n", | ||
1689 | req, cmnd, req->wr_status); | ||
1690 | host_status = DID_ERROR; | ||
1691 | CSIO_INC_STATS(scm, n_abrt_timedout); | ||
1692 | break; | ||
1693 | |||
1694 | case FW_RDEV_NOT_READY: | ||
1695 | /* | ||
1696 | * In firmware, an RDEV can get into this state | ||
1697 | * temporarily, before moving into the disappeared/lost | ||
1698 | * state. So the driver should complete the request as if | ||
1699 | * the device had disappeared. | ||
1700 | */ | ||
1701 | CSIO_INC_STATS(scm, n_rdev_nr_error); | ||
1702 | host_status = DID_ERROR; | ||
1703 | break; | ||
1704 | |||
1705 | case FW_ERR_RDEV_LOST: | ||
1706 | CSIO_INC_STATS(scm, n_rdev_lost_error); | ||
1707 | host_status = DID_ERROR; | ||
1708 | break; | ||
1709 | |||
1710 | case FW_ERR_RDEV_LOGO: | ||
1711 | CSIO_INC_STATS(scm, n_rdev_logo_error); | ||
1712 | host_status = DID_ERROR; | ||
1713 | break; | ||
1714 | |||
1715 | case FW_ERR_RDEV_IMPL_LOGO: | ||
1716 | host_status = DID_ERROR; | ||
1717 | break; | ||
1718 | |||
1719 | case FW_ERR_LINK_DOWN: | ||
1720 | CSIO_INC_STATS(scm, n_link_down_error); | ||
1721 | host_status = DID_ERROR; | ||
1722 | break; | ||
1723 | |||
1724 | case FW_FCOE_NO_XCHG: | ||
1725 | CSIO_INC_STATS(scm, n_no_xchg_error); | ||
1726 | host_status = DID_ERROR; | ||
1727 | break; | ||
1728 | |||
1729 | default: | ||
1730 | csio_err(hw, "Unknown SCSI FW WR status:%d req:%p cmnd:%p\n", | ||
1731 | req->wr_status, req, cmnd); | ||
1732 | CSIO_DB_ASSERT(0); | ||
1733 | |||
1734 | CSIO_INC_STATS(scm, n_unknown_error); | ||
1735 | host_status = DID_ERROR; | ||
1736 | break; | ||
1737 | } | ||
1738 | |||
1739 | out: | ||
1740 | if (req->nsge > 0) | ||
1741 | scsi_dma_unmap(cmnd); | ||
1742 | |||
1743 | cmnd->result = (((host_status) << 16) | scsi_status); | ||
1744 | cmnd->scsi_done(cmnd); | ||
1745 | |||
1746 | /* Wake up waiting threads */ | ||
1747 | csio_scsi_cmnd(req) = NULL; | ||
1748 | complete_all(&req->cmplobj); | ||
1749 | } | ||
1750 | |||
1751 | /* | ||
1752 | * csio_scsi_cbfn - SCSI callback function. | ||
1753 | * @hw: HW module. | ||
1754 | * @req: IO request. | ||
1755 | * | ||
1756 | */ | ||
1757 | static void | ||
1758 | csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req) | ||
1759 | { | ||
1760 | struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); | ||
1761 | uint8_t scsi_status = SAM_STAT_GOOD; | ||
1762 | uint32_t host_status = DID_OK; | ||
1763 | |||
1764 | if (likely(req->wr_status == FW_SUCCESS)) { | ||
1765 | if (req->nsge > 0) { | ||
1766 | scsi_dma_unmap(cmnd); | ||
1767 | if (req->dcopy) | ||
1768 | host_status = csio_scsi_copy_to_sgl(hw, req); | ||
1769 | } | ||
1770 | |||
1771 | cmnd->result = (((host_status) << 16) | scsi_status); | ||
1772 | cmnd->scsi_done(cmnd); | ||
1773 | csio_scsi_cmnd(req) = NULL; | ||
1774 | CSIO_INC_STATS(csio_hw_to_scsim(hw), n_tot_success); | ||
1775 | } else { | ||
1776 | /* Error handling */ | ||
1777 | csio_scsi_err_handler(hw, req); | ||
1778 | } | ||
1779 | } | ||
1780 | |||
1781 | /** | ||
1782 | * csio_queuecommand - Entry point to kickstart an I/O request. | ||
1783 | * @host: The scsi_host pointer. | ||
1784 | * @cmnd: The I/O request from ML. | ||
1785 | * | ||
1786 | * This routine does the following: | ||
1787 | * - Checks for HW and Rnode module readiness. | ||
1788 | * - Gets a free ioreq structure (which is already initialized | ||
1789 | * to uninit during its allocation). | ||
1790 | * - Maps SG elements. | ||
1791 | * - Initializes ioreq members. | ||
1792 | * - Kicks off the SCSI state machine for this IO. | ||
1793 | * - Returns busy status on error. | ||
1794 | */ | ||
1795 | static int | ||
1796 | csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd) | ||
1797 | { | ||
1798 | struct csio_lnode *ln = shost_priv(host); | ||
1799 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
1800 | struct csio_scsim *scsim = csio_hw_to_scsim(hw); | ||
1801 | struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata); | ||
1802 | struct csio_ioreq *ioreq = NULL; | ||
1803 | unsigned long flags; | ||
1804 | int nsge = 0; | ||
1805 | int rv = SCSI_MLQUEUE_HOST_BUSY, nr; | ||
1806 | int retval; | ||
1807 | int cpu; | ||
1808 | struct csio_scsi_qset *sqset; | ||
1809 | struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); | ||
1810 | |||
1811 | if (!blk_rq_cpu_valid(cmnd->request)) | ||
1812 | cpu = smp_processor_id(); | ||
1813 | else | ||
1814 | cpu = cmnd->request->cpu; | ||
1815 | |||
1816 | sqset = &hw->sqset[ln->portid][cpu]; | ||
1817 | |||
1818 | nr = fc_remote_port_chkready(rport); | ||
1819 | if (nr) { | ||
1820 | cmnd->result = nr; | ||
1821 | CSIO_INC_STATS(scsim, n_rn_nr_error); | ||
1822 | goto err_done; | ||
1823 | } | ||
1824 | |||
1825 | if (unlikely(!csio_is_hw_ready(hw))) { | ||
1826 | cmnd->result = (DID_REQUEUE << 16); | ||
1827 | CSIO_INC_STATS(scsim, n_hw_nr_error); | ||
1828 | goto err_done; | ||
1829 | } | ||
1830 | |||
1831 | /* Get req->nsge, if there are SG elements to be mapped */ | ||
1832 | nsge = scsi_dma_map(cmnd); | ||
1833 | if (unlikely(nsge < 0)) { | ||
1834 | CSIO_INC_STATS(scsim, n_dmamap_error); | ||
1835 | goto err; | ||
1836 | } | ||
1837 | |||
1838 | /* Do we support so many mappings? */ | ||
1839 | if (unlikely(nsge > scsim->max_sge)) { | ||
1840 | csio_warn(hw, | ||
1841 | "More SGEs than can be supported." | ||
1842 | " SGEs: %d, Max SGEs: %d\n", nsge, scsim->max_sge); | ||
1843 | CSIO_INC_STATS(scsim, n_unsupp_sge_error); | ||
1844 | goto err_dma_unmap; | ||
1845 | } | ||
1846 | |||
1847 | /* Get a free ioreq structure - SM is already set to uninit */ | ||
1848 | ioreq = csio_get_scsi_ioreq_lock(hw, scsim); | ||
1849 | if (!ioreq) { | ||
1850 | csio_err(hw, "Out of I/O request elements. Active #:%d\n", | ||
1851 | scsim->stats.n_active); | ||
1852 | CSIO_INC_STATS(scsim, n_no_req_error); | ||
1853 | goto err_dma_unmap; | ||
1854 | } | ||
1855 | |||
1856 | ioreq->nsge = nsge; | ||
1857 | ioreq->lnode = ln; | ||
1858 | ioreq->rnode = rn; | ||
1859 | ioreq->iq_idx = sqset->iq_idx; | ||
1860 | ioreq->eq_idx = sqset->eq_idx; | ||
1861 | ioreq->wr_status = 0; | ||
1862 | ioreq->drv_status = 0; | ||
1863 | csio_scsi_cmnd(ioreq) = (void *)cmnd; | ||
1864 | ioreq->tmo = 0; | ||
1865 | ioreq->datadir = cmnd->sc_data_direction; | ||
1866 | |||
1867 | if (cmnd->sc_data_direction == DMA_TO_DEVICE) { | ||
1868 | CSIO_INC_STATS(ln, n_output_requests); | ||
1869 | ln->stats.n_output_bytes += scsi_bufflen(cmnd); | ||
1870 | } else if (cmnd->sc_data_direction == DMA_FROM_DEVICE) { | ||
1871 | CSIO_INC_STATS(ln, n_input_requests); | ||
1872 | ln->stats.n_input_bytes += scsi_bufflen(cmnd); | ||
1873 | } else | ||
1874 | CSIO_INC_STATS(ln, n_control_requests); | ||
1875 | |||
1876 | /* Set cbfn */ | ||
1877 | ioreq->io_cbfn = csio_scsi_cbfn; | ||
1878 | |||
1879 | /* Needed during abort */ | ||
1880 | cmnd->host_scribble = (unsigned char *)ioreq; | ||
1881 | cmnd->SCp.Message = 0; | ||
1882 | |||
1883 | /* Kick off SCSI IO SM on the ioreq */ | ||
1884 | spin_lock_irqsave(&hw->lock, flags); | ||
1885 | retval = csio_scsi_start_io(ioreq); | ||
1886 | spin_unlock_irqrestore(&hw->lock, flags); | ||
1887 | |||
1888 | if (retval != 0) { | ||
1889 | csio_err(hw, "ioreq: %p couldnt be started, status:%d\n", | ||
1890 | ioreq, retval); | ||
1891 | CSIO_INC_STATS(scsim, n_busy_error); | ||
1892 | goto err_put_req; | ||
1893 | } | ||
1894 | |||
1895 | return 0; | ||
1896 | |||
1897 | err_put_req: | ||
1898 | csio_put_scsi_ioreq_lock(hw, scsim, ioreq); | ||
1899 | err_dma_unmap: | ||
1900 | if (nsge > 0) | ||
1901 | scsi_dma_unmap(cmnd); | ||
1902 | err: | ||
1903 | return rv; | ||
1904 | |||
1905 | err_done: | ||
1906 | cmnd->scsi_done(cmnd); | ||
1907 | return 0; | ||
1908 | } | ||
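
/*
 * Return-value convention above: 0 tells the midlayer the command was
 * taken (including the err_done paths, where scsi_done() has already
 * reported the error), while SCSI_MLQUEUE_HOST_BUSY asks the midlayer
 * to retry later (DMA-map failures, ioreq exhaustion, or a
 * csio_scsi_start_io() failure).
 */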
1909 | |||
1910 | static int | ||
1911 | csio_do_abrt_cls(struct csio_hw *hw, struct csio_ioreq *ioreq, bool abort) | ||
1912 | { | ||
1913 | int rv; | ||
1914 | int cpu = smp_processor_id(); | ||
1915 | struct csio_lnode *ln = ioreq->lnode; | ||
1916 | struct csio_scsi_qset *sqset = &hw->sqset[ln->portid][cpu]; | ||
1917 | |||
1918 | ioreq->tmo = CSIO_SCSI_ABRT_TMO_MS; | ||
1919 | /* | ||
1920 | * Use current processor queue for posting the abort/close, but retain | ||
1921 | * the ingress queue ID of the original I/O being aborted/closed - we | ||
1922 | * need the abort/close completion to be received on the same queue | ||
1923 | * as the original I/O. | ||
1924 | */ | ||
1925 | ioreq->eq_idx = sqset->eq_idx; | ||
1926 | |||
1927 | if (abort == SCSI_ABORT) | ||
1928 | rv = csio_scsi_abort(ioreq); | ||
1929 | else | ||
1930 | rv = csio_scsi_close(ioreq); | ||
1931 | |||
1932 | return rv; | ||
1933 | } | ||
1934 | |||
1935 | static int | ||
1936 | csio_eh_abort_handler(struct scsi_cmnd *cmnd) | ||
1937 | { | ||
1938 | struct csio_ioreq *ioreq; | ||
1939 | struct csio_lnode *ln = shost_priv(cmnd->device->host); | ||
1940 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
1941 | struct csio_scsim *scsim = csio_hw_to_scsim(hw); | ||
1942 | int ready = 0, ret; | ||
1943 | unsigned long tmo = 0; | ||
1944 | int rv; | ||
1945 | struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata); | ||
1946 | |||
1947 | ret = fc_block_scsi_eh(cmnd); | ||
1948 | if (ret) | ||
1949 | return ret; | ||
1950 | |||
1951 | ioreq = (struct csio_ioreq *)cmnd->host_scribble; | ||
1952 | if (!ioreq) | ||
1953 | return SUCCESS; | ||
1954 | |||
1955 | if (!rn) | ||
1956 | return FAILED; | ||
1957 | |||
1958 | csio_dbg(hw, | ||
1959 | "Request to abort ioreq:%p cmd:%p cdb:%08llx" | ||
1960 | " ssni:0x%x lun:%d iq:0x%x\n", | ||
1961 | ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid, | ||
1962 | cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx)); | ||
1963 | |||
1964 | if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) != cmnd) { | ||
1965 | CSIO_INC_STATS(scsim, n_abrt_race_comp); | ||
1966 | return SUCCESS; | ||
1967 | } | ||
1968 | |||
1969 | ready = csio_is_lnode_ready(ln); | ||
1970 | tmo = CSIO_SCSI_ABRT_TMO_MS; | ||
1971 | |||
1972 | spin_lock_irq(&hw->lock); | ||
1973 | rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE)); | ||
1974 | spin_unlock_irq(&hw->lock); | ||
1975 | |||
1976 | if (rv != 0) { | ||
1977 | if (rv == -EINVAL) { | ||
1978 | /* Return success if the abort/close request was issued | ||
1979 | * on an already completed IO. | ||
1980 | */ | ||
1981 | return SUCCESS; | ||
1982 | } | ||
1983 | if (ready) | ||
1984 | CSIO_INC_STATS(scsim, n_abrt_busy_error); | ||
1985 | else | ||
1986 | CSIO_INC_STATS(scsim, n_cls_busy_error); | ||
1987 | |||
1988 | goto inval_scmnd; | ||
1989 | } | ||
1990 | |||
1991 | /* Wait for completion */ | ||
1992 | init_completion(&ioreq->cmplobj); | ||
1993 | wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo)); | ||
1994 | |||
1995 | /* FW didn't respond to abort within our timeout */ | ||
1996 | if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) { | ||
1997 | |||
1998 | csio_err(hw, "Abort timed out -- req: %p\n", ioreq); | ||
1999 | CSIO_INC_STATS(scsim, n_abrt_timedout); | ||
2000 | |||
2001 | inval_scmnd: | ||
2002 | if (ioreq->nsge > 0) | ||
2003 | scsi_dma_unmap(cmnd); | ||
2004 | |||
2005 | spin_lock_irq(&hw->lock); | ||
2006 | csio_scsi_cmnd(ioreq) = NULL; | ||
2007 | spin_unlock_irq(&hw->lock); | ||
2008 | |||
2009 | cmnd->result = (DID_ERROR << 16); | ||
2010 | cmnd->scsi_done(cmnd); | ||
2011 | |||
2012 | return FAILED; | ||
2013 | } | ||
2014 | |||
2015 | /* FW successfully aborted the request */ | ||
2016 | if (host_byte(cmnd->result) == DID_REQUEUE) { | ||
2017 | csio_info(hw, | ||
2018 | "Aborted SCSI command to (%d:%d) serial#:0x%lx\n", | ||
2019 | cmnd->device->id, cmnd->device->lun, | ||
2020 | cmnd->serial_number); | ||
2021 | return SUCCESS; | ||
2022 | } else { | ||
2023 | csio_info(hw, | ||
2024 | "Failed to abort SCSI command, (%d:%d) serial#:0x%lx\n", | ||
2025 | cmnd->device->id, cmnd->device->lun, | ||
2026 | cmnd->serial_number); | ||
2027 | return FAILED; | ||
2028 | } | ||
2029 | } | ||
2030 | |||
2031 | /* | ||
2032 | * csio_tm_cbfn - TM callback function. | ||
2033 | * @hw: HW module. | ||
2034 | * @req: IO request. | ||
2035 | * | ||
2036 | * Cache the result in 'cmnd', since ioreq will be freed soon | ||
2037 | * after we return from here, and the waiting thread shouldn't trust | ||
2038 | * the ioreq contents. | ||
2039 | */ | ||
2040 | static void | ||
2041 | csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req) | ||
2042 | { | ||
2043 | struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); | ||
2044 | struct csio_dma_buf *dma_buf; | ||
2045 | uint8_t flags = 0; | ||
2046 | struct fcp_resp_with_ext *fcp_resp; | ||
2047 | struct fcp_resp_rsp_info *rsp_info; | ||
2048 | |||
2049 | csio_dbg(hw, "req: %p in csio_tm_cbfn status: %d\n", | ||
2050 | req, req->wr_status); | ||
2051 | |||
2052 | /* Cache FW return status */ | ||
2053 | cmnd->SCp.Status = req->wr_status; | ||
2054 | |||
2055 | /* Special handling based on FCP response */ | ||
2056 | |||
2057 | /* | ||
2058 | * FW returns us this error, if flags were set. FCP4 says | ||
2059 | * FCP_RSP_LEN_VAL in flags shall be set for TM completions. | ||
2060 | * So if a target were to set this bit, we expect that the | ||
2061 | * rsp_code is set to FCP_TMF_CMPL for a successful TM | ||
2062 | * completion. Any other rsp_code means TM operation failed. | ||
2063 | * If a target were to just ignore setting flags, we treat | ||
2064 | * the TM operation as success, and FW returns FW_SUCCESS. | ||
2065 | */ | ||
2066 | if (req->wr_status == FW_SCSI_RSP_ERR) { | ||
2067 | dma_buf = &req->dma_buf; | ||
2068 | fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr; | ||
2069 | rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1); | ||
2070 | |||
2071 | flags = fcp_resp->resp.fr_flags; | ||
2072 | |||
2073 | /* Modify return status if flags indicate success */ | ||
2074 | if (flags & FCP_RSP_LEN_VAL) | ||
2075 | if (rsp_info->rsp_code == FCP_TMF_CMPL) | ||
2076 | cmnd->SCp.Status = FW_SUCCESS; | ||
2077 | |||
2078 | csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code); | ||
2079 | } | ||
2080 | |||
2081 | /* Wake up the TM handler thread */ | ||
2082 | csio_scsi_cmnd(req) = NULL; | ||
2083 | } | ||
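/*
 * Note: the "waiting thread" referred to above is
 * csio_eh_lun_reset_handler() below, which polls csio_scsi_cmnd(ioreq)
 * until this callback NULLs it, and then reads the cached completion
 * status from cmnd->SCp.Status.
 */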
2084 | |||
2085 | static int | ||
2086 | csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd) | ||
2087 | { | ||
2088 | struct csio_lnode *ln = shost_priv(cmnd->device->host); | ||
2089 | struct csio_hw *hw = csio_lnode_to_hw(ln); | ||
2090 | struct csio_scsim *scsim = csio_hw_to_scsim(hw); | ||
2091 | struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata); | ||
2092 | struct csio_ioreq *ioreq = NULL; | ||
2093 | struct csio_scsi_qset *sqset; | ||
2094 | unsigned long flags; | ||
2095 | int retval; | ||
2096 | int count, ret; | ||
2097 | LIST_HEAD(local_q); | ||
2098 | struct csio_scsi_level_data sld; | ||
2099 | |||
2100 | if (!rn) | ||
2101 | goto fail; | ||
2102 | |||
2103 | csio_dbg(hw, "Request to reset LUN:%d (ssni:0x%x tgtid:%d)\n", | ||
2104 | cmnd->device->lun, rn->flowid, rn->scsi_id); | ||
2105 | |||
2106 | if (!csio_is_lnode_ready(ln)) { | ||
2107 | csio_err(hw, | ||
2108 | "LUN reset cannot be issued on non-ready" | ||
2109 | " local node vnpi:0x%x (LUN:%d)\n", | ||
2110 | ln->vnp_flowid, cmnd->device->lun); | ||
2111 | goto fail; | ||
2112 | } | ||
2113 | |||
2114 | /* Lnode is ready, now wait on rport node readiness */ | ||
2115 | ret = fc_block_scsi_eh(cmnd); | ||
2116 | if (ret) | ||
2117 | return ret; | ||
2118 | |||
2119 | /* | ||
2120 | * If we have blocked in the previous call, at this point, either the | ||
2121 | * remote node has come back online, or device loss timer has fired | ||
2122 | * and the remote node is destroyed. Allow the LUN reset only for | ||
2123 | * the former case, since LUN reset is a TMF I/O on the wire, and we | ||
2124 | * need a valid session to issue it. | ||
2125 | */ | ||
2126 | if (fc_remote_port_chkready(rn->rport)) { | ||
2127 | csio_err(hw, | ||
2128 | "LUN reset cannot be issued on non-ready" | ||
2129 | " remote node ssni:0x%x (LUN:%d)\n", | ||
2130 | rn->flowid, cmnd->device->lun); | ||
2131 | goto fail; | ||
2132 | } | ||
2133 | |||
2134 | /* Get a free ioreq structure - SM is already set to uninit */ | ||
2135 | ioreq = csio_get_scsi_ioreq_lock(hw, scsim); | ||
2136 | |||
2137 | if (!ioreq) { | ||
2138 | csio_err(hw, "Out of IO request elements. Active # :%d\n", | ||
2139 | scsim->stats.n_active); | ||
2140 | goto fail; | ||
2141 | } | ||
2142 | |||
2143 | sqset = &hw->sqset[ln->portid][smp_processor_id()]; | ||
2144 | ioreq->nsge = 0; | ||
2145 | ioreq->lnode = ln; | ||
2146 | ioreq->rnode = rn; | ||
2147 | ioreq->iq_idx = sqset->iq_idx; | ||
2148 | ioreq->eq_idx = sqset->eq_idx; | ||
2149 | |||
2150 | csio_scsi_cmnd(ioreq) = cmnd; | ||
2151 | cmnd->host_scribble = (unsigned char *)ioreq; | ||
2152 | cmnd->SCp.Status = 0; | ||
2153 | |||
2154 | cmnd->SCp.Message = FCP_TMF_LUN_RESET; | ||
2155 | ioreq->tmo = CSIO_SCSI_LUNRST_TMO_MS / 1000; | ||
2156 | |||
2157 | /* | ||
2158 | * FW times the LUN reset for ioreq->tmo, so we got to wait a little | ||
2159 | * longer (10s for now) than that to allow FW to return the timed | ||
2160 | * out command. | ||
2161 | */ | ||
2162 | count = DIV_ROUND_UP((ioreq->tmo + 10) * 1000, CSIO_SCSI_TM_POLL_MS); | ||
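/*
 * Worked example, using the values from csio_scsi.h:
 * CSIO_SCSI_LUNRST_TMO_MS = 60000 gives ioreq->tmo = 60 seconds, so
 * count = DIV_ROUND_UP((60 + 10) * 1000, 2000) = 35 polls of
 * CSIO_SCSI_TM_POLL_MS (2000 ms) each, i.e. up to ~70 seconds of waiting.
 */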
2163 | |||
2164 | /* Set cbfn */ | ||
2165 | ioreq->io_cbfn = csio_tm_cbfn; | ||
2166 | |||
2167 | /* Save off the ioreq info for later use */ | ||
2168 | sld.level = CSIO_LEV_LUN; | ||
2169 | sld.lnode = ioreq->lnode; | ||
2170 | sld.rnode = ioreq->rnode; | ||
2171 | sld.oslun = (uint64_t)cmnd->device->lun; | ||
2172 | |||
2173 | spin_lock_irqsave(&hw->lock, flags); | ||
2174 | /* Kick off TM SM on the ioreq */ | ||
2175 | retval = csio_scsi_start_tm(ioreq); | ||
2176 | spin_unlock_irqrestore(&hw->lock, flags); | ||
2177 | |||
2178 | if (retval != 0) { | ||
2179 | csio_err(hw, "Failed to issue LUN reset, req:%p, status:%d\n", | ||
2180 | ioreq, retval); | ||
2181 | goto fail_ret_ioreq; | ||
2182 | } | ||
2183 | |||
2184 | csio_dbg(hw, "Waiting max %d secs for LUN reset completion\n", | ||
2185 | count * (CSIO_SCSI_TM_POLL_MS / 1000)); | ||
2186 | /* Wait for completion */ | ||
2187 | while ((((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) | ||
2188 | && count--) | ||
2189 | msleep(CSIO_SCSI_TM_POLL_MS); | ||
2190 | |||
2191 | /* LUN reset timed-out */ | ||
2192 | if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) { | ||
2193 | csio_err(hw, "LUN reset (%d:%d) timed out\n", | ||
2194 | cmnd->device->id, cmnd->device->lun); | ||
2195 | |||
2196 | spin_lock_irq(&hw->lock); | ||
2197 | csio_scsi_drvcleanup(ioreq); | ||
2198 | list_del_init(&ioreq->sm.sm_list); | ||
2199 | spin_unlock_irq(&hw->lock); | ||
2200 | |||
2201 | goto fail_ret_ioreq; | ||
2202 | } | ||
2203 | |||
2204 | /* LUN reset returned, check cached status */ | ||
2205 | if (cmnd->SCp.Status != FW_SUCCESS) { | ||
2206 | csio_err(hw, "LUN reset failed (%d:%d), status: %d\n", | ||
2207 | cmnd->device->id, cmnd->device->lun, cmnd->SCp.Status); | ||
2208 | goto fail; | ||
2209 | } | ||
2210 | |||
2211 | /* LUN reset succeeded, Start aborting affected I/Os */ | ||
2212 | /* | ||
2213 | * Since the host guarantees that there will not be any more | ||
2214 | * I/Os to that LUN until the LUN reset completes, we gather the | ||
2215 | * pending I/Os after the LUN reset. | ||
2216 | */ | ||
2217 | spin_lock_irq(&hw->lock); | ||
2218 | csio_scsi_gather_active_ios(scsim, &sld, &local_q); | ||
2219 | |||
2220 | retval = csio_scsi_abort_io_q(scsim, &local_q, 30000); | ||
2221 | spin_unlock_irq(&hw->lock); | ||
2222 | |||
2223 | /* Aborts may have timed out */ | ||
2224 | if (retval != 0) { | ||
2225 | csio_err(hw, | ||
2226 | "Attempt to abort I/Os during LUN reset of %d" | ||
2227 | " returned %d\n", cmnd->device->lun, retval); | ||
2228 | /* Return I/Os back to active_q */ | ||
2229 | spin_lock_irq(&hw->lock); | ||
2230 | list_splice_tail_init(&local_q, &scsim->active_q); | ||
2231 | spin_unlock_irq(&hw->lock); | ||
2232 | goto fail; | ||
2233 | } | ||
2234 | |||
2235 | CSIO_INC_STATS(rn, n_lun_rst); | ||
2236 | |||
2237 | csio_info(hw, "LUN reset occurred (%d:%d)\n", | ||
2238 | cmnd->device->id, cmnd->device->lun); | ||
2239 | |||
2240 | return SUCCESS; | ||
2241 | |||
2242 | fail_ret_ioreq: | ||
2243 | csio_put_scsi_ioreq_lock(hw, scsim, ioreq); | ||
2244 | fail: | ||
2245 | CSIO_INC_STATS(rn, n_lun_rst_fail); | ||
2246 | return FAILED; | ||
2247 | } | ||
2248 | |||
2249 | static int | ||
2250 | csio_slave_alloc(struct scsi_device *sdev) | ||
2251 | { | ||
2252 | struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); | ||
2253 | |||
2254 | if (!rport || fc_remote_port_chkready(rport)) | ||
2255 | return -ENXIO; | ||
2256 | |||
2257 | sdev->hostdata = *((struct csio_lnode **)(rport->dd_data)); | ||
2258 | |||
2259 | return 0; | ||
2260 | } | ||
2261 | |||
2262 | static int | ||
2263 | csio_slave_configure(struct scsi_device *sdev) | ||
2264 | { | ||
2265 | if (sdev->tagged_supported) | ||
2266 | scsi_activate_tcq(sdev, csio_lun_qdepth); | ||
2267 | else | ||
2268 | scsi_deactivate_tcq(sdev, csio_lun_qdepth); | ||
2269 | |||
2270 | return 0; | ||
2271 | } | ||
2272 | |||
2273 | static void | ||
2274 | csio_slave_destroy(struct scsi_device *sdev) | ||
2275 | { | ||
2276 | sdev->hostdata = NULL; | ||
2277 | } | ||
2278 | |||
2279 | static int | ||
2280 | csio_scan_finished(struct Scsi_Host *shost, unsigned long time) | ||
2281 | { | ||
2282 | struct csio_lnode *ln = shost_priv(shost); | ||
2283 | int rv = 1; | ||
2284 | |||
2285 | spin_lock_irq(shost->host_lock); | ||
2286 | if (!ln->hwp || csio_list_deleted(&ln->sm.sm_list)) | ||
2287 | goto out; | ||
2288 | |||
2289 | rv = csio_scan_done(ln, jiffies, time, csio_max_scan_tmo * HZ, | ||
2290 | csio_delta_scan_tmo * HZ); | ||
2291 | out: | ||
2292 | spin_unlock_irq(shost->host_lock); | ||
2293 | |||
2294 | return rv; | ||
2295 | } | ||
2296 | |||
2297 | struct scsi_host_template csio_fcoe_shost_template = { | ||
2298 | .module = THIS_MODULE, | ||
2299 | .name = CSIO_DRV_DESC, | ||
2300 | .proc_name = KBUILD_MODNAME, | ||
2301 | .queuecommand = csio_queuecommand, | ||
2302 | .eh_abort_handler = csio_eh_abort_handler, | ||
2303 | .eh_device_reset_handler = csio_eh_lun_reset_handler, | ||
2304 | .slave_alloc = csio_slave_alloc, | ||
2305 | .slave_configure = csio_slave_configure, | ||
2306 | .slave_destroy = csio_slave_destroy, | ||
2307 | .scan_finished = csio_scan_finished, | ||
2308 | .this_id = -1, | ||
2309 | .sg_tablesize = CSIO_SCSI_MAX_SGE, | ||
2310 | .cmd_per_lun = CSIO_MAX_CMD_PER_LUN, | ||
2311 | .use_clustering = ENABLE_CLUSTERING, | ||
2312 | .shost_attrs = csio_fcoe_lport_attrs, | ||
2313 | .max_sectors = CSIO_MAX_SECTOR_SIZE, | ||
2314 | }; | ||
2315 | |||
2316 | struct scsi_host_template csio_fcoe_shost_vport_template = { | ||
2317 | .module = THIS_MODULE, | ||
2318 | .name = CSIO_DRV_DESC, | ||
2319 | .proc_name = KBUILD_MODNAME, | ||
2320 | .queuecommand = csio_queuecommand, | ||
2321 | .eh_abort_handler = csio_eh_abort_handler, | ||
2322 | .eh_device_reset_handler = csio_eh_lun_reset_handler, | ||
2323 | .slave_alloc = csio_slave_alloc, | ||
2324 | .slave_configure = csio_slave_configure, | ||
2325 | .slave_destroy = csio_slave_destroy, | ||
2326 | .scan_finished = csio_scan_finished, | ||
2327 | .this_id = -1, | ||
2328 | .sg_tablesize = CSIO_SCSI_MAX_SGE, | ||
2329 | .cmd_per_lun = CSIO_MAX_CMD_PER_LUN, | ||
2330 | .use_clustering = ENABLE_CLUSTERING, | ||
2331 | .shost_attrs = csio_fcoe_vport_attrs, | ||
2332 | .max_sectors = CSIO_MAX_SECTOR_SIZE, | ||
2333 | }; | ||
2334 | |||
2335 | /* | ||
2336 | * csio_scsi_alloc_ddp_bufs - Allocate buffers for DDP of unaligned SGLs. | ||
2337 | * @scm: SCSI Module | ||
2338 | * @hw: HW device. | ||
2339 | * @buf_size: buffer size | ||
2340 | * @num_buf : Number of buffers. | ||
2341 | * | ||
2342 | * This routine allocates the DMA buffers required for SCSI data | ||
2343 | * transfer, used when the SGL buffers of a SCSI Read request posted | ||
2344 | * by the SCSI midlayer are not virtually contiguous. | ||
2345 | */ | ||
2346 | static int | ||
2347 | csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw, | ||
2348 | int buf_size, int num_buf) | ||
2349 | { | ||
2350 | int n = 0; | ||
2351 | struct list_head *tmp; | ||
2352 | struct csio_dma_buf *ddp_desc = NULL; | ||
2353 | uint32_t unit_size = 0; | ||
2354 | |||
2355 | if (!num_buf) | ||
2356 | return 0; | ||
2357 | |||
2358 | if (!buf_size) | ||
2359 | return -EINVAL; | ||
2360 | |||
2361 | INIT_LIST_HEAD(&scm->ddp_freelist); | ||
2362 | |||
2363 | /* Align buf size to page size */ | ||
2364 | buf_size = (buf_size + PAGE_SIZE - 1) & PAGE_MASK; | ||
2365 | /* Initialize dma descriptors */ | ||
2366 | for (n = 0; n < num_buf; n++) { | ||
2367 | /* Set unit size to request size */ | ||
2368 | unit_size = buf_size; | ||
2369 | ddp_desc = kzalloc(sizeof(struct csio_dma_buf), GFP_KERNEL); | ||
2370 | if (!ddp_desc) { | ||
2371 | csio_err(hw, | ||
2372 | "Failed to allocate ddp descriptors," | ||
2373 | " Num allocated = %d.\n", | ||
2374 | scm->stats.n_free_ddp); | ||
2375 | goto no_mem; | ||
2376 | } | ||
2377 | |||
2378 | /* Allocate Dma buffers for DDP */ | ||
2379 | ddp_desc->vaddr = pci_alloc_consistent(hw->pdev, unit_size, | ||
2380 | &ddp_desc->paddr); | ||
2381 | if (!ddp_desc->vaddr) { | ||
2382 | csio_err(hw, | ||
2383 | "SCSI response DMA buffer (ddp) allocation" | ||
2384 | " failed!\n"); | ||
2385 | kfree(ddp_desc); | ||
2386 | goto no_mem; | ||
2387 | } | ||
2388 | |||
2389 | ddp_desc->len = unit_size; | ||
2390 | |||
2391 | /* Add it to the scsi ddp freelist */ | ||
2392 | list_add_tail(&ddp_desc->list, &scm->ddp_freelist); | ||
2393 | CSIO_INC_STATS(scm, n_free_ddp); | ||
2394 | } | ||
2395 | |||
2396 | return 0; | ||
2397 | no_mem: | ||
2398 | /* Unwind: free the DMA memory and descriptors added so far */ | ||
2399 | list_for_each(tmp, &scm->ddp_freelist) { | ||
2400 | ddp_desc = (struct csio_dma_buf *) tmp; | ||
2401 | tmp = csio_list_prev(tmp); | ||
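/* The cursor was stepped back above so that list_for_each()
 * survives the list_del_init() of the current entry below. */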
2402 | pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr, | ||
2403 | ddp_desc->paddr); | ||
2404 | list_del_init(&ddp_desc->list); | ||
2405 | kfree(ddp_desc); | ||
2406 | } | ||
2407 | scm->stats.n_free_ddp = 0; | ||
2408 | |||
2409 | return -ENOMEM; | ||
2410 | } | ||
2411 | |||
2412 | /* | ||
2413 | * csio_scsi_free_ddp_bufs - free DDP buffers of unaligned SGLs. | ||
2414 | * @scm: SCSI Module | ||
2415 | * @hw: HW device. | ||
2416 | * | ||
2417 | * This routine frees ddp buffers. | ||
2418 | */ | ||
2419 | static void | ||
2420 | csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw) | ||
2421 | { | ||
2422 | struct list_head *tmp; | ||
2423 | struct csio_dma_buf *ddp_desc; | ||
2424 | |||
2425 | /* Free the DMA memory and descriptors on the freelist */ | ||
2426 | list_for_each(tmp, &scm->ddp_freelist) { | ||
2427 | ddp_desc = (struct csio_dma_buf *) tmp; | ||
2428 | tmp = csio_list_prev(tmp); | ||
2429 | pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr, | ||
2430 | ddp_desc->paddr); | ||
2431 | list_del_init(&ddp_desc->list); | ||
2432 | kfree(ddp_desc); | ||
2433 | } | ||
2434 | scm->stats.n_free_ddp = 0; | ||
2435 | } | ||
2436 | |||
2437 | /** | ||
2438 | * csio_scsim_init - Initialize SCSI Module | ||
2439 | * @scm: SCSI Module | ||
2440 | * @hw: HW module | ||
2441 | * | ||
2442 | */ | ||
2443 | int | ||
2444 | csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw) | ||
2445 | { | ||
2446 | int i; | ||
2447 | struct csio_ioreq *ioreq; | ||
2448 | struct csio_dma_buf *dma_buf; | ||
2449 | |||
2450 | INIT_LIST_HEAD(&scm->active_q); | ||
2451 | scm->hw = hw; | ||
2452 | |||
2453 | scm->proto_cmd_len = sizeof(struct fcp_cmnd); | ||
2454 | scm->proto_rsp_len = CSIO_SCSI_RSP_LEN; | ||
2455 | scm->max_sge = CSIO_SCSI_MAX_SGE; | ||
2456 | |||
2457 | spin_lock_init(&scm->freelist_lock); | ||
2458 | |||
2459 | /* Pre-allocate ioreqs and initialize them */ | ||
2460 | INIT_LIST_HEAD(&scm->ioreq_freelist); | ||
2461 | for (i = 0; i < csio_scsi_ioreqs; i++) { | ||
2462 | |||
2463 | ioreq = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL); | ||
2464 | if (!ioreq) { | ||
2465 | csio_err(hw, | ||
2466 | "I/O request element allocation failed, " | ||
2467 | " Num allocated = %d.\n", | ||
2468 | scm->stats.n_free_ioreq); | ||
2469 | |||
2470 | goto free_ioreq; | ||
2471 | } | ||
2472 | |||
2473 | /* Allocate Dma buffers for Response Payload */ | ||
2474 | dma_buf = &ioreq->dma_buf; | ||
2475 | dma_buf->vaddr = pci_pool_alloc(hw->scsi_pci_pool, GFP_KERNEL, | ||
2476 | &dma_buf->paddr); | ||
2477 | if (!dma_buf->vaddr) { | ||
2478 | csio_err(hw, | ||
2479 | "SCSI response DMA buffer allocation" | ||
2480 | " failed!\n"); | ||
2481 | kfree(ioreq); | ||
2482 | goto free_ioreq; | ||
2483 | } | ||
2484 | |||
2485 | dma_buf->len = scm->proto_rsp_len; | ||
2486 | |||
2487 | /* Set state to uninit */ | ||
2488 | csio_init_state(&ioreq->sm, csio_scsis_uninit); | ||
2489 | INIT_LIST_HEAD(&ioreq->gen_list); | ||
2490 | init_completion(&ioreq->cmplobj); | ||
2491 | |||
2492 | list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist); | ||
2493 | CSIO_INC_STATS(scm, n_free_ioreq); | ||
2494 | } | ||
2495 | |||
2496 | if (csio_scsi_alloc_ddp_bufs(scm, hw, PAGE_SIZE, csio_ddp_descs)) | ||
2497 | goto free_ioreq; | ||
2498 | |||
2499 | return 0; | ||
2500 | |||
2501 | free_ioreq: | ||
2502 | /* | ||
2503 | * Free up existing allocations, since an error | ||
2504 | * from here means we are returning for good | ||
2505 | */ | ||
2506 | while (!list_empty(&scm->ioreq_freelist)) { | ||
2507 | struct csio_sm *tmp; | ||
2508 | |||
2509 | tmp = list_first_entry(&scm->ioreq_freelist, | ||
2510 | struct csio_sm, sm_list); | ||
2511 | list_del_init(&tmp->sm_list); | ||
2512 | ioreq = (struct csio_ioreq *)tmp; | ||
2513 | |||
2514 | dma_buf = &ioreq->dma_buf; | ||
2515 | pci_pool_free(hw->scsi_pci_pool, dma_buf->vaddr, | ||
2516 | dma_buf->paddr); | ||
2517 | |||
2518 | kfree(ioreq); | ||
2519 | } | ||
2520 | |||
2521 | scm->stats.n_free_ioreq = 0; | ||
2522 | |||
2523 | return -ENOMEM; | ||
2524 | } | ||
2525 | |||
2526 | /** | ||
2527 | * csio_scsim_exit - Uninitialize SCSI Module | ||
2528 | * @scm: SCSI Module | ||
2529 | * | ||
2530 | */ | ||
2531 | void | ||
2532 | csio_scsim_exit(struct csio_scsim *scm) | ||
2533 | { | ||
2534 | struct csio_ioreq *ioreq; | ||
2535 | struct csio_dma_buf *dma_buf; | ||
2536 | |||
2537 | while (!list_empty(&scm->ioreq_freelist)) { | ||
2538 | struct csio_sm *tmp; | ||
2539 | |||
2540 | tmp = list_first_entry(&scm->ioreq_freelist, | ||
2541 | struct csio_sm, sm_list); | ||
2542 | list_del_init(&tmp->sm_list); | ||
2543 | ioreq = (struct csio_ioreq *)tmp; | ||
2544 | |||
2545 | dma_buf = &ioreq->dma_buf; | ||
2546 | pci_pool_free(scm->hw->scsi_pci_pool, dma_buf->vaddr, | ||
2547 | dma_buf->paddr); | ||
2548 | |||
2549 | kfree(ioreq); | ||
2550 | } | ||
2551 | |||
2552 | scm->stats.n_free_ioreq = 0; | ||
2553 | |||
2554 | csio_scsi_free_ddp_bufs(scm, scm->hw); | ||
2555 | } | ||
diff --git a/drivers/scsi/csiostor/csio_scsi.h b/drivers/scsi/csiostor/csio_scsi.h new file mode 100644 index 000000000000..2257c3dcf724 --- /dev/null +++ b/drivers/scsi/csiostor/csio_scsi.h | |||
@@ -0,0 +1,342 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio FCoE driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #ifndef __CSIO_SCSI_H__ | ||
36 | #define __CSIO_SCSI_H__ | ||
37 | |||
38 | #include <linux/spinlock_types.h> | ||
39 | #include <linux/completion.h> | ||
40 | #include <scsi/scsi.h> | ||
41 | #include <scsi/scsi_cmnd.h> | ||
42 | #include <scsi/scsi_device.h> | ||
43 | #include <scsi/scsi_host.h> | ||
44 | #include <scsi/scsi_eh.h> | ||
45 | #include <scsi/scsi_tcq.h> | ||
46 | #include <scsi/fc/fc_fcp.h> | ||
47 | |||
48 | #include "csio_defs.h" | ||
49 | #include "csio_wr.h" | ||
50 | |||
51 | extern struct scsi_host_template csio_fcoe_shost_template; | ||
52 | extern struct scsi_host_template csio_fcoe_shost_vport_template; | ||
53 | |||
54 | extern int csio_scsi_eqsize; | ||
55 | extern int csio_scsi_iqlen; | ||
56 | extern int csio_scsi_ioreqs; | ||
57 | extern uint32_t csio_max_scan_tmo; | ||
58 | extern uint32_t csio_delta_scan_tmo; | ||
59 | extern int csio_lun_qdepth; | ||
60 | |||
61 | /* | ||
62 | **************************** NOTE ******************************* | ||
63 | * How do we calculate MAX FCoE SCSI SGEs? Here is the math: | ||
64 | * Max Egress WR size = 512 bytes | ||
65 | * One SCSI egress WR has the following fixed no of bytes: | ||
66 | * 48 (sizeof(struct fw_scsi_write[read]_wr)) - FW WR | ||
67 | * + 32 (sizeof(struct fc_fcp_cmnd)) - Immediate FCP_CMD | ||
68 | * ------ | ||
69 | * 80 | ||
70 | * ------ | ||
71 | * That leaves us with 512 - 80 = 432 bytes for the data SGEs. Using | ||
72 | * the struct ulptx_sgl header for the SGE consumes: | ||
73 | * - 4 bytes for cmnd_sge. | ||
74 | * - 12 bytes for the first SGE. | ||
75 | * That leaves us with 416 bytes for the remaining SGE pairs, which | ||
76 | * is 416 / 24 (sizeof(struct ulptx_sge_pair)) = 17 SGE pairs, | ||
77 | * or 34 SGEs. Adding the first SGE gives us 35 SGEs. | ||
78 | */ | ||
79 | #define CSIO_SCSI_MAX_SGE 35 | ||
80 | #define CSIO_SCSI_ABRT_TMO_MS 60000 | ||
81 | #define CSIO_SCSI_LUNRST_TMO_MS 60000 | ||
82 | #define CSIO_SCSI_TM_POLL_MS 2000 /* should be less than | ||
83 | * all TM timeouts. | ||
84 | */ | ||
85 | #define CSIO_SCSI_IQ_WRSZ 128 | ||
86 | #define CSIO_SCSI_IQSIZE (csio_scsi_iqlen * CSIO_SCSI_IQ_WRSZ) | ||
87 | |||
88 | #define CSIO_MAX_SNS_LEN 128 | ||
89 | #define CSIO_SCSI_RSP_LEN (FCP_RESP_WITH_EXT + 4 + CSIO_MAX_SNS_LEN) | ||
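/*
 * A minimal sketch (not part of the driver) that encodes the NOTE above as
 * a compile-time check. The byte counts (512, 48 + 32, 4 + 12, 24) come
 * straight from that comment; the CSIO_SKETCH_* names are illustrative
 * assumptions.
 */
#include <linux/bug.h>

#define CSIO_SKETCH_WR_MAX	512		/* max egress WR size */
#define CSIO_SKETCH_WR_FIXED	(48 + 32)	/* FW WR + immediate FCP_CMND */
#define CSIO_SKETCH_SGL_HDR	(4 + 12)	/* cmnd_sge + first SGE */
#define CSIO_SKETCH_SGE_PAIR	24		/* struct ulptx_sge_pair */

static inline void csio_sketch_check_max_sge(void)
{
	/* 1 first SGE + 2 SGEs per remaining 24-byte pair must equal 35 */
	BUILD_BUG_ON(1 + 2 * ((CSIO_SKETCH_WR_MAX - CSIO_SKETCH_WR_FIXED -
			       CSIO_SKETCH_SGL_HDR) / CSIO_SKETCH_SGE_PAIR)
		     != CSIO_SCSI_MAX_SGE);
}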
90 | |||
91 | /* Reference to scsi_cmnd */ | ||
92 | #define csio_scsi_cmnd(req) ((req)->scratch1) | ||
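/*
 * Note: this is a macro rather than an inline accessor because the driver
 * also uses it as an lvalue, e.g. "csio_scsi_cmnd(ioreq) = cmnd" in
 * csio_eh_lun_reset_handler() and "csio_scsi_cmnd(req) = NULL" in
 * csio_tm_cbfn().
 */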
93 | |||
94 | struct csio_scsi_stats { | ||
95 | uint64_t n_tot_success; /* Total number of good I/Os */ | ||
96 | uint32_t n_rn_nr_error; /* No. of remote-node-not- | ||
97 | * ready errors | ||
98 | */ | ||
99 | uint32_t n_hw_nr_error; /* No. of hw-module-not- | ||
100 | * ready errors | ||
101 | */ | ||
102 | uint32_t n_dmamap_error; /* No. of DMA map errors */ | ||
103 | uint32_t n_unsupp_sge_error; /* No. of too-many-SGes | ||
104 | * errors. | ||
105 | */ | ||
106 | uint32_t n_no_req_error; /* No. of Out-of-ioreqs error */ | ||
107 | uint32_t n_busy_error; /* No. of -EBUSY errors */ | ||
108 | uint32_t n_hosterror; /* No. of FW_HOSTERROR I/O */ | ||
109 | uint32_t n_rsperror; /* No. of response errors */ | ||
110 | uint32_t n_autosense; /* No. of auto sense replies */ | ||
111 | uint32_t n_ovflerror; /* No. of overflow errors */ | ||
112 | uint32_t n_unflerror; /* No. of underflow errors */ | ||
113 | uint32_t n_rdev_nr_error;/* No. of rdev not | ||
114 | * ready errors | ||
115 | */ | ||
116 | uint32_t n_rdev_lost_error;/* No. of rdev lost errors */ | ||
117 | uint32_t n_rdev_logo_error;/* No. of rdev logo errors */ | ||
118 | uint32_t n_link_down_error;/* No. of link down errors */ | ||
119 | uint32_t n_no_xchg_error; /* No. of no-exchange errors */ | ||
120 | uint32_t n_unknown_error;/* No. of unhandled errors */ | ||
121 | uint32_t n_aborted; /* No. of aborted I/Os */ | ||
122 | uint32_t n_abrt_timedout; /* No. of abort timedouts */ | ||
123 | uint32_t n_abrt_fail; /* No. of abort failures */ | ||
124 | uint32_t n_abrt_dups; /* No. of duplicate aborts */ | ||
125 | uint32_t n_abrt_race_comp; /* No. of aborts that raced | ||
126 | * with completions. | ||
127 | */ | ||
128 | uint32_t n_abrt_busy_error;/* No. of abort failures | ||
129 | * due to -EBUSY. | ||
130 | */ | ||
131 | uint32_t n_closed; /* No. of closed I/Os */ | ||
132 | uint32_t n_cls_busy_error; /* No. of close failures | ||
133 | * due to -EBUSY. | ||
134 | */ | ||
135 | uint32_t n_active; /* No. of IOs in active_q */ | ||
136 | uint32_t n_tm_active; /* No. of TMs in active_q */ | ||
137 | uint32_t n_wcbfn; /* No. of I/Os in worker | ||
138 | * cbfn q | ||
139 | */ | ||
140 | uint32_t n_free_ioreq; /* No. of freelist entries */ | ||
141 | uint32_t n_free_ddp; /* No. of DDP freelist */ | ||
142 | uint32_t n_unaligned; /* No. of Unaligned SGls */ | ||
143 | uint32_t n_inval_cplop; /* No. of invalid CPL ops in IQ */ | ||
144 | uint32_t n_inval_scsiop; /* No. of invalid SCSI ops in IQ */ | ||
145 | }; | ||
146 | |||
147 | struct csio_scsim { | ||
148 | struct csio_hw *hw; /* Pointer to HW module */ | ||
149 | uint8_t max_sge; /* Max SGE */ | ||
150 | uint8_t proto_cmd_len; /* Proto specific SCSI | ||
151 | * cmd length | ||
152 | */ | ||
153 | uint16_t proto_rsp_len; /* Proto specific SCSI | ||
154 | * response length | ||
155 | */ | ||
156 | spinlock_t freelist_lock; /* Lock for ioreq freelist */ | ||
157 | struct list_head active_q; /* Outstanding SCSI I/Os */ | ||
158 | struct list_head ioreq_freelist; /* Free list of ioreq's */ | ||
159 | struct list_head ddp_freelist; /* DDP descriptor freelist */ | ||
160 | struct csio_scsi_stats stats; /* This module's statistics */ | ||
161 | }; | ||
162 | |||
163 | /* State machine defines */ | ||
164 | enum csio_scsi_ev { | ||
165 | CSIO_SCSIE_START_IO = 1, /* Start a regular SCSI IO */ | ||
166 | CSIO_SCSIE_START_TM, /* Start a TM IO */ | ||
167 | CSIO_SCSIE_COMPLETED, /* IO Completed */ | ||
168 | CSIO_SCSIE_ABORT, /* Abort IO */ | ||
169 | CSIO_SCSIE_ABORTED, /* IO Aborted */ | ||
170 | CSIO_SCSIE_CLOSE, /* Close exchange */ | ||
171 | CSIO_SCSIE_CLOSED, /* Exchange closed */ | ||
172 | CSIO_SCSIE_DRVCLEANUP, /* Driver wants to manually | ||
173 | * cleanup this I/O. | ||
174 | */ | ||
175 | }; | ||
176 | |||
177 | enum csio_scsi_lev { | ||
178 | CSIO_LEV_ALL = 1, | ||
179 | CSIO_LEV_LNODE, | ||
180 | CSIO_LEV_RNODE, | ||
181 | CSIO_LEV_LUN, | ||
182 | }; | ||
183 | |||
184 | struct csio_scsi_level_data { | ||
185 | enum csio_scsi_lev level; | ||
186 | struct csio_rnode *rnode; | ||
187 | struct csio_lnode *lnode; | ||
188 | uint64_t oslun; | ||
189 | }; | ||
190 | |||
191 | static inline struct csio_ioreq * | ||
192 | csio_get_scsi_ioreq(struct csio_scsim *scm) | ||
193 | { | ||
194 | struct csio_sm *req; | ||
195 | |||
196 | if (likely(!list_empty(&scm->ioreq_freelist))) { | ||
197 | req = list_first_entry(&scm->ioreq_freelist, | ||
198 | struct csio_sm, sm_list); | ||
199 | list_del_init(&req->sm_list); | ||
200 | CSIO_DEC_STATS(scm, n_free_ioreq); | ||
201 | return (struct csio_ioreq *)req; | ||
202 | } else | ||
203 | return NULL; | ||
204 | } | ||
205 | |||
206 | static inline void | ||
207 | csio_put_scsi_ioreq(struct csio_scsim *scm, struct csio_ioreq *ioreq) | ||
208 | { | ||
209 | list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist); | ||
210 | CSIO_INC_STATS(scm, n_free_ioreq); | ||
211 | } | ||
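/*
 * A sketch of the *_lock variants used by csio_scsi.c
 * (csio_get_scsi_ioreq_lock()/csio_put_scsi_ioreq_lock()), which are not
 * part of this hunk: presumably they wrap the helpers above with
 * scm->freelist_lock, roughly as below. The name and signature here are
 * assumptions, not the driver's.
 */
static inline struct csio_ioreq *
csio_get_scsi_ioreq_lock_sketch(struct csio_hw *hw, struct csio_scsim *scm)
{
	struct csio_ioreq *ioreq;
	unsigned long flags;

	spin_lock_irqsave(&scm->freelist_lock, flags);
	ioreq = csio_get_scsi_ioreq(scm);
	spin_unlock_irqrestore(&scm->freelist_lock, flags);

	return ioreq;
}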
212 | |||
213 | static inline void | ||
214 | csio_put_scsi_ioreq_list(struct csio_scsim *scm, struct list_head *reqlist, | ||
215 | int n) | ||
216 | { | ||
217 | list_splice_init(reqlist, &scm->ioreq_freelist); | ||
218 | scm->stats.n_free_ioreq += n; | ||
219 | } | ||
220 | |||
221 | static inline struct csio_dma_buf * | ||
222 | csio_get_scsi_ddp(struct csio_scsim *scm) | ||
223 | { | ||
224 | struct csio_dma_buf *ddp; | ||
225 | |||
226 | if (likely(!list_empty(&scm->ddp_freelist))) { | ||
227 | ddp = list_first_entry(&scm->ddp_freelist, | ||
228 | struct csio_dma_buf, list); | ||
229 | list_del_init(&ddp->list); | ||
230 | CSIO_DEC_STATS(scm, n_free_ddp); | ||
231 | return ddp; | ||
232 | } else | ||
233 | return NULL; | ||
234 | } | ||
235 | |||
236 | static inline void | ||
237 | csio_put_scsi_ddp(struct csio_scsim *scm, struct csio_dma_buf *ddp) | ||
238 | { | ||
239 | list_add_tail(&ddp->list, &scm->ddp_freelist); | ||
240 | CSIO_INC_STATS(scm, n_free_ddp); | ||
241 | } | ||
242 | |||
243 | static inline void | ||
244 | csio_put_scsi_ddp_list(struct csio_scsim *scm, struct list_head *reqlist, | ||
245 | int n) | ||
246 | { | ||
247 | list_splice_tail_init(reqlist, &scm->ddp_freelist); | ||
248 | scm->stats.n_free_ddp += n; | ||
249 | } | ||
250 | |||
251 | static inline void | ||
252 | csio_scsi_completed(struct csio_ioreq *ioreq, struct list_head *cbfn_q) | ||
253 | { | ||
254 | csio_post_event(&ioreq->sm, CSIO_SCSIE_COMPLETED); | ||
255 | if (csio_list_deleted(&ioreq->sm.sm_list)) | ||
256 | list_add_tail(&ioreq->sm.sm_list, cbfn_q); | ||
257 | } | ||
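/*
 * Note (an interpretation, not stated in the source): the
 * csio_list_deleted() check above presumably distinguishes a terminal
 * completion, where the SM has unlinked the request and it can be handed
 * to the caller's cbfn queue, from an intermediate one where the request
 * is still linked on an active queue.
 */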
258 | |||
259 | static inline void | ||
260 | csio_scsi_aborted(struct csio_ioreq *ioreq, struct list_head *cbfn_q) | ||
261 | { | ||
262 | csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORTED); | ||
263 | list_add_tail(&ioreq->sm.sm_list, cbfn_q); | ||
264 | } | ||
265 | |||
266 | static inline void | ||
267 | csio_scsi_closed(struct csio_ioreq *ioreq, struct list_head *cbfn_q) | ||
268 | { | ||
269 | csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSED); | ||
270 | list_add_tail(&ioreq->sm.sm_list, cbfn_q); | ||
271 | } | ||
272 | |||
273 | static inline void | ||
274 | csio_scsi_drvcleanup(struct csio_ioreq *ioreq) | ||
275 | { | ||
276 | csio_post_event(&ioreq->sm, CSIO_SCSIE_DRVCLEANUP); | ||
277 | } | ||
278 | |||
279 | /* | ||
280 | * csio_scsi_start_io - Kick starts the IO SM. | ||
281 | * @req: io request SM. | ||
282 | * | ||
283 | * needs to be called with lock held. | ||
284 | */ | ||
285 | static inline int | ||
286 | csio_scsi_start_io(struct csio_ioreq *ioreq) | ||
287 | { | ||
288 | csio_post_event(&ioreq->sm, CSIO_SCSIE_START_IO); | ||
289 | return ioreq->drv_status; | ||
290 | } | ||
291 | |||
292 | /* | ||
293 | * csio_scsi_start_tm - Kicks off the Task management IO SM. | ||
294 | * @req: io request SM. | ||
295 | * | ||
296 | * needs to be called with lock held. | ||
297 | */ | ||
298 | static inline int | ||
299 | csio_scsi_start_tm(struct csio_ioreq *ioreq) | ||
300 | { | ||
301 | csio_post_event(&ioreq->sm, CSIO_SCSIE_START_TM); | ||
302 | return ioreq->drv_status; | ||
303 | } | ||
304 | |||
305 | /* | ||
306 | * csio_scsi_abort - Abort an IO request | ||
307 | * @req: io request SM. | ||
308 | * | ||
309 | * needs to be called with lock held. | ||
310 | */ | ||
311 | static inline int | ||
312 | csio_scsi_abort(struct csio_ioreq *ioreq) | ||
313 | { | ||
314 | csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORT); | ||
315 | return ioreq->drv_status; | ||
316 | } | ||
317 | |||
318 | /* | ||
319 | * csio_scsi_close - Close an IO request | ||
320 | * @req: io request SM. | ||
321 | * | ||
322 | * needs to be called with lock held. | ||
323 | */ | ||
324 | static inline int | ||
325 | csio_scsi_close(struct csio_ioreq *ioreq) | ||
326 | { | ||
327 | csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSE); | ||
328 | return ioreq->drv_status; | ||
329 | } | ||
330 | |||
331 | void csio_scsi_cleanup_io_q(struct csio_scsim *, struct list_head *); | ||
332 | int csio_scsim_cleanup_io(struct csio_scsim *, bool abort); | ||
333 | int csio_scsim_cleanup_io_lnode(struct csio_scsim *, | ||
334 | struct csio_lnode *); | ||
335 | struct csio_ioreq *csio_scsi_cmpl_handler(struct csio_hw *, void *, uint32_t, | ||
336 | struct csio_fl_dma_buf *, | ||
337 | void *, uint8_t **); | ||
338 | int csio_scsi_qconfig(struct csio_hw *); | ||
339 | int csio_scsim_init(struct csio_scsim *, struct csio_hw *); | ||
340 | void csio_scsim_exit(struct csio_scsim *); | ||
341 | |||
342 | #endif /* __CSIO_SCSI_H__ */ | ||
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c new file mode 100644 index 000000000000..c32df1bdaa97 --- /dev/null +++ b/drivers/scsi/csiostor/csio_wr.c | |||
@@ -0,0 +1,1632 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio FCoE driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/kernel.h> | ||
36 | #include <linux/string.h> | ||
37 | #include <linux/compiler.h> | ||
38 | #include <linux/slab.h> | ||
39 | #include <asm/page.h> | ||
40 | #include <linux/cache.h> | ||
41 | |||
42 | #include "csio_hw.h" | ||
43 | #include "csio_wr.h" | ||
44 | #include "csio_mb.h" | ||
45 | #include "csio_defs.h" | ||
46 | |||
47 | int csio_intr_coalesce_cnt; /* value:SGE_INGRESS_RX_THRESHOLD[0] */ | ||
48 | static int csio_sge_thresh_reg; /* SGE_INGRESS_RX_THRESHOLD[0] */ | ||
49 | |||
50 | int csio_intr_coalesce_time = 10; /* value:SGE_TIMER_VALUE_1 */ | ||
51 | static int csio_sge_timer_reg = 1; | ||
52 | |||
53 | #define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val) \ | ||
54 | csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg) | ||
55 | |||
56 | static void | ||
57 | csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg) | ||
58 | { | ||
59 | sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0 + | ||
60 | reg * sizeof(uint32_t)); | ||
61 | } | ||
62 | |||
63 | /* Free list buffer size */ | ||
64 | static inline uint32_t | ||
65 | csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf) | ||
66 | { | ||
67 | return sge->sge_fl_buf_size[buf->paddr & 0xF]; | ||
68 | } | ||
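/*
 * Note: the low 4 bits of buf->paddr carry the free-list buffer
 * size-register index; csio_wr_fill_fl() below ORs it in with
 * "paddr = buf->paddr | (sreg & 0xF)", and the mask above recovers it to
 * look up the configured buffer size.
 */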
69 | |||
70 | /* Size of the egress queue status page */ | ||
71 | static inline uint32_t | ||
72 | csio_wr_qstat_pgsz(struct csio_hw *hw) | ||
73 | { | ||
74 | return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE(1)) ? 128 : 64; | ||
75 | } | ||
76 | |||
77 | /* Ring freelist doorbell */ | ||
78 | static inline void | ||
79 | csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq) | ||
80 | { | ||
81 | /* | ||
82 | * Ring the doorbell only when we have at least CSIO_QCREDIT_SZ | ||
83 | * bytes worth of entries in the freelist queue. This translates to | ||
84 | * at least 8 freelist buffer pointers (since each pointer is 8 bytes). | ||
85 | */ | ||
86 | if (flq->inc_idx >= 8) { | ||
87 | csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) | | ||
88 | PIDX(flq->inc_idx / 8), | ||
89 | MYPF_REG(SGE_PF_KDOORBELL)); | ||
90 | flq->inc_idx &= 7; | ||
91 | } | ||
92 | } | ||
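/*
 * Worked example: with inc_idx = 19, the doorbell write advances PIDX by
 * 19 / 8 = 2 credits (16 buffer pointers), and the remaining 3 pointers
 * (19 & 7) stay pending in inc_idx until enough accumulate for the next
 * ring.
 */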
93 | |||
94 | /* Write a 0 cidx increment value to enable SGE interrupts for this queue */ | ||
95 | static void | ||
96 | csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid) | ||
97 | { | ||
98 | csio_wr_reg32(hw, CIDXINC(0) | | ||
99 | INGRESSQID(iqid) | | ||
100 | TIMERREG(X_TIMERREG_RESTART_COUNTER), | ||
101 | MYPF_REG(SGE_PF_GTS)); | ||
102 | } | ||
103 | |||
104 | /* | ||
105 | * csio_wr_fill_fl - Populate the FL buffers of a FL queue. | ||
106 | * @hw: HW module. | ||
107 | * @flq: Freelist queue. | ||
108 | * | ||
109 | * Fill up freelist buffer entries with buffers of size specified | ||
110 | * in the size register. | ||
111 | * | ||
112 | */ | ||
113 | static int | ||
114 | csio_wr_fill_fl(struct csio_hw *hw, struct csio_q *flq) | ||
115 | { | ||
116 | struct csio_wrm *wrm = csio_hw_to_wrm(hw); | ||
117 | struct csio_sge *sge = &wrm->sge; | ||
118 | __be64 *d = (__be64 *)(flq->vstart); | ||
119 | struct csio_dma_buf *buf = &flq->un.fl.bufs[0]; | ||
120 | uint64_t paddr; | ||
121 | int sreg = flq->un.fl.sreg; | ||
122 | int n = flq->credits; | ||
123 | |||
124 | while (n--) { | ||
125 | buf->len = sge->sge_fl_buf_size[sreg]; | ||
126 | buf->vaddr = pci_alloc_consistent(hw->pdev, buf->len, | ||
127 | &buf->paddr); | ||
128 | if (!buf->vaddr) { | ||
129 | csio_err(hw, "Could only fill %d buffers!\n", n + 1); | ||
130 | return -ENOMEM; | ||
131 | } | ||
132 | |||
133 | paddr = buf->paddr | (sreg & 0xF); | ||
134 | |||
135 | *d++ = cpu_to_be64(paddr); | ||
136 | buf++; | ||
137 | } | ||
138 | |||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | /* | ||
143 | * csio_wr_update_fl - Update free list indices after a refill. | ||
144 | * @hw: HW module. | ||
145 | * @flq: Freelist queue. | ||
146 | * @n: Number of free list buffers refilled. | ||
147 | * | ||
148 | */ | ||
149 | static inline void | ||
150 | csio_wr_update_fl(struct csio_hw *hw, struct csio_q *flq, uint16_t n) | ||
151 | { | ||
152 | |||
153 | flq->inc_idx += n; | ||
154 | flq->pidx += n; | ||
155 | if (unlikely(flq->pidx >= flq->credits)) | ||
156 | flq->pidx -= (uint16_t)flq->credits; | ||
157 | |||
158 | CSIO_INC_STATS(flq, n_flq_refill); | ||
159 | } | ||
160 | |||
161 | /* | ||
162 | * csio_wr_alloc_q - Allocate a WR queue and initialize it. | ||
163 | * @hw: HW module | ||
164 | * @qsize: Size of the queue in bytes | ||
165 | * @wrsize: Size of each WR in this queue, if fixed. | ||
166 | * @type: Type of queue (Ingress/Egress/Freelist) | ||
167 | * @owner: Module that owns this queue. | ||
168 | * @nflb: Number of freelist buffers for FL. | ||
169 | * @sreg: What is the FL buffer size register? | ||
170 | * @iq_intx_handler: Ingress queue handler in INTx mode. | ||
171 | * | ||
172 | * This function allocates and sets up a queue for the caller | ||
173 | * of size qsize, aligned at the required boundary. This is subject to | ||
174 | * free entries being available in the queue array. If one is found, | ||
175 | | * it is initialized with the allocated queue, marked as being used (owner), | ||
176 | * and a handle is returned to the caller in the form of the queue's index | ||
177 | * into the q_arr array. | ||
178 | * If user has indicated a freelist (by specifying nflb > 0), create | ||
179 | * another queue (with its own index into q_arr) for the freelist. Allocate | ||
180 | * memory for DMA buffer metadata (vaddr, len etc). Save off the freelist | ||
181 | * idx in the ingress queue's flq.idx. This is how a Freelist is associated | ||
182 | * with its owning ingress queue. | ||
183 | */ | ||
184 | int | ||
185 | csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize, | ||
186 | uint16_t type, void *owner, uint32_t nflb, int sreg, | ||
187 | iq_handler_t iq_intx_handler) | ||
188 | { | ||
189 | struct csio_wrm *wrm = csio_hw_to_wrm(hw); | ||
190 | struct csio_q *q, *flq; | ||
191 | int free_idx = wrm->free_qidx; | ||
192 | int ret_idx = free_idx; | ||
193 | uint32_t qsz; | ||
194 | int flq_idx; | ||
195 | |||
196 | if (free_idx >= wrm->num_q) { | ||
197 | csio_err(hw, "No more free queues.\n"); | ||
198 | return -1; | ||
199 | } | ||
200 | |||
201 | switch (type) { | ||
202 | case CSIO_EGRESS: | ||
203 | qsz = ALIGN(qsize, CSIO_QCREDIT_SZ) + csio_wr_qstat_pgsz(hw); | ||
204 | break; | ||
205 | case CSIO_INGRESS: | ||
206 | switch (wrsize) { | ||
207 | case 16: | ||
208 | case 32: | ||
209 | case 64: | ||
210 | case 128: | ||
211 | break; | ||
212 | default: | ||
213 | csio_err(hw, "Invalid Ingress queue WR size:%d\n", | ||
214 | wrsize); | ||
215 | return -1; | ||
216 | } | ||
217 | |||
218 | /* | ||
219 | * Number of elements must be a multiple of 16 | ||
220 | * So this includes status page size | ||
221 | */ | ||
222 | qsz = ALIGN(qsize/wrsize, 16) * wrsize; | ||
223 | |||
224 | break; | ||
225 | case CSIO_FREELIST: | ||
226 | qsz = ALIGN(qsize/wrsize, 8) * wrsize + csio_wr_qstat_pgsz(hw); | ||
227 | break; | ||
228 | default: | ||
229 | csio_err(hw, "Invalid queue type: 0x%x\n", type); | ||
230 | return -1; | ||
231 | } | ||
232 | |||
233 | q = wrm->q_arr[free_idx]; | ||
234 | |||
235 | q->vstart = pci_alloc_consistent(hw->pdev, qsz, &q->pstart); | ||
236 | if (!q->vstart) { | ||
237 | csio_err(hw, | ||
238 | "Failed to allocate DMA memory for " | ||
239 | "queue at id: %d size: %d\n", free_idx, qsize); | ||
240 | return -1; | ||
241 | } | ||
242 | |||
243 | /* | ||
244 | * We need to zero out the contents, importantly for ingress, | ||
245 | * since we start with a generation bit of 1 for ingress. | ||
246 | */ | ||
247 | memset(q->vstart, 0, qsz); | ||
248 | |||
249 | q->type = type; | ||
250 | q->owner = owner; | ||
251 | q->pidx = q->cidx = q->inc_idx = 0; | ||
252 | q->size = qsz; | ||
253 | q->wr_sz = wrsize; /* If using fixed size WRs */ | ||
254 | |||
255 | wrm->free_qidx++; | ||
256 | |||
257 | if (type == CSIO_INGRESS) { | ||
258 | /* Since queue area is set to zero */ | ||
259 | q->un.iq.genbit = 1; | ||
260 | |||
261 | /* | ||
262 | * Ingress queue status page size is always the size of | ||
263 | * the ingress queue entry. | ||
264 | */ | ||
265 | q->credits = (qsz - q->wr_sz) / q->wr_sz; | ||
266 | q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz | ||
267 | - q->wr_sz); | ||
268 | |||
269 | /* Allocate memory for FL if requested */ | ||
270 | if (nflb > 0) { | ||
271 | flq_idx = csio_wr_alloc_q(hw, nflb * sizeof(__be64), | ||
272 | sizeof(__be64), CSIO_FREELIST, | ||
273 | owner, 0, sreg, NULL); | ||
274 | if (flq_idx == -1) { | ||
275 | csio_err(hw, | ||
276 | "Failed to allocate FL queue" | ||
277 | " for IQ idx:%d\n", free_idx); | ||
278 | return -1; | ||
279 | } | ||
280 | |||
281 | /* Associate the new FL with the Ingress queue */ | ||
282 | q->un.iq.flq_idx = flq_idx; | ||
283 | |||
284 | flq = wrm->q_arr[q->un.iq.flq_idx]; | ||
285 | flq->un.fl.bufs = kzalloc(flq->credits * | ||
286 | sizeof(struct csio_dma_buf), | ||
287 | GFP_KERNEL); | ||
288 | if (!flq->un.fl.bufs) { | ||
289 | csio_err(hw, | ||
290 | "Failed to allocate FL queue bufs" | ||
291 | " for IQ idx:%d\n", free_idx); | ||
292 | return -1; | ||
293 | } | ||
294 | |||
295 | flq->un.fl.packen = 0; | ||
296 | flq->un.fl.offset = 0; | ||
297 | flq->un.fl.sreg = sreg; | ||
298 | |||
299 | /* Fill up the free list buffers */ | ||
300 | if (csio_wr_fill_fl(hw, flq)) | ||
301 | return -1; | ||
302 | |||
303 | /* | ||
304 | * Make sure that in a FLQ, at least 1 credit (8 FL buffers) | ||
305 | * remains unpopulated, otherwise the HW thinks the | ||
306 | * FLQ is empty. | ||
307 | */ | ||
308 | flq->pidx = flq->inc_idx = flq->credits - 8; | ||
309 | } else { | ||
310 | q->un.iq.flq_idx = -1; | ||
311 | } | ||
312 | |||
313 | /* Associate the IQ INTx handler. */ | ||
314 | q->un.iq.iq_intx_handler = iq_intx_handler; | ||
315 | |||
316 | csio_q_iqid(hw, ret_idx) = CSIO_MAX_QID; | ||
317 | |||
318 | } else if (type == CSIO_EGRESS) { | ||
319 | q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / CSIO_QCREDIT_SZ; | ||
320 | q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz | ||
321 | - csio_wr_qstat_pgsz(hw)); | ||
322 | csio_q_eqid(hw, ret_idx) = CSIO_MAX_QID; | ||
323 | } else { /* Freelist */ | ||
324 | q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / sizeof(__be64); | ||
325 | q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz | ||
326 | - csio_wr_qstat_pgsz(hw)); | ||
327 | csio_q_flid(hw, ret_idx) = CSIO_MAX_QID; | ||
328 | } | ||
329 | |||
330 | return ret_idx; | ||
331 | } | ||
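/*
 * A usage sketch (hypothetical values): how a caller might allocate a SCSI
 * ingress queue with an attached free list. The free-list depth of 64 and
 * size register 0 are illustrative assumptions; CSIO_SCSI_IQSIZE and
 * CSIO_SCSI_IQ_WRSZ are the real csio_scsi.h values.
 */
static int csio_sketch_alloc_scsi_iq(struct csio_hw *hw, void *owner,
				     iq_handler_t intx_handler)
{
	int iq_idx;

	iq_idx = csio_wr_alloc_q(hw, CSIO_SCSI_IQSIZE, CSIO_SCSI_IQ_WRSZ,
				 CSIO_INGRESS, owner, 64, 0, intx_handler);
	if (iq_idx == -1)	/* csio_wr_alloc_q() returns -1 on failure */
		return -ENOMEM;

	return iq_idx;	/* handle: index into wrm->q_arr */
}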
332 | |||
333 | /* | ||
334 | * csio_wr_iq_create_rsp - Response handler for IQ creation. | ||
335 | * @hw: The HW module. | ||
336 | * @mbp: Mailbox. | ||
337 | * @iq_idx: Ingress queue that got created. | ||
338 | * | ||
339 | * Handle FW_IQ_CMD mailbox completion. Save off the assigned IQ/FL ids. | ||
340 | */ | ||
341 | static int | ||
342 | csio_wr_iq_create_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx) | ||
343 | { | ||
344 | struct csio_iq_params iqp; | ||
345 | enum fw_retval retval; | ||
346 | uint32_t iq_id; | ||
347 | int flq_idx; | ||
348 | |||
349 | memset(&iqp, 0, sizeof(struct csio_iq_params)); | ||
350 | |||
351 | csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, &iqp); | ||
352 | |||
353 | if (retval != FW_SUCCESS) { | ||
354 | csio_err(hw, "IQ cmd returned 0x%x!\n", retval); | ||
355 | mempool_free(mbp, hw->mb_mempool); | ||
356 | return -EINVAL; | ||
357 | } | ||
358 | |||
359 | csio_q_iqid(hw, iq_idx) = iqp.iqid; | ||
360 | csio_q_physiqid(hw, iq_idx) = iqp.physiqid; | ||
361 | csio_q_pidx(hw, iq_idx) = csio_q_cidx(hw, iq_idx) = 0; | ||
362 | csio_q_inc_idx(hw, iq_idx) = 0; | ||
363 | |||
364 | /* Relative iq-id. */ | ||
365 | iq_id = iqp.iqid - hw->wrm.fw_iq_start; | ||
366 | |||
367 | /* Set the iq-id to iq map table. */ | ||
368 | if (iq_id >= CSIO_MAX_IQ) { | ||
369 | csio_err(hw, | ||
370 | "Exceeding MAX_IQ(%d) supported!" | ||
371 | " iqid:%d rel_iqid:%d FW iq_start:%d\n", | ||
372 | CSIO_MAX_IQ, iq_id, iqp.iqid, hw->wrm.fw_iq_start); | ||
373 | mempool_free(mbp, hw->mb_mempool); | ||
374 | return -EINVAL; | ||
375 | } | ||
376 | csio_q_set_intr_map(hw, iq_idx, iq_id); | ||
377 | |||
378 | /* | ||
379 | * During FW_IQ_CMD, FW sets interrupt_sent bit to 1 in the SGE | ||
380 | * ingress context of this queue. This will block interrupts to | ||
381 | * this queue until the next GTS write. Therefore, we do a | ||
382 | * 0-cidx increment GTS write for this queue just to clear the | ||
383 | * interrupt_sent bit. This will re-enable interrupts to this | ||
384 | * queue. | ||
385 | */ | ||
386 | csio_wr_sge_intr_enable(hw, iqp.physiqid); | ||
387 | |||
388 | flq_idx = csio_q_iq_flq_idx(hw, iq_idx); | ||
389 | if (flq_idx != -1) { | ||
390 | struct csio_q *flq = hw->wrm.q_arr[flq_idx]; | ||
391 | |||
392 | csio_q_flid(hw, flq_idx) = iqp.fl0id; | ||
393 | csio_q_cidx(hw, flq_idx) = 0; | ||
394 | csio_q_pidx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8; | ||
395 | csio_q_inc_idx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8; | ||
396 | |||
397 | /* Now update SGE about the buffers allocated during init */ | ||
398 | csio_wr_ring_fldb(hw, flq); | ||
399 | } | ||
400 | |||
401 | mempool_free(mbp, hw->mb_mempool); | ||
402 | |||
403 | return 0; | ||
404 | } | ||
405 | |||
406 | /* | ||
407 | * csio_wr_iq_create - Configure an Ingress queue with FW. | ||
408 | * @hw: The HW module. | ||
409 | * @priv: Private data object. | ||
410 | * @iq_idx: Ingress queue index in the WR module. | ||
411 | * @vec: MSIX vector. | ||
412 | * @portid: PCIE Channel to be associated with this queue. | ||
413 | * @async: Is this a FW asynchronous message handling queue? | ||
414 | * @cbfn: Completion callback. | ||
415 | * | ||
416 | * This API configures an ingress queue with FW by issuing a FW_IQ_CMD mailbox | ||
417 | * with alloc/write bits set. | ||
418 | */ | ||
419 | int | ||
420 | csio_wr_iq_create(struct csio_hw *hw, void *priv, int iq_idx, | ||
421 | uint32_t vec, uint8_t portid, bool async, | ||
422 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
423 | { | ||
424 | struct csio_mb *mbp; | ||
425 | struct csio_iq_params iqp; | ||
426 | int flq_idx; | ||
427 | |||
428 | memset(&iqp, 0, sizeof(struct csio_iq_params)); | ||
429 | csio_q_portid(hw, iq_idx) = portid; | ||
430 | |||
431 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
432 | if (!mbp) { | ||
433 | csio_err(hw, "IQ command out of memory!\n"); | ||
434 | return -ENOMEM; | ||
435 | } | ||
436 | |||
437 | switch (hw->intr_mode) { | ||
438 | case CSIO_IM_INTX: | ||
439 | case CSIO_IM_MSI: | ||
440 | /* For interrupt forwarding queue only */ | ||
441 | if (hw->intr_iq_idx == iq_idx) | ||
442 | iqp.iqandst = X_INTERRUPTDESTINATION_PCIE; | ||
443 | else | ||
444 | iqp.iqandst = X_INTERRUPTDESTINATION_IQ; | ||
445 | iqp.iqandstindex = | ||
446 | csio_q_physiqid(hw, hw->intr_iq_idx); | ||
447 | break; | ||
448 | case CSIO_IM_MSIX: | ||
449 | iqp.iqandst = X_INTERRUPTDESTINATION_PCIE; | ||
450 | iqp.iqandstindex = (uint16_t)vec; | ||
451 | break; | ||
452 | case CSIO_IM_NONE: | ||
453 | mempool_free(mbp, hw->mb_mempool); | ||
454 | return -EINVAL; | ||
455 | } | ||
456 | |||
457 | /* Pass in the ingress queue cmd parameters */ | ||
458 | iqp.pfn = hw->pfn; | ||
459 | iqp.vfn = 0; | ||
460 | iqp.iq_start = 1; | ||
461 | iqp.viid = 0; | ||
462 | iqp.type = FW_IQ_TYPE_FL_INT_CAP; | ||
463 | iqp.iqasynch = async; | ||
464 | if (csio_intr_coalesce_cnt) | ||
465 | iqp.iqanus = X_UPDATESCHEDULING_COUNTER_OPTTIMER; | ||
466 | else | ||
467 | iqp.iqanus = X_UPDATESCHEDULING_TIMER; | ||
468 | iqp.iqanud = X_UPDATEDELIVERY_INTERRUPT; | ||
469 | iqp.iqpciech = portid; | ||
470 | iqp.iqintcntthresh = (uint8_t)csio_sge_thresh_reg; | ||
471 | |||
472 | switch (csio_q_wr_sz(hw, iq_idx)) { | ||
473 | case 16: | ||
474 | iqp.iqesize = 0; break; | ||
475 | case 32: | ||
476 | iqp.iqesize = 1; break; | ||
477 | case 64: | ||
478 | iqp.iqesize = 2; break; | ||
479 | case 128: | ||
480 | iqp.iqesize = 3; break; | ||
481 | } | ||
482 | |||
483 | iqp.iqsize = csio_q_size(hw, iq_idx) / | ||
484 | csio_q_wr_sz(hw, iq_idx); | ||
485 | iqp.iqaddr = csio_q_pstart(hw, iq_idx); | ||
486 | |||
487 | flq_idx = csio_q_iq_flq_idx(hw, iq_idx); | ||
488 | if (flq_idx != -1) { | ||
489 | struct csio_q *flq = hw->wrm.q_arr[flq_idx]; | ||
490 | |||
491 | iqp.fl0paden = 1; | ||
492 | iqp.fl0packen = flq->un.fl.packen ? 1 : 0; | ||
493 | iqp.fl0fbmin = X_FETCHBURSTMIN_64B; | ||
494 | iqp.fl0fbmax = X_FETCHBURSTMAX_512B; | ||
495 | iqp.fl0size = csio_q_size(hw, flq_idx) / CSIO_QCREDIT_SZ; | ||
496 | iqp.fl0addr = csio_q_pstart(hw, flq_idx); | ||
497 | } | ||
498 | |||
499 | csio_mb_iq_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn); | ||
500 | |||
501 | if (csio_mb_issue(hw, mbp)) { | ||
502 | csio_err(hw, "Issue of IQ cmd failed!\n"); | ||
503 | mempool_free(mbp, hw->mb_mempool); | ||
504 | return -EINVAL; | ||
505 | } | ||
506 | |||
507 | if (cbfn != NULL) | ||
508 | return 0; | ||
509 | |||
510 | return csio_wr_iq_create_rsp(hw, mbp, iq_idx); | ||
511 | } | ||
512 | |||
513 | /* | ||
514 | * csio_wr_eq_cfg_rsp - Response handler for EQ creation. | ||
515 | * @hw: The HW module. | ||
516 | * @mbp: Mailbox. | ||
517 | * @eq_idx: Egress queue that got created. | ||
518 | * | ||
519 | * Handle FW_EQ_OFLD_CMD mailbox completion. Save off the assigned EQ ids. | ||
520 | */ | ||
521 | static int | ||
522 | csio_wr_eq_cfg_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx) | ||
523 | { | ||
524 | struct csio_eq_params eqp; | ||
525 | enum fw_retval retval; | ||
526 | |||
527 | memset(&eqp, 0, sizeof(struct csio_eq_params)); | ||
528 | |||
529 | csio_mb_eq_ofld_alloc_write_rsp(hw, mbp, &retval, &eqp); | ||
530 | |||
531 | if (retval != FW_SUCCESS) { | ||
532 | csio_err(hw, "EQ OFLD cmd returned 0x%x!\n", retval); | ||
533 | mempool_free(mbp, hw->mb_mempool); | ||
534 | return -EINVAL; | ||
535 | } | ||
536 | |||
537 | csio_q_eqid(hw, eq_idx) = (uint16_t)eqp.eqid; | ||
538 | csio_q_physeqid(hw, eq_idx) = (uint16_t)eqp.physeqid; | ||
539 | csio_q_pidx(hw, eq_idx) = csio_q_cidx(hw, eq_idx) = 0; | ||
540 | csio_q_inc_idx(hw, eq_idx) = 0; | ||
541 | |||
542 | mempool_free(mbp, hw->mb_mempool); | ||
543 | |||
544 | return 0; | ||
545 | } | ||
546 | |||
547 | /* | ||
548 | * csio_wr_eq_create - Configure an Egress queue with FW. | ||
549 | * @hw: HW module. | ||
550 | * @priv: Private data. | ||
551 | * @eq_idx: Egress queue index in the WR module. | ||
552 | * @iq_idx: Associated ingress queue index. | ||
553 | * @cbfn: Completion callback. | ||
554 | * | ||
555 | * This API configures an offload egress queue with FW by issuing a | ||
556 | * FW_EQ_OFLD_CMD (with alloc + write) mailbox. | ||
557 | */ | ||
558 | int | ||
559 | csio_wr_eq_create(struct csio_hw *hw, void *priv, int eq_idx, | ||
560 | int iq_idx, uint8_t portid, | ||
561 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
562 | { | ||
563 | struct csio_mb *mbp; | ||
564 | struct csio_eq_params eqp; | ||
565 | |||
566 | memset(&eqp, 0, sizeof(struct csio_eq_params)); | ||
567 | |||
568 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
569 | if (!mbp) { | ||
570 | csio_err(hw, "EQ command out of memory!\n"); | ||
571 | return -ENOMEM; | ||
572 | } | ||
573 | |||
574 | eqp.pfn = hw->pfn; | ||
575 | eqp.vfn = 0; | ||
576 | eqp.eqstart = 1; | ||
577 | eqp.hostfcmode = X_HOSTFCMODE_STATUS_PAGE; | ||
578 | eqp.iqid = csio_q_iqid(hw, iq_idx); | ||
579 | eqp.fbmin = X_FETCHBURSTMIN_64B; | ||
580 | eqp.fbmax = X_FETCHBURSTMAX_512B; | ||
581 | eqp.cidxfthresh = 0; | ||
582 | eqp.pciechn = portid; | ||
583 | eqp.eqsize = csio_q_size(hw, eq_idx) / CSIO_QCREDIT_SZ; | ||
584 | eqp.eqaddr = csio_q_pstart(hw, eq_idx); | ||
585 | |||
586 | csio_mb_eq_ofld_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, | ||
587 | &eqp, cbfn); | ||
588 | |||
589 | if (csio_mb_issue(hw, mbp)) { | ||
590 | csio_err(hw, "Issue of EQ OFLD cmd failed!\n"); | ||
591 | mempool_free(mbp, hw->mb_mempool); | ||
592 | return -EINVAL; | ||
593 | } | ||
594 | |||
595 | if (cbfn != NULL) | ||
596 | return 0; | ||
597 | |||
598 | return csio_wr_eq_cfg_rsp(hw, mbp, eq_idx); | ||
599 | } | ||
600 | |||
601 | /* | ||
602 | * csio_wr_iq_destroy_rsp - Response handler for IQ removal. | ||
603 | * @hw: The HW module. | ||
604 | * @mbp: Mailbox. | ||
605 | * @iq_idx: Ingress queue that was freed. | ||
606 | * | ||
607 | * Handle FW_IQ_CMD (free) mailbox completion. | ||
608 | */ | ||
609 | static int | ||
610 | csio_wr_iq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx) | ||
611 | { | ||
612 | enum fw_retval retval = csio_mb_fw_retval(mbp); | ||
613 | int rv = 0; | ||
614 | |||
615 | if (retval != FW_SUCCESS) | ||
616 | rv = -EINVAL; | ||
617 | |||
618 | mempool_free(mbp, hw->mb_mempool); | ||
619 | |||
620 | return rv; | ||
621 | } | ||
622 | |||
623 | /* | ||
624 | * csio_wr_iq_destroy - Free an ingress queue. | ||
625 | * @hw: The HW module. | ||
626 | * @priv: Private data object. | ||
627 | * @iq_idx: Ingress queue index to destroy | ||
628 | * @cbfn: Completion callback. | ||
629 | * | ||
630 | * This API frees an ingress queue by issuing the FW_IQ_CMD | ||
631 | * with the free bit set. | ||
632 | */ | ||
633 | static int | ||
634 | csio_wr_iq_destroy(struct csio_hw *hw, void *priv, int iq_idx, | ||
635 | void (*cbfn)(struct csio_hw *, struct csio_mb *)) | ||
636 | { | ||
637 | int rv = 0; | ||
638 | struct csio_mb *mbp; | ||
639 | struct csio_iq_params iqp; | ||
640 | int flq_idx; | ||
641 | |||
642 | memset(&iqp, 0, sizeof(struct csio_iq_params)); | ||
643 | |||
644 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
645 | if (!mbp) | ||
646 | return -ENOMEM; | ||
647 | |||
648 | iqp.pfn = hw->pfn; | ||
649 | iqp.vfn = 0; | ||
650 | iqp.iqid = csio_q_iqid(hw, iq_idx); | ||
651 | iqp.type = FW_IQ_TYPE_FL_INT_CAP; | ||
652 | |||
653 | flq_idx = csio_q_iq_flq_idx(hw, iq_idx); | ||
654 | if (flq_idx != -1) | ||
655 | iqp.fl0id = csio_q_flid(hw, flq_idx); | ||
656 | else | ||
657 | iqp.fl0id = 0xFFFF; | ||
658 | |||
659 | iqp.fl1id = 0xFFFF; | ||
660 | |||
661 | csio_mb_iq_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn); | ||
662 | |||
663 | rv = csio_mb_issue(hw, mbp); | ||
664 | if (rv != 0) { | ||
665 | mempool_free(mbp, hw->mb_mempool); | ||
666 | return rv; | ||
667 | } | ||
668 | |||
669 | if (cbfn != NULL) | ||
670 | return 0; | ||
671 | |||
672 | return csio_wr_iq_destroy_rsp(hw, mbp, iq_idx); | ||
673 | } | ||
674 | |||
675 | /* | ||
676 | * csio_wr_eq_destroy_rsp - Response handler for OFLD EQ removal. | ||
677 | * @hw: The HW module. | ||
678 | * @mbp: Mailbox. | ||
679 | * @eq_idx: Egress queue that was freed. | ||
680 | * | ||
681 | * Handle FW_EQ_OFLD_CMD (free) mailbox completion. | ||
682 | */ | ||
683 | static int | ||
684 | csio_wr_eq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx) | ||
685 | { | ||
686 | enum fw_retval retval = csio_mb_fw_retval(mbp); | ||
687 | int rv = 0; | ||
688 | |||
689 | if (retval != FW_SUCCESS) | ||
690 | rv = -EINVAL; | ||
691 | |||
692 | mempool_free(mbp, hw->mb_mempool); | ||
693 | |||
694 | return rv; | ||
695 | } | ||
696 | |||
697 | /* | ||
698 | * csio_wr_eq_destroy - Free an Egress queue. | ||
699 | * @hw: The HW module. | ||
700 | * @priv: Private data object. | ||
701 | * @eq_idx: Egress queue index to destroy | ||
702 | * @cbfn: Completion callback. | ||
703 | * | ||
704 | * This API frees an Egress queue by issuing the FW_EQ_OFLD_CMD | ||
705 | * with the free bit set. | ||
706 | */ | ||
707 | static int | ||
708 | csio_wr_eq_destroy(struct csio_hw *hw, void *priv, int eq_idx, | ||
709 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | ||
710 | { | ||
711 | int rv = 0; | ||
712 | struct csio_mb *mbp; | ||
713 | struct csio_eq_params eqp; | ||
714 | |||
715 | memset(&eqp, 0, sizeof(struct csio_eq_params)); | ||
716 | |||
717 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | ||
718 | if (!mbp) | ||
719 | return -ENOMEM; | ||
720 | |||
721 | eqp.pfn = hw->pfn; | ||
722 | eqp.vfn = 0; | ||
723 | eqp.eqid = csio_q_eqid(hw, eq_idx); | ||
724 | |||
725 | csio_mb_eq_ofld_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &eqp, cbfn); | ||
726 | |||
727 | rv = csio_mb_issue(hw, mbp); | ||
728 | if (rv != 0) { | ||
729 | mempool_free(mbp, hw->mb_mempool); | ||
730 | return rv; | ||
731 | } | ||
732 | |||
733 | if (cbfn != NULL) | ||
734 | return 0; | ||
735 | |||
736 | return csio_wr_eq_destroy_rsp(hw, mbp, eq_idx); | ||
737 | } | ||
738 | |||
739 | /* | ||
740 | * csio_wr_cleanup_eq_stpg - Cleanup Egress queue status page | ||
741 | * @hw: HW module | ||
742 | * @qidx: Egress queue index | ||
743 | * | ||
744 | * Cleanup the Egress queue status page. | ||
745 | */ | ||
746 | static void | ||
747 | csio_wr_cleanup_eq_stpg(struct csio_hw *hw, int qidx) | ||
748 | { | ||
749 | struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx]; | ||
750 | struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap; | ||
751 | |||
752 | memset(stp, 0, sizeof(*stp)); | ||
753 | } | ||
754 | |||
755 | /* | ||
756 | * csio_wr_cleanup_iq_ftr - Cleanup Footer entries in IQ | ||
757 | * @hw: HW module | ||
758 | * @qidx: Ingress queue index | ||
759 | * | ||
760 | * Clean up the footer entries in the given ingress queue, | ||
761 | * and set the internal copy of the genbit to 1. | ||
762 | */ | ||
763 | static void | ||
764 | csio_wr_cleanup_iq_ftr(struct csio_hw *hw, int qidx) | ||
765 | { | ||
766 | struct csio_wrm *wrm = csio_hw_to_wrm(hw); | ||
767 | struct csio_q *q = wrm->q_arr[qidx]; | ||
768 | void *wr; | ||
769 | struct csio_iqwr_footer *ftr; | ||
770 | uint32_t i = 0; | ||
771 | |||
772 | /* Set genbit to 1, since we are just about to zero out the footers */ | ||
773 | q->un.iq.genbit = 1; | ||
774 | |||
775 | for (i = 0; i < q->credits; i++) { | ||
776 | /* Get the WR */ | ||
777 | wr = (void *)((uintptr_t)q->vstart + | ||
778 | (i * q->wr_sz)); | ||
779 | /* Get the footer */ | ||
780 | ftr = (struct csio_iqwr_footer *)((uintptr_t)wr + | ||
781 | (q->wr_sz - sizeof(*ftr))); | ||
782 | /* Zero out footer */ | ||
783 | memset(ftr, 0, sizeof(*ftr)); | ||
784 | } | ||
785 | } | ||
786 | |||
787 | int | ||
788 | csio_wr_destroy_queues(struct csio_hw *hw, bool cmd) | ||
789 | { | ||
790 | int i, flq_idx; | ||
791 | struct csio_q *q; | ||
792 | struct csio_wrm *wrm = csio_hw_to_wrm(hw); | ||
793 | int rv; | ||
794 | |||
795 | for (i = 0; i < wrm->free_qidx; i++) { | ||
796 | q = wrm->q_arr[i]; | ||
797 | |||
798 | switch (q->type) { | ||
799 | case CSIO_EGRESS: | ||
800 | if (csio_q_eqid(hw, i) != CSIO_MAX_QID) { | ||
801 | csio_wr_cleanup_eq_stpg(hw, i); | ||
802 | if (!cmd) { | ||
803 | csio_q_eqid(hw, i) = CSIO_MAX_QID; | ||
804 | continue; | ||
805 | } | ||
806 | |||
807 | rv = csio_wr_eq_destroy(hw, NULL, i, NULL); | ||
808 | if ((rv == -EBUSY) || (rv == -ETIMEDOUT)) | ||
809 | cmd = false; | ||
810 | |||
811 | csio_q_eqid(hw, i) = CSIO_MAX_QID; | ||
812 | } | ||
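			/* fall through */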
813 | case CSIO_INGRESS: | ||
814 | if (csio_q_iqid(hw, i) != CSIO_MAX_QID) { | ||
815 | csio_wr_cleanup_iq_ftr(hw, i); | ||
816 | if (!cmd) { | ||
817 | csio_q_iqid(hw, i) = CSIO_MAX_QID; | ||
818 | flq_idx = csio_q_iq_flq_idx(hw, i); | ||
819 | if (flq_idx != -1) | ||
820 | csio_q_flid(hw, flq_idx) = | ||
821 | CSIO_MAX_QID; | ||
822 | continue; | ||
823 | } | ||
824 | |||
825 | rv = csio_wr_iq_destroy(hw, NULL, i, NULL); | ||
826 | if ((rv == -EBUSY) || (rv == -ETIMEDOUT)) | ||
827 | cmd = false; | ||
828 | |||
829 | csio_q_iqid(hw, i) = CSIO_MAX_QID; | ||
830 | flq_idx = csio_q_iq_flq_idx(hw, i); | ||
831 | if (flq_idx != -1) | ||
832 | csio_q_flid(hw, flq_idx) = CSIO_MAX_QID; | ||
833 | } | ||
834 | default: | ||
835 | break; | ||
836 | } | ||
837 | } | ||
838 | |||
839 | hw->flags &= ~CSIO_HWF_Q_FW_ALLOCED; | ||
840 | |||
841 | return 0; | ||
842 | } | ||
843 | |||
844 | /* | ||
845 | * csio_wr_get - Get requested size of WR entry/entries from queue. | ||
846 | * @hw: HW module. | ||
847 | * @qidx: Index of queue. | ||
848 | * @size: Cumulative size of Work request(s). | ||
849 | * @wrp: Work request pair. | ||
850 | * | ||
851 | * If requested credits are available, return the start address of the | ||
852 | * work request in the work request pair. Set pidx accordingly and | ||
853 | * return. | ||
854 | * | ||
855 | * NOTE about WR pair: | ||
856 | * ================== | ||
857 | * A WR can start towards the end of a queue, and then continue at the | ||
858 | * beginning, since the queue is considered to be circular. This will | ||
859 | * require a pair of address/size to be passed back to the caller - | ||
860 | * hence Work request pair format. | ||
861 | */ | ||
862 | int | ||
863 | csio_wr_get(struct csio_hw *hw, int qidx, uint32_t size, | ||
864 | struct csio_wr_pair *wrp) | ||
865 | { | ||
866 | struct csio_wrm *wrm = csio_hw_to_wrm(hw); | ||
867 | struct csio_q *q = wrm->q_arr[qidx]; | ||
868 | void *cwr = (void *)((uintptr_t)(q->vstart) + | ||
869 | (q->pidx * CSIO_QCREDIT_SZ)); | ||
870 | struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap; | ||
871 | uint16_t cidx = q->cidx = ntohs(stp->cidx); | ||
872 | uint16_t pidx = q->pidx; | ||
873 | uint32_t req_sz = ALIGN(size, CSIO_QCREDIT_SZ); | ||
874 | int req_credits = req_sz / CSIO_QCREDIT_SZ; | ||
875 | int credits; | ||
876 | |||
877 | CSIO_DB_ASSERT(q->owner != NULL); | ||
878 | CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx)); | ||
879 | CSIO_DB_ASSERT(cidx <= q->credits); | ||
880 | |||
881 | /* Calculate credits */ | ||
882 | if (pidx > cidx) { | ||
883 | credits = q->credits - (pidx - cidx) - 1; | ||
884 | } else if (cidx > pidx) { | ||
885 | credits = cidx - pidx - 1; | ||
886 | } else { | ||
887 | /* cidx == pidx, empty queue */ | ||
888 | credits = q->credits; | ||
889 | CSIO_INC_STATS(q, n_qempty); | ||
890 | } | ||
891 | |||
892 | /* | ||
893 | * Check if we have enough credits. | ||
894 | * credits = 1 implies queue is full. | ||
895 | */ | ||
896 | if (!credits || (req_credits > credits)) { | ||
897 | CSIO_INC_STATS(q, n_qfull); | ||
898 | return -EBUSY; | ||
899 | } | ||
900 | |||
901 | /* | ||
902 | * If we are here, we have enough credits to satisfy the | ||
903 | * request. Check if we are near the end of q, and if WR spills over. | ||
904 | * If it does, use the first addr/size to cover the queue until | ||
905 | * the end. Fit the remainder portion of the request at the top | ||
906 | * of queue and return it in the second addr/len. Set pidx | ||
907 | * accordingly. | ||
908 | */ | ||
909 | if (unlikely(((uintptr_t)cwr + req_sz) > (uintptr_t)(q->vwrap))) { | ||
910 | wrp->addr1 = cwr; | ||
911 | wrp->size1 = (uint32_t)((uintptr_t)q->vwrap - (uintptr_t)cwr); | ||
912 | wrp->addr2 = q->vstart; | ||
913 | wrp->size2 = req_sz - wrp->size1; | ||
914 | q->pidx = (uint16_t)(ALIGN(wrp->size2, CSIO_QCREDIT_SZ) / | ||
915 | CSIO_QCREDIT_SZ); | ||
916 | CSIO_INC_STATS(q, n_qwrap); | ||
917 | CSIO_INC_STATS(q, n_eq_wr_split); | ||
918 | } else { | ||
919 | wrp->addr1 = cwr; | ||
920 | wrp->size1 = req_sz; | ||
921 | wrp->addr2 = NULL; | ||
922 | wrp->size2 = 0; | ||
923 | q->pidx += (uint16_t)req_credits; | ||
924 | |||
925 | /* We are at the end of the queue, roll back pidx to top of queue */ | ||
926 | if (unlikely(q->pidx == q->credits)) { | ||
927 | q->pidx = 0; | ||
928 | CSIO_INC_STATS(q, n_qwrap); | ||
929 | } | ||
930 | } | ||
931 | |||
932 | q->inc_idx = (uint16_t)req_credits; | ||
933 | |||
934 | CSIO_INC_STATS(q, n_tot_reqs); | ||
935 | |||
936 | return 0; | ||
937 | } | ||
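/*
 * Worked example of the WR pair split above (hypothetical values, for
 * illustration only): assume a queue of 1024 credits (64KB, at
 * CSIO_QCREDIT_SZ = 64 bytes per credit), q->pidx = 1020, q->cidx = 100,
 * and a request of size 512 bytes (8 credits). The request starts 4
 * credits (256 bytes) short of vwrap, so it is split as:
 *
 *	wrp->addr1 = vstart + 1020 * 64;	wrp->size1 = 256;
 *	wrp->addr2 = vstart;			wrp->size2 = 256;
 *	q->pidx = 4;				q->inc_idx = 8;
 *
 * The caller fills such a split WR via csio_wr_copy_to_wrp(), which
 * handles the two segments transparently.
 */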
938 | |||
939 | /* | ||
940 | * csio_wr_copy_to_wrp - Copies given data into WR. | ||
941 | * @data_buf - Data buffer | ||
942 | * @wrp - Work request pair. | ||
943 | * @wr_off - Work request offset. | ||
944 | * @data_len - Data length. | ||
945 | * | ||
946 | * Copies the given data into the Work Request. The work request pair (wrp) | ||
947 | * specifies the address information of the Work request. | ||
948 | * Returns: none | ||
949 | */ | ||
950 | void | ||
951 | csio_wr_copy_to_wrp(void *data_buf, struct csio_wr_pair *wrp, | ||
952 | uint32_t wr_off, uint32_t data_len) | ||
953 | { | ||
954 | uint32_t nbytes; | ||
955 | |||
956 | /* Amount of space available in buffer addr1 of the WRP */ | ||
957 | nbytes = ((wrp->size1 - wr_off) >= data_len) ? | ||
958 | data_len : (wrp->size1 - wr_off); | ||
959 | |||
960 | memcpy((uint8_t *) wrp->addr1 + wr_off, data_buf, nbytes); | ||
961 | data_len -= nbytes; | ||
962 | |||
963 | /* Write the remaining data from the beginning of the circular buffer */ | ||
964 | if (data_len) { | ||
965 | CSIO_DB_ASSERT(data_len <= wrp->size2); | ||
966 | CSIO_DB_ASSERT(wrp->addr2 != NULL); | ||
967 | memcpy(wrp->addr2, (uint8_t *) data_buf + nbytes, data_len); | ||
968 | } | ||
969 | } | ||
970 | |||
971 | /* | ||
972 | * csio_wr_issue - Notify chip of Work request. | ||
973 | * @hw: HW module. | ||
974 | * @qidx: Index of queue. | ||
975 | * @prio: 0: Low priority, 1: High priority | ||
976 | * | ||
977 | * Rings the SGE Doorbell by writing the current producer index of the passed | ||
978 | * in queue into the register. | ||
979 | * | ||
980 | */ | ||
981 | int | ||
982 | csio_wr_issue(struct csio_hw *hw, int qidx, bool prio) | ||
983 | { | ||
984 | struct csio_wrm *wrm = csio_hw_to_wrm(hw); | ||
985 | struct csio_q *q = wrm->q_arr[qidx]; | ||
986 | |||
987 | CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx)); | ||
988 | |||
989 | wmb(); | ||
990 | /* Ring SGE Doorbell writing q->pidx into it */ | ||
991 | csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) | | ||
992 | PIDX(q->inc_idx), MYPF_REG(SGE_PF_KDOORBELL)); | ||
993 | q->inc_idx = 0; | ||
994 | |||
995 | return 0; | ||
996 | } | ||
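/*
 * Typical WR submission flow, as a minimal sketch (eq_idx and wr_buf are
 * illustrative names, not part of this driver):
 *
 *	struct csio_wr_pair wrp;
 *
 *	if (csio_wr_get(hw, eq_idx, sizeof(wr_buf), &wrp))
 *		return -EBUSY;			(not enough credits)
 *	csio_wr_copy_to_wrp(wr_buf, &wrp, 0, sizeof(wr_buf));
 *	csio_wr_issue(hw, eq_idx, false);	(ring the doorbell)
 *
 * csio_wr_get() reserves the credits and may hand back a split
 * addr1/addr2 pair, csio_wr_copy_to_wrp() hides the wrap-around, and
 * csio_wr_issue() posts the accumulated q->inc_idx to the SGE doorbell.
 */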
997 | |||
998 | static inline uint32_t | ||
999 | csio_wr_avail_qcredits(struct csio_q *q) | ||
1000 | { | ||
1001 | if (q->pidx > q->cidx) | ||
1002 | return q->pidx - q->cidx; | ||
1003 | else if (q->cidx > q->pidx) | ||
1004 | return q->credits - (q->cidx - q->pidx); | ||
1005 | else | ||
1006 | return 0; /* cidx == pidx, empty queue */ | ||
1007 | } | ||
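/*
 * Example with hypothetical numbers: for q->credits = 64, q->pidx = 10
 * and q->cidx = 60, the producer has wrapped past the consumer, so
 * 64 - (60 - 10) = 14 entries are still outstanding. For q->pidx = 60
 * and q->cidx = 10, the count is simply 60 - 10 = 50.
 */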
1008 | |||
1009 | /* | ||
1010 | * csio_wr_inval_flq_buf - Invalidate a free list buffer entry. | ||
1011 | * @hw: HW module. | ||
1012 | * @flq: The freelist queue. | ||
1013 | * | ||
1014 | * Invalidate the driver's version of a freelist buffer entry, | ||
1015 | * without freeing the associated DMA memory. The entry | ||
1016 | * to be invalidated is picked up from the current Free list | ||
1017 | * queue cidx. | ||
1018 | * | ||
1019 | */ | ||
1020 | static inline void | ||
1021 | csio_wr_inval_flq_buf(struct csio_hw *hw, struct csio_q *flq) | ||
1022 | { | ||
1023 | flq->cidx++; | ||
1024 | if (flq->cidx == flq->credits) { | ||
1025 | flq->cidx = 0; | ||
1026 | CSIO_INC_STATS(flq, n_qwrap); | ||
1027 | } | ||
1028 | } | ||
1029 | |||
1030 | /* | ||
1031 | * csio_wr_process_fl - Process a freelist completion. | ||
1032 | * @hw: HW module. | ||
1033 | * @q: The ingress queue attached to the Freelist. | ||
1034 | * @wr: The freelist completion WR in the ingress queue. | ||
1035 | * @len_to_qid: The lower 32-bits of the first flit of the RSP footer | ||
1036 | * @iq_handler: Caller's handler for this completion. | ||
1037 | * @priv: Private pointer of caller | ||
1038 | * | ||
1039 | */ | ||
1040 | static inline void | ||
1041 | csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q, | ||
1042 | void *wr, uint32_t len_to_qid, | ||
1043 | void (*iq_handler)(struct csio_hw *, void *, | ||
1044 | uint32_t, struct csio_fl_dma_buf *, | ||
1045 | void *), | ||
1046 | void *priv) | ||
1047 | { | ||
1048 | struct csio_wrm *wrm = csio_hw_to_wrm(hw); | ||
1049 | struct csio_sge *sge = &wrm->sge; | ||
1050 | struct csio_fl_dma_buf flb; | ||
1051 | struct csio_dma_buf *buf, *fbuf; | ||
1052 | uint32_t bufsz, len, lastlen = 0; | ||
1053 | int n; | ||
1054 | struct csio_q *flq = hw->wrm.q_arr[q->un.iq.flq_idx]; | ||
1055 | |||
1056 | CSIO_DB_ASSERT(flq != NULL); | ||
1057 | |||
1058 | len = len_to_qid; | ||
1059 | |||
1060 | if (len & IQWRF_NEWBUF) { | ||
1061 | if (flq->un.fl.offset > 0) { | ||
1062 | csio_wr_inval_flq_buf(hw, flq); | ||
1063 | flq->un.fl.offset = 0; | ||
1064 | } | ||
1065 | len = IQWRF_LEN_GET(len); | ||
1066 | } | ||
1067 | |||
1068 | CSIO_DB_ASSERT(len != 0); | ||
1069 | |||
1070 | flb.totlen = len; | ||
1071 | |||
1072 | /* Consume all freelist buffers used for len bytes */ | ||
1073 | for (n = 0, fbuf = flb.flbufs; ; n++, fbuf++) { | ||
1074 | buf = &flq->un.fl.bufs[flq->cidx]; | ||
1075 | bufsz = csio_wr_fl_bufsz(sge, buf); | ||
1076 | |||
1077 | fbuf->paddr = buf->paddr; | ||
1078 | fbuf->vaddr = buf->vaddr; | ||
1079 | |||
1080 | flb.offset = flq->un.fl.offset; | ||
1081 | lastlen = min(bufsz, len); | ||
1082 | fbuf->len = lastlen; | ||
1083 | |||
1084 | len -= lastlen; | ||
1085 | if (!len) | ||
1086 | break; | ||
1087 | csio_wr_inval_flq_buf(hw, flq); | ||
1088 | } | ||
1089 | |||
1090 | flb.defer_free = flq->un.fl.packen ? 0 : 1; | ||
1091 | |||
1092 | iq_handler(hw, wr, q->wr_sz - sizeof(struct csio_iqwr_footer), | ||
1093 | &flb, priv); | ||
1094 | |||
1095 | if (flq->un.fl.packen) | ||
1096 | flq->un.fl.offset += ALIGN(lastlen, sge->csio_fl_align); | ||
1097 | else | ||
1098 | csio_wr_inval_flq_buf(hw, flq); | ||
1099 | |||
1100 | } | ||
1101 | |||
1102 | /* | ||
1103 | * csio_is_new_iqwr - Is this a new Ingress queue entry ? | ||
1104 | * @q: Ingress queue. | ||
1105 | * @ftr: Ingress queue WR SGE footer. | ||
1106 | * | ||
1107 | * The entry is new if our generation bit matches the corresponding | ||
1108 | * bit in the footer of the current WR. | ||
1109 | */ | ||
1110 | static inline bool | ||
1111 | csio_is_new_iqwr(struct csio_q *q, struct csio_iqwr_footer *ftr) | ||
1112 | { | ||
1113 | return (q->un.iq.genbit == (ftr->u.type_gen >> IQWRF_GEN_SHIFT)); | ||
1114 | } | ||
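/*
 * Generation-bit protocol in brief: csio_wr_cleanup_iq_ftr() zeroes all
 * footers and sets the driver's genbit to 1, while the hardware stamps
 * every entry it writes with the current generation. A consumer loop
 * over the fixed-size entries therefore looks like this (sketch only;
 * csio_wr_process_iq() below is the real implementation):
 *
 *	while (csio_is_new_iqwr(q, ftr)) {
 *		handle(wr);
 *		if (++q->cidx == q->credits) {
 *			q->cidx = 0;
 *			q->un.iq.genbit ^= 0x1;
 *		}
 *		... advance wr and ftr to the next entry ...
 *	}
 *
 * Toggling the expected genbit on every wrap ensures that entries left
 * over from the previous pass around the ring are seen as stale.
 */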
1115 | |||
1116 | /* | ||
1117 | * csio_wr_process_iq - Process elements in Ingress queue. | ||
1118 | * @hw: HW pointer | ||
1119 | * @qidx: Index of queue | ||
1120 | * @iq_handler: Handler for this queue | ||
1121 | * @priv: Caller's private pointer | ||
1122 | * | ||
1123 | * This routine walks through every entry of the ingress queue, calling | ||
1124 | * the provided iq_handler with the entry, until the generation bit | ||
1125 | * flips. | ||
1126 | */ | ||
1127 | int | ||
1128 | csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q, | ||
1129 | void (*iq_handler)(struct csio_hw *, void *, | ||
1130 | uint32_t, struct csio_fl_dma_buf *, | ||
1131 | void *), | ||
1132 | void *priv) | ||
1133 | { | ||
1134 | struct csio_wrm *wrm = csio_hw_to_wrm(hw); | ||
1135 | void *wr = (void *)((uintptr_t)q->vstart + (q->cidx * q->wr_sz)); | ||
1136 | struct csio_iqwr_footer *ftr; | ||
1137 | uint32_t wr_type, fw_qid, qid; | ||
1138 | struct csio_q *q_completed; | ||
1139 | struct csio_q *flq = csio_iq_has_fl(q) ? | ||
1140 | wrm->q_arr[q->un.iq.flq_idx] : NULL; | ||
1141 | int rv = 0; | ||
1142 | |||
1143 | /* Get the footer */ | ||
1144 | ftr = (struct csio_iqwr_footer *)((uintptr_t)wr + | ||
1145 | (q->wr_sz - sizeof(*ftr))); | ||
1146 | |||
1147 | /* | ||
1148 | * When q wrapped around last time, driver should have inverted | ||
1149 | * ic.genbit as well. | ||
1150 | */ | ||
1151 | while (csio_is_new_iqwr(q, ftr)) { | ||
1152 | |||
1153 | CSIO_DB_ASSERT(((uintptr_t)wr + q->wr_sz) <= | ||
1154 | (uintptr_t)q->vwrap); | ||
1155 | rmb(); | ||
1156 | wr_type = IQWRF_TYPE_GET(ftr->u.type_gen); | ||
1157 | |||
1158 | switch (wr_type) { | ||
1159 | case X_RSPD_TYPE_CPL: | ||
1160 | /* Subtract footer from WR len */ | ||
1161 | iq_handler(hw, wr, q->wr_sz - sizeof(*ftr), NULL, priv); | ||
1162 | break; | ||
1163 | case X_RSPD_TYPE_FLBUF: | ||
1164 | csio_wr_process_fl(hw, q, wr, | ||
1165 | ntohl(ftr->pldbuflen_qid), | ||
1166 | iq_handler, priv); | ||
1167 | break; | ||
1168 | case X_RSPD_TYPE_INTR: | ||
1169 | fw_qid = ntohl(ftr->pldbuflen_qid); | ||
1170 | qid = fw_qid - wrm->fw_iq_start; | ||
1171 | q_completed = hw->wrm.intr_map[qid]; | ||
1172 | |||
1173 | if (unlikely(qid == | ||
1174 | csio_q_physiqid(hw, hw->intr_iq_idx))) { | ||
1175 | /* | ||
1176 | * We are already in the Forward | ||
1177 | * Interrupt Queue Service! Do not | ||
1178 | * service it again! | ||
1179 | * | ||
1180 | */ | ||
1181 | } else { | ||
1182 | CSIO_DB_ASSERT(q_completed); | ||
1183 | CSIO_DB_ASSERT( | ||
1184 | q_completed->un.iq.iq_intx_handler); | ||
1185 | |||
1186 | /* Call the queue handler. */ | ||
1187 | q_completed->un.iq.iq_intx_handler(hw, NULL, | ||
1188 | 0, NULL, (void *)q_completed); | ||
1189 | } | ||
1190 | break; | ||
1191 | default: | ||
1192 | csio_warn(hw, "Unknown resp type 0x%x received\n", | ||
1193 | wr_type); | ||
1194 | CSIO_INC_STATS(q, n_rsp_unknown); | ||
1195 | break; | ||
1196 | } | ||
1197 | |||
1198 | /* | ||
1199 | * Ingress *always* has fixed size WR entries. Therefore, | ||
1200 | * there should always be complete WRs towards the end of | ||
1201 | * queue. | ||
1202 | */ | ||
1203 | if (((uintptr_t)wr + q->wr_sz) == (uintptr_t)q->vwrap) { | ||
1204 | |||
1205 | /* Roll over to start of queue */ | ||
1206 | q->cidx = 0; | ||
1207 | wr = q->vstart; | ||
1208 | |||
1209 | /* Toggle genbit */ | ||
1210 | q->un.iq.genbit ^= 0x1; | ||
1211 | |||
1212 | CSIO_INC_STATS(q, n_qwrap); | ||
1213 | } else { | ||
1214 | q->cidx++; | ||
1215 | wr = (void *)((uintptr_t)(q->vstart) + | ||
1216 | (q->cidx * q->wr_sz)); | ||
1217 | } | ||
1218 | |||
1219 | ftr = (struct csio_iqwr_footer *)((uintptr_t)wr + | ||
1220 | (q->wr_sz - sizeof(*ftr))); | ||
1221 | q->inc_idx++; | ||
1222 | |||
1223 | } /* while (csio_is_new_iqwr(q, ftr)) */ | ||
1224 | |||
1225 | /* | ||
1226 | * We need to re-arm SGE interrupts in case we got a stray interrupt, | ||
1227 | * especially in MSI-X mode. With INTx, this may be a common occurrence. | ||
1228 | */ | ||
1229 | if (unlikely(!q->inc_idx)) { | ||
1230 | CSIO_INC_STATS(q, n_stray_comp); | ||
1231 | rv = -EINVAL; | ||
1232 | goto restart; | ||
1233 | } | ||
1234 | |||
1235 | /* Replenish free list buffers if pending falls below low water mark */ | ||
1236 | if (flq) { | ||
1237 | uint32_t avail = csio_wr_avail_qcredits(flq); | ||
1238 | if (avail <= 16) { | ||
1239 | /* Make sure that in the FLQ, at least 1 credit (8 FL buffers) | ||
1240 | * remains unpopulated, otherwise the HW thinks | ||
1241 | * FLQ is empty. | ||
1242 | */ | ||
1243 | csio_wr_update_fl(hw, flq, (flq->credits - 8) - avail); | ||
1244 | csio_wr_ring_fldb(hw, flq); | ||
1245 | } | ||
1246 | } | ||
1247 | |||
1248 | restart: | ||
1249 | /* Now inform SGE about our incremental index value */ | ||
1250 | csio_wr_reg32(hw, CIDXINC(q->inc_idx) | | ||
1251 | INGRESSQID(q->un.iq.physiqid) | | ||
1252 | TIMERREG(csio_sge_timer_reg), | ||
1253 | MYPF_REG(SGE_PF_GTS)); | ||
1254 | q->stats.n_tot_rsps += q->inc_idx; | ||
1255 | |||
1256 | q->inc_idx = 0; | ||
1257 | |||
1258 | return rv; | ||
1259 | } | ||
1260 | |||
1261 | int | ||
1262 | csio_wr_process_iq_idx(struct csio_hw *hw, int qidx, | ||
1263 | void (*iq_handler)(struct csio_hw *, void *, | ||
1264 | uint32_t, struct csio_fl_dma_buf *, | ||
1265 | void *), | ||
1266 | void *priv) | ||
1267 | { | ||
1268 | struct csio_wrm *wrm = csio_hw_to_wrm(hw); | ||
1269 | struct csio_q *iq = wrm->q_arr[qidx]; | ||
1270 | |||
1271 | return csio_wr_process_iq(hw, iq, iq_handler, priv); | ||
1272 | } | ||
1273 | |||
1274 | static int | ||
1275 | csio_closest_timer(struct csio_sge *s, int time) | ||
1276 | { | ||
1277 | int i, delta, match = 0, min_delta = INT_MAX; | ||
1278 | |||
1279 | for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { | ||
1280 | delta = time - s->timer_val[i]; | ||
1281 | if (delta < 0) | ||
1282 | delta = -delta; | ||
1283 | if (delta < min_delta) { | ||
1284 | min_delta = delta; | ||
1285 | match = i; | ||
1286 | } | ||
1287 | } | ||
1288 | return match; | ||
1289 | } | ||
1290 | |||
1291 | static int | ||
1292 | csio_closest_thresh(struct csio_sge *s, int cnt) | ||
1293 | { | ||
1294 | int i, delta, match = 0, min_delta = INT_MAX; | ||
1295 | |||
1296 | for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { | ||
1297 | delta = cnt - s->counter_val[i]; | ||
1298 | if (delta < 0) | ||
1299 | delta = -delta; | ||
1300 | if (delta < min_delta) { | ||
1301 | min_delta = delta; | ||
1302 | match = i; | ||
1303 | } | ||
1304 | } | ||
1305 | return match; | ||
1306 | } | ||
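/*
 * Example with hypothetical SGE values: for timer_val[] =
 * {5, 10, 20, 50, 100, 200} microseconds, a requested
 * csio_intr_coalesce_time of 30 maps to index 2, since |30 - 20| = 10
 * is the smallest delta. csio_closest_thresh() picks the closest
 * interrupt packet-count threshold from counter_val[] the same way.
 */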
1307 | |||
1308 | static void | ||
1309 | csio_wr_fixup_host_params(struct csio_hw *hw) | ||
1310 | { | ||
1311 | struct csio_wrm *wrm = csio_hw_to_wrm(hw); | ||
1312 | struct csio_sge *sge = &wrm->sge; | ||
1313 | uint32_t clsz = L1_CACHE_BYTES; | ||
1314 | uint32_t s_hps = PAGE_SHIFT - 10; | ||
1315 | uint32_t ingpad = 0; | ||
1316 | uint32_t stat_len = clsz > 64 ? 128 : 64; | ||
1317 | |||
1318 | csio_wr_reg32(hw, HOSTPAGESIZEPF0(s_hps) | HOSTPAGESIZEPF1(s_hps) | | ||
1319 | HOSTPAGESIZEPF2(s_hps) | HOSTPAGESIZEPF3(s_hps) | | ||
1320 | HOSTPAGESIZEPF4(s_hps) | HOSTPAGESIZEPF5(s_hps) | | ||
1321 | HOSTPAGESIZEPF6(s_hps) | HOSTPAGESIZEPF7(s_hps), | ||
1322 | SGE_HOST_PAGE_SIZE); | ||
1323 | |||
1324 | sge->csio_fl_align = clsz < 32 ? 32 : clsz; | ||
1325 | ingpad = ilog2(sge->csio_fl_align) - 5; | ||
1326 | |||
1327 | csio_set_reg_field(hw, SGE_CONTROL, INGPADBOUNDARY_MASK | | ||
1328 | EGRSTATUSPAGESIZE(1), | ||
1329 | INGPADBOUNDARY(ingpad) | | ||
1330 | EGRSTATUSPAGESIZE(stat_len != 64)); | ||
1331 | |||
1332 | /* FL BUFFER SIZE#0 is page size, i.e., already aligned to the cache line */ | ||
1333 | csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0); | ||
1334 | csio_wr_reg32(hw, | ||
1335 | (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) + | ||
1336 | sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1), | ||
1337 | SGE_FL_BUFFER_SIZE2); | ||
1338 | csio_wr_reg32(hw, | ||
1339 | (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) + | ||
1340 | sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1), | ||
1341 | SGE_FL_BUFFER_SIZE3); | ||
1342 | |||
1343 | csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ); | ||
1344 | |||
1345 | /* default value of rx_dma_offset of the NIC driver */ | ||
1346 | csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK, | ||
1347 | PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET)); | ||
1348 | } | ||
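/*
 * Note on the INGPADBOUNDARY encoding above: the field expresses the
 * ingress padding boundary as (32 << value), hence ingpad =
 * ilog2(csio_fl_align) - 5. For example, with a 64-byte cache line,
 * csio_fl_align = 64 and ingpad = ilog2(64) - 5 = 1, which corresponds
 * to X_INGPCIEBOUNDARY_64B.
 */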
1349 | |||
1350 | static void | ||
1351 | csio_init_intr_coalesce_parms(struct csio_hw *hw) | ||
1352 | { | ||
1353 | struct csio_wrm *wrm = csio_hw_to_wrm(hw); | ||
1354 | struct csio_sge *sge = &wrm->sge; | ||
1355 | |||
1356 | csio_sge_thresh_reg = csio_closest_thresh(sge, csio_intr_coalesce_cnt); | ||
1357 | if (csio_intr_coalesce_cnt) { | ||
1358 | csio_sge_thresh_reg = 0; | ||
1359 | csio_sge_timer_reg = X_TIMERREG_RESTART_COUNTER; | ||
1360 | return; | ||
1361 | } | ||
1362 | |||
1363 | csio_sge_timer_reg = csio_closest_timer(sge, csio_intr_coalesce_time); | ||
1364 | } | ||
1365 | |||
1366 | /* | ||
1367 | * csio_wr_get_sge - Get SGE register values. | ||
1368 | * @hw: HW module. | ||
1369 | * | ||
1370 | * Used by non-master functions and by master functions relying on the config file. | ||
1371 | */ | ||
1372 | static void | ||
1373 | csio_wr_get_sge(struct csio_hw *hw) | ||
1374 | { | ||
1375 | struct csio_wrm *wrm = csio_hw_to_wrm(hw); | ||
1376 | struct csio_sge *sge = &wrm->sge; | ||
1377 | uint32_t ingpad; | ||
1378 | int i; | ||
1379 | u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5; | ||
1380 | u32 ingress_rx_threshold; | ||
1381 | |||
1382 | sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL); | ||
1383 | |||
1384 | ingpad = INGPADBOUNDARY_GET(sge->sge_control); | ||
1385 | |||
1386 | switch (ingpad) { | ||
1387 | case X_INGPCIEBOUNDARY_32B: | ||
1388 | sge->csio_fl_align = 32; break; | ||
1389 | case X_INGPCIEBOUNDARY_64B: | ||
1390 | sge->csio_fl_align = 64; break; | ||
1391 | case X_INGPCIEBOUNDARY_128B: | ||
1392 | sge->csio_fl_align = 128; break; | ||
1393 | case X_INGPCIEBOUNDARY_256B: | ||
1394 | sge->csio_fl_align = 256; break; | ||
1395 | case X_INGPCIEBOUNDARY_512B: | ||
1396 | sge->csio_fl_align = 512; break; | ||
1397 | case X_INGPCIEBOUNDARY_1024B: | ||
1398 | sge->csio_fl_align = 1024; break; | ||
1399 | case X_INGPCIEBOUNDARY_2048B: | ||
1400 | sge->csio_fl_align = 2048; break; | ||
1401 | case X_INGPCIEBOUNDARY_4096B: | ||
1402 | sge->csio_fl_align = 4096; break; | ||
1403 | } | ||
1404 | |||
1405 | for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++) | ||
1406 | csio_get_flbuf_size(hw, sge, i); | ||
1407 | |||
1408 | timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1); | ||
1409 | timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3); | ||
1410 | timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5); | ||
1411 | |||
1412 | sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw, | ||
1413 | TIMERVALUE0_GET(timer_value_0_and_1)); | ||
1414 | sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw, | ||
1415 | TIMERVALUE1_GET(timer_value_0_and_1)); | ||
1416 | sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw, | ||
1417 | TIMERVALUE2_GET(timer_value_2_and_3)); | ||
1418 | sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw, | ||
1419 | TIMERVALUE3_GET(timer_value_2_and_3)); | ||
1420 | sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw, | ||
1421 | TIMERVALUE4_GET(timer_value_4_and_5)); | ||
1422 | sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw, | ||
1423 | TIMERVALUE5_GET(timer_value_4_and_5)); | ||
1424 | |||
1425 | ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD); | ||
1426 | sge->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold); | ||
1427 | sge->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold); | ||
1428 | sge->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold); | ||
1429 | sge->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold); | ||
1430 | |||
1431 | csio_init_intr_coalesce_parms(hw); | ||
1432 | } | ||
1433 | |||
1434 | /* | ||
1435 | * csio_wr_set_sge - Initialize SGE registers | ||
1436 | * @hw: HW module. | ||
1437 | * | ||
1438 | * Used by Master function to initialize SGE registers in the absence | ||
1439 | * of a config file. | ||
1440 | */ | ||
1441 | static void | ||
1442 | csio_wr_set_sge(struct csio_hw *hw) | ||
1443 | { | ||
1444 | struct csio_wrm *wrm = csio_hw_to_wrm(hw); | ||
1445 | struct csio_sge *sge = &wrm->sge; | ||
1446 | int i; | ||
1447 | |||
1448 | /* | ||
1449 | * Set up our basic SGE mode to deliver CPL messages to our Ingress | ||
1450 | * Queue and Packet Data to the Free List. | ||
1451 | */ | ||
1452 | csio_set_reg_field(hw, SGE_CONTROL, RXPKTCPLMODE(1), RXPKTCPLMODE(1)); | ||
1453 | |||
1454 | sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL); | ||
1455 | |||
1456 | /* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */ | ||
1457 | |||
1458 | /* | ||
1459 | * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows | ||
1460 | * and generate an interrupt when this occurs so we can recover. | ||
1461 | */ | ||
1462 | csio_set_reg_field(hw, SGE_DBFIFO_STATUS, | ||
1463 | HP_INT_THRESH(HP_INT_THRESH_MASK) | | ||
1464 | LP_INT_THRESH(LP_INT_THRESH_MASK), | ||
1465 | HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) | | ||
1466 | LP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH)); | ||
1467 | csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP, | ||
1468 | ENABLE_DROP); | ||
1469 | |||
1470 | /* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */ | ||
1471 | |||
1472 | CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1); | ||
1473 | CSIO_SET_FLBUF_SIZE(hw, 2, CSIO_SGE_FLBUF_SIZE2); | ||
1474 | CSIO_SET_FLBUF_SIZE(hw, 3, CSIO_SGE_FLBUF_SIZE3); | ||
1475 | CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4); | ||
1476 | CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5); | ||
1477 | CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6); | ||
1478 | CSIO_SET_FLBUF_SIZE(hw, 7, CSIO_SGE_FLBUF_SIZE7); | ||
1479 | CSIO_SET_FLBUF_SIZE(hw, 8, CSIO_SGE_FLBUF_SIZE8); | ||
1480 | |||
1481 | for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++) | ||
1482 | csio_get_flbuf_size(hw, sge, i); | ||
1483 | |||
1484 | /* Initialize interrupt coalescing attributes */ | ||
1485 | sge->timer_val[0] = CSIO_SGE_TIMER_VAL_0; | ||
1486 | sge->timer_val[1] = CSIO_SGE_TIMER_VAL_1; | ||
1487 | sge->timer_val[2] = CSIO_SGE_TIMER_VAL_2; | ||
1488 | sge->timer_val[3] = CSIO_SGE_TIMER_VAL_3; | ||
1489 | sge->timer_val[4] = CSIO_SGE_TIMER_VAL_4; | ||
1490 | sge->timer_val[5] = CSIO_SGE_TIMER_VAL_5; | ||
1491 | |||
1492 | sge->counter_val[0] = CSIO_SGE_INT_CNT_VAL_0; | ||
1493 | sge->counter_val[1] = CSIO_SGE_INT_CNT_VAL_1; | ||
1494 | sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2; | ||
1495 | sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3; | ||
1496 | |||
1497 | csio_wr_reg32(hw, THRESHOLD_0(sge->counter_val[0]) | | ||
1498 | THRESHOLD_1(sge->counter_val[1]) | | ||
1499 | THRESHOLD_2(sge->counter_val[2]) | | ||
1500 | THRESHOLD_3(sge->counter_val[3]), | ||
1501 | SGE_INGRESS_RX_THRESHOLD); | ||
1502 | |||
1503 | csio_wr_reg32(hw, | ||
1504 | TIMERVALUE0(csio_us_to_core_ticks(hw, sge->timer_val[0])) | | ||
1505 | TIMERVALUE1(csio_us_to_core_ticks(hw, sge->timer_val[1])), | ||
1506 | SGE_TIMER_VALUE_0_AND_1); | ||
1507 | |||
1508 | csio_wr_reg32(hw, | ||
1509 | TIMERVALUE2(csio_us_to_core_ticks(hw, sge->timer_val[2])) | | ||
1510 | TIMERVALUE3(csio_us_to_core_ticks(hw, sge->timer_val[3])), | ||
1511 | SGE_TIMER_VALUE_2_AND_3); | ||
1512 | |||
1513 | csio_wr_reg32(hw, | ||
1514 | TIMERVALUE4(csio_us_to_core_ticks(hw, sge->timer_val[4])) | | ||
1515 | TIMERVALUE5(csio_us_to_core_ticks(hw, sge->timer_val[5])), | ||
1516 | SGE_TIMER_VALUE_4_AND_5); | ||
1517 | |||
1518 | csio_init_intr_coalesce_parms(hw); | ||
1519 | } | ||
1520 | |||
1521 | void | ||
1522 | csio_wr_sge_init(struct csio_hw *hw) | ||
1523 | { | ||
1524 | /* | ||
1525 | * If we are master: | ||
1526 | * - If we plan to use the config file, we need to fixup some | ||
1527 | * host specific registers, and read the rest of the SGE | ||
1528 | * configuration. | ||
1529 | * - If we don't plan to use the config file, we need to initialize | ||
1530 | * SGE entirely, including fixing the host specific registers. | ||
1531 | * If we aren't the master, we are only allowed to read and work off of | ||
1532 | * the already initialized SGE values. | ||
1533 | * | ||
1534 | * Therefore, before calling this function, we assume that the master- | ||
1535 | * ship of the card, and whether to use config file or not, have | ||
1536 | * already been decided. In other words, CSIO_HWF_USING_SOFT_PARAMS and | ||
1537 | * CSIO_HWF_MASTER should be set/unset. | ||
1538 | */ | ||
1539 | if (csio_is_hw_master(hw)) { | ||
1540 | csio_wr_fixup_host_params(hw); | ||
1541 | |||
1542 | if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) | ||
1543 | csio_wr_get_sge(hw); | ||
1544 | else | ||
1545 | csio_wr_set_sge(hw); | ||
1546 | } else | ||
1547 | csio_wr_get_sge(hw); | ||
1548 | } | ||
1549 | |||
1550 | /* | ||
1551 | * csio_wrm_init - Initialize Work request module. | ||
1552 | * @wrm: WR module | ||
1553 | * @hw: HW pointer | ||
1554 | * | ||
1555 | * Allocates memory for an array of queue pointers starting at q_arr. | ||
1556 | */ | ||
1557 | int | ||
1558 | csio_wrm_init(struct csio_wrm *wrm, struct csio_hw *hw) | ||
1559 | { | ||
1560 | int i; | ||
1561 | |||
1562 | if (!wrm->num_q) { | ||
1563 | csio_err(hw, "Num queues is not set\n"); | ||
1564 | return -EINVAL; | ||
1565 | } | ||
1566 | |||
1567 | wrm->q_arr = kzalloc(sizeof(struct csio_q *) * wrm->num_q, GFP_KERNEL); | ||
1568 | if (!wrm->q_arr) | ||
1569 | goto err; | ||
1570 | |||
1571 | for (i = 0; i < wrm->num_q; i++) { | ||
1572 | wrm->q_arr[i] = kzalloc(sizeof(struct csio_q), GFP_KERNEL); | ||
1573 | if (!wrm->q_arr[i]) { | ||
1574 | while (--i >= 0) | ||
1575 | kfree(wrm->q_arr[i]); | ||
1576 | goto err_free_arr; | ||
1577 | } | ||
1578 | } | ||
1579 | wrm->free_qidx = 0; | ||
1580 | |||
1581 | return 0; | ||
1582 | |||
1583 | err_free_arr: | ||
1584 | kfree(wrm->q_arr); | ||
1585 | err: | ||
1586 | return -ENOMEM; | ||
1587 | } | ||
1588 | |||
1589 | /* | ||
1590 | * csio_wrm_exit - Uninitialize Work request module. | ||
1591 | * @wrm: WR module | ||
1592 | * @hw: HW module | ||
1593 | * | ||
1594 | * Uninitialize WR module. Free q_arr and pointers in it. | ||
1595 | * We have the additional job of freeing the DMA memory associated | ||
1596 | * with the queues. | ||
1597 | */ | ||
1598 | void | ||
1599 | csio_wrm_exit(struct csio_wrm *wrm, struct csio_hw *hw) | ||
1600 | { | ||
1601 | int i; | ||
1602 | uint32_t j; | ||
1603 | struct csio_q *q; | ||
1604 | struct csio_dma_buf *buf; | ||
1605 | |||
1606 | for (i = 0; i < wrm->num_q; i++) { | ||
1607 | q = wrm->q_arr[i]; | ||
1608 | |||
1609 | if (wrm->free_qidx && (i < wrm->free_qidx)) { | ||
1610 | if (q->type == CSIO_FREELIST) { | ||
1611 | if (!q->un.fl.bufs) | ||
1612 | continue; | ||
1613 | for (j = 0; j < q->credits; j++) { | ||
1614 | buf = &q->un.fl.bufs[j]; | ||
1615 | if (!buf->vaddr) | ||
1616 | continue; | ||
1617 | pci_free_consistent(hw->pdev, buf->len, | ||
1618 | buf->vaddr, | ||
1619 | buf->paddr); | ||
1620 | } | ||
1621 | kfree(q->un.fl.bufs); | ||
1622 | } | ||
1623 | pci_free_consistent(hw->pdev, q->size, | ||
1624 | q->vstart, q->pstart); | ||
1625 | } | ||
1626 | kfree(q); | ||
1627 | } | ||
1628 | |||
1629 | hw->flags &= ~CSIO_HWF_Q_MEM_ALLOCED; | ||
1630 | |||
1631 | kfree(wrm->q_arr); | ||
1632 | } | ||
diff --git a/drivers/scsi/csiostor/csio_wr.h b/drivers/scsi/csiostor/csio_wr.h new file mode 100644 index 000000000000..8d30e7ac1f5e --- /dev/null +++ b/drivers/scsi/csiostor/csio_wr.h | |||
@@ -0,0 +1,512 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio FCoE driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #ifndef __CSIO_WR_H__ | ||
36 | #define __CSIO_WR_H__ | ||
37 | |||
38 | #include <linux/cache.h> | ||
39 | |||
40 | #include "csio_defs.h" | ||
41 | #include "t4fw_api.h" | ||
42 | #include "t4fw_api_stor.h" | ||
43 | |||
44 | /* | ||
45 | * SGE register field values. | ||
46 | */ | ||
47 | #define X_INGPCIEBOUNDARY_32B 0 | ||
48 | #define X_INGPCIEBOUNDARY_64B 1 | ||
49 | #define X_INGPCIEBOUNDARY_128B 2 | ||
50 | #define X_INGPCIEBOUNDARY_256B 3 | ||
51 | #define X_INGPCIEBOUNDARY_512B 4 | ||
52 | #define X_INGPCIEBOUNDARY_1024B 5 | ||
53 | #define X_INGPCIEBOUNDARY_2048B 6 | ||
54 | #define X_INGPCIEBOUNDARY_4096B 7 | ||
55 | |||
56 | /* GTS register */ | ||
57 | #define X_TIMERREG_COUNTER0 0 | ||
58 | #define X_TIMERREG_COUNTER1 1 | ||
59 | #define X_TIMERREG_COUNTER2 2 | ||
60 | #define X_TIMERREG_COUNTER3 3 | ||
61 | #define X_TIMERREG_COUNTER4 4 | ||
62 | #define X_TIMERREG_COUNTER5 5 | ||
63 | #define X_TIMERREG_RESTART_COUNTER 6 | ||
64 | #define X_TIMERREG_UPDATE_CIDX 7 | ||
65 | |||
66 | /* | ||
67 | * Egress Context field values | ||
68 | */ | ||
69 | #define X_FETCHBURSTMIN_16B 0 | ||
70 | #define X_FETCHBURSTMIN_32B 1 | ||
71 | #define X_FETCHBURSTMIN_64B 2 | ||
72 | #define X_FETCHBURSTMIN_128B 3 | ||
73 | |||
74 | #define X_FETCHBURSTMAX_64B 0 | ||
75 | #define X_FETCHBURSTMAX_128B 1 | ||
76 | #define X_FETCHBURSTMAX_256B 2 | ||
77 | #define X_FETCHBURSTMAX_512B 3 | ||
78 | |||
79 | #define X_HOSTFCMODE_NONE 0 | ||
80 | #define X_HOSTFCMODE_INGRESS_QUEUE 1 | ||
81 | #define X_HOSTFCMODE_STATUS_PAGE 2 | ||
82 | #define X_HOSTFCMODE_BOTH 3 | ||
83 | |||
84 | /* | ||
85 | * Ingress Context field values | ||
86 | */ | ||
87 | #define X_UPDATESCHEDULING_TIMER 0 | ||
88 | #define X_UPDATESCHEDULING_COUNTER_OPTTIMER 1 | ||
89 | |||
90 | #define X_UPDATEDELIVERY_NONE 0 | ||
91 | #define X_UPDATEDELIVERY_INTERRUPT 1 | ||
92 | #define X_UPDATEDELIVERY_STATUS_PAGE 2 | ||
93 | #define X_UPDATEDELIVERY_BOTH 3 | ||
94 | |||
95 | #define X_INTERRUPTDESTINATION_PCIE 0 | ||
96 | #define X_INTERRUPTDESTINATION_IQ 1 | ||
97 | |||
98 | #define X_RSPD_TYPE_FLBUF 0 | ||
99 | #define X_RSPD_TYPE_CPL 1 | ||
100 | #define X_RSPD_TYPE_INTR 2 | ||
101 | |||
102 | /* WR status is at the same position as retval in a CMD header */ | ||
103 | #define csio_wr_status(_wr) \ | ||
104 | (FW_CMD_RETVAL_GET(ntohl(((struct fw_cmd_hdr *)(_wr))->lo))) | ||
105 | |||
106 | struct csio_hw; | ||
107 | |||
108 | extern int csio_intr_coalesce_cnt; | ||
109 | extern int csio_intr_coalesce_time; | ||
110 | |||
111 | /* Ingress queue params */ | ||
112 | struct csio_iq_params { | ||
113 | |||
114 | uint8_t iq_start:1; | ||
115 | uint8_t iq_stop:1; | ||
116 | uint8_t pfn:3; | ||
117 | |||
118 | uint8_t vfn; | ||
119 | |||
120 | uint16_t physiqid; | ||
121 | uint16_t iqid; | ||
122 | |||
123 | uint16_t fl0id; | ||
124 | uint16_t fl1id; | ||
125 | |||
126 | uint8_t viid; | ||
127 | |||
128 | uint8_t type; | ||
129 | uint8_t iqasynch; | ||
130 | uint8_t reserved4; | ||
131 | |||
132 | uint8_t iqandst; | ||
133 | uint8_t iqanus; | ||
134 | uint8_t iqanud; | ||
135 | |||
136 | uint16_t iqandstindex; | ||
137 | |||
138 | uint8_t iqdroprss; | ||
139 | uint8_t iqpciech; | ||
140 | uint8_t iqdcaen; | ||
141 | |||
142 | uint8_t iqdcacpu; | ||
143 | uint8_t iqintcntthresh; | ||
144 | uint8_t iqo; | ||
145 | |||
146 | uint8_t iqcprio; | ||
147 | uint8_t iqesize; | ||
148 | |||
149 | uint16_t iqsize; | ||
150 | |||
151 | uint64_t iqaddr; | ||
152 | |||
153 | uint8_t iqflintiqhsen; | ||
154 | uint8_t reserved5; | ||
155 | uint8_t iqflintcongen; | ||
156 | uint8_t iqflintcngchmap; | ||
157 | |||
158 | uint32_t reserved6; | ||
159 | |||
160 | uint8_t fl0hostfcmode; | ||
161 | uint8_t fl0cprio; | ||
162 | uint8_t fl0paden; | ||
163 | uint8_t fl0packen; | ||
164 | uint8_t fl0congen; | ||
165 | uint8_t fl0dcaen; | ||
166 | |||
167 | uint8_t fl0dcacpu; | ||
168 | uint8_t fl0fbmin; | ||
169 | |||
170 | uint8_t fl0fbmax; | ||
171 | uint8_t fl0cidxfthresho; | ||
172 | uint8_t fl0cidxfthresh; | ||
173 | |||
174 | uint16_t fl0size; | ||
175 | |||
176 | uint64_t fl0addr; | ||
177 | |||
178 | uint64_t reserved7; | ||
179 | |||
180 | uint8_t fl1hostfcmode; | ||
181 | uint8_t fl1cprio; | ||
182 | uint8_t fl1paden; | ||
183 | uint8_t fl1packen; | ||
184 | uint8_t fl1congen; | ||
185 | uint8_t fl1dcaen; | ||
186 | |||
187 | uint8_t fl1dcacpu; | ||
188 | uint8_t fl1fbmin; | ||
189 | |||
190 | uint8_t fl1fbmax; | ||
191 | uint8_t fl1cidxfthresho; | ||
192 | uint8_t fl1cidxfthresh; | ||
193 | |||
194 | uint16_t fl1size; | ||
195 | |||
196 | uint64_t fl1addr; | ||
197 | }; | ||
198 | |||
199 | /* Egress queue params */ | ||
200 | struct csio_eq_params { | ||
201 | |||
202 | uint8_t pfn; | ||
203 | uint8_t vfn; | ||
204 | |||
205 | uint8_t eqstart:1; | ||
206 | uint8_t eqstop:1; | ||
207 | |||
208 | uint16_t physeqid; | ||
209 | uint32_t eqid; | ||
210 | |||
211 | uint8_t hostfcmode:2; | ||
212 | uint8_t cprio:1; | ||
213 | uint8_t pciechn:3; | ||
214 | |||
215 | uint16_t iqid; | ||
216 | |||
217 | uint8_t dcaen:1; | ||
218 | uint8_t dcacpu:5; | ||
219 | |||
220 | uint8_t fbmin:3; | ||
221 | uint8_t fbmax:3; | ||
222 | |||
223 | uint8_t cidxfthresho:1; | ||
224 | uint8_t cidxfthresh:3; | ||
225 | |||
226 | uint16_t eqsize; | ||
227 | |||
228 | uint64_t eqaddr; | ||
229 | }; | ||
230 | |||
231 | struct csio_dma_buf { | ||
232 | struct list_head list; | ||
233 | void *vaddr; /* Virtual address */ | ||
234 | dma_addr_t paddr; /* Physical address */ | ||
235 | uint32_t len; /* Buffer size */ | ||
236 | }; | ||
237 | |||
238 | /* Generic I/O request structure */ | ||
239 | struct csio_ioreq { | ||
240 | struct csio_sm sm; /* SM, List | ||
241 | * should be the first member | ||
242 | */ | ||
243 | int iq_idx; /* Ingress queue index */ | ||
244 | int eq_idx; /* Egress queue index */ | ||
245 | uint32_t nsge; /* Number of SG elements */ | ||
246 | uint32_t tmo; /* Driver timeout */ | ||
247 | uint32_t datadir; /* Data direction */ | ||
248 | struct csio_dma_buf dma_buf; /* Req/resp DMA buffers */ | ||
249 | uint16_t wr_status; /* WR completion status */ | ||
250 | int16_t drv_status; /* Driver internal status */ | ||
251 | struct csio_lnode *lnode; /* Owner lnode */ | ||
252 | struct csio_rnode *rnode; /* Src/destination rnode */ | ||
253 | void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *); | ||
254 | /* completion callback */ | ||
255 | void *scratch1; /* Scratch area 1. | ||
256 | */ | ||
257 | void *scratch2; /* Scratch area 2. */ | ||
258 | struct list_head gen_list; /* Any list associated with | ||
259 | * this ioreq. | ||
260 | */ | ||
261 | uint64_t fw_handle; /* Unique handle passed | ||
262 | * to FW | ||
263 | */ | ||
264 | uint8_t dcopy; /* Data copy required */ | ||
265 | uint8_t reserved1; | ||
266 | uint16_t reserved2; | ||
267 | struct completion cmplobj; /* ioreq completion object */ | ||
268 | } ____cacheline_aligned_in_smp; | ||
269 | |||
270 | /* | ||
271 | * Egress status page for egress cidx updates | ||
272 | */ | ||
273 | struct csio_qstatus_page { | ||
274 | __be32 qid; | ||
275 | __be16 cidx; | ||
276 | __be16 pidx; | ||
277 | }; | ||
278 | |||
279 | |||
280 | enum { | ||
281 | CSIO_MAX_FLBUF_PER_IQWR = 4, | ||
282 | CSIO_QCREDIT_SZ = 64, /* pidx/cidx increments | ||
283 | * in bytes | ||
284 | */ | ||
285 | CSIO_MAX_QID = 0xFFFF, | ||
286 | CSIO_MAX_IQ = 128, | ||
287 | |||
288 | CSIO_SGE_NTIMERS = 6, | ||
289 | CSIO_SGE_NCOUNTERS = 4, | ||
290 | CSIO_SGE_FL_SIZE_REGS = 16, | ||
291 | }; | ||
292 | |||
293 | /* Defines for type */ | ||
294 | enum { | ||
295 | CSIO_EGRESS = 1, | ||
296 | CSIO_INGRESS = 2, | ||
297 | CSIO_FREELIST = 3, | ||
298 | }; | ||
299 | |||
300 | /* | ||
301 | * Structure for footer (last 2 flits) of Ingress Queue Entry. | ||
302 | */ | ||
303 | struct csio_iqwr_footer { | ||
304 | __be32 hdrbuflen_pidx; | ||
305 | __be32 pldbuflen_qid; | ||
306 | union { | ||
307 | u8 type_gen; | ||
308 | __be64 last_flit; | ||
309 | } u; | ||
310 | }; | ||
311 | |||
312 | #define IQWRF_NEWBUF (1 << 31) | ||
313 | #define IQWRF_LEN_GET(x) (((x) >> 0) & 0x7fffffffU) | ||
314 | #define IQWRF_GEN_SHIFT 7 | ||
315 | #define IQWRF_TYPE_GET(x) (((x) >> 4) & 0x3U) | ||
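/*
 * Decoding example (illustrative value): for a pldbuflen_qid of
 * 0x80000400, IQWRF_NEWBUF is set and IQWRF_LEN_GET() yields 0x400,
 * i.e. 1024 bytes starting in a fresh freelist buffer. From type_gen,
 * IQWRF_TYPE_GET() extracts bits 5:4 (one of X_RSPD_TYPE_*), and
 * (type_gen >> IQWRF_GEN_SHIFT) recovers the generation bit used by
 * csio_is_new_iqwr().
 */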
316 | |||
317 | |||
318 | /* | ||
319 | * WR pair: | ||
320 | * ======== | ||
321 | * A WR can start towards the end of a queue, and then continue at the | ||
322 | * beginning, since the queue is considered to be circular. This will | ||
323 | * require a pair of address/len to be passed back to the caller - | ||
324 | * hence the Work request pair structure. | ||
325 | */ | ||
326 | struct csio_wr_pair { | ||
327 | void *addr1; | ||
328 | uint32_t size1; | ||
329 | void *addr2; | ||
330 | uint32_t size2; | ||
331 | }; | ||
332 | |||
333 | /* | ||
334 | * The following structure is used by ingress processing to return the | ||
335 | * free list buffers to consumers. | ||
336 | */ | ||
337 | struct csio_fl_dma_buf { | ||
338 | struct csio_dma_buf flbufs[CSIO_MAX_FLBUF_PER_IQWR]; | ||
339 | /* Freelist DMA buffers */ | ||
340 | int offset; /* Offset within the | ||
341 | * first FL buf. | ||
342 | */ | ||
343 | uint32_t totlen; /* Total length */ | ||
344 | uint8_t defer_free; /* Free of buffer can | ||
345 | * be deferred | ||
346 | */ | ||
347 | }; | ||
348 | |||
349 | /* Data-types */ | ||
350 | typedef void (*iq_handler_t)(struct csio_hw *, void *, uint32_t, | ||
351 | struct csio_fl_dma_buf *, void *); | ||
352 | |||
353 | struct csio_iq { | ||
354 | uint16_t iqid; /* Queue ID */ | ||
355 | uint16_t physiqid; /* Physical Queue ID */ | ||
356 | uint16_t genbit; /* Generation bit, | ||
357 | * initially set to 1 | ||
358 | */ | ||
359 | int flq_idx; /* Freelist queue index */ | ||
360 | iq_handler_t iq_intx_handler; /* IQ INTx handler routine */ | ||
361 | }; | ||
362 | |||
363 | struct csio_eq { | ||
364 | uint16_t eqid; /* Qid */ | ||
365 | uint16_t physeqid; /* Physical Queue ID */ | ||
366 | uint8_t wrap[512]; /* Temp area for q-wrap around */ | ||
367 | }; | ||
368 | |||
369 | struct csio_fl { | ||
370 | uint16_t flid; /* Qid */ | ||
371 | uint16_t packen; /* Packing enabled? */ | ||
372 | int offset; /* Offset within FL buf */ | ||
373 | int sreg; /* Size register */ | ||
374 | struct csio_dma_buf *bufs; /* Free list buffer ptr array | ||
375 | * indexed using flq->cidx/pidx | ||
376 | */ | ||
377 | }; | ||
378 | |||
379 | struct csio_qstats { | ||
380 | uint32_t n_tot_reqs; /* Total no. of Requests */ | ||
381 | uint32_t n_tot_rsps; /* Total no. of responses */ | ||
382 | uint32_t n_qwrap; /* Queue wraps */ | ||
383 | uint32_t n_eq_wr_split; /* Number of split EQ WRs */ | ||
384 | uint32_t n_qentry; /* Queue entry */ | ||
385 | uint32_t n_qempty; /* Queue empty */ | ||
386 | uint32_t n_qfull; /* Queue fulls */ | ||
387 | uint32_t n_rsp_unknown; /* Unknown response type */ | ||
388 | uint32_t n_stray_comp; /* Stray completion intr */ | ||
389 | uint32_t n_flq_refill; /* Number of FL refills */ | ||
390 | }; | ||
391 | |||
392 | /* Queue metadata */ | ||
393 | struct csio_q { | ||
394 | uint16_t type; /* Type: Ingress/Egress/FL */ | ||
395 | uint16_t pidx; /* producer index */ | ||
396 | uint16_t cidx; /* consumer index */ | ||
397 | uint16_t inc_idx; /* Incremental index */ | ||
398 | uint32_t wr_sz; /* Size of all WRs in this q | ||
399 | * if fixed | ||
400 | */ | ||
401 | void *vstart; /* Base virtual address | ||
402 | * of queue | ||
403 | */ | ||
404 | void *vwrap; /* Virtual end address to | ||
405 | * wrap around at | ||
406 | */ | ||
407 | uint32_t credits; /* Size of queue in credits */ | ||
408 | void *owner; /* Owner */ | ||
409 | union { /* Queue contexts */ | ||
410 | struct csio_iq iq; | ||
411 | struct csio_eq eq; | ||
412 | struct csio_fl fl; | ||
413 | } un; | ||
414 | |||
415 | dma_addr_t pstart; /* Base physical address of | ||
416 | * queue | ||
417 | */ | ||
418 | uint32_t portid; /* PCIE Channel */ | ||
419 | uint32_t size; /* Size of queue in bytes */ | ||
420 | struct csio_qstats stats; /* Statistics */ | ||
421 | } ____cacheline_aligned_in_smp; | ||
422 | |||
423 | struct csio_sge { | ||
424 | uint32_t csio_fl_align; /* Calculated and cached | ||
425 | * for fast path | ||
426 | */ | ||
427 | uint32_t sge_control; /* padding, boundaries, | ||
428 | * lengths, etc. | ||
429 | */ | ||
430 | uint32_t sge_host_page_size; /* Host page size */ | ||
431 | uint32_t sge_fl_buf_size[CSIO_SGE_FL_SIZE_REGS]; | ||
432 | /* free list buffer sizes */ | ||
433 | uint16_t timer_val[CSIO_SGE_NTIMERS]; | ||
434 | uint8_t counter_val[CSIO_SGE_NCOUNTERS]; | ||
435 | }; | ||
436 | |||
437 | /* Work request module */ | ||
438 | struct csio_wrm { | ||
439 | int num_q; /* Number of queues */ | ||
440 | struct csio_q **q_arr; /* Array of queue pointers | ||
441 | * allocated dynamically | ||
442 | * based on configured values | ||
443 | */ | ||
444 | uint32_t fw_iq_start; /* Start ID of IQ for this fn*/ | ||
445 | uint32_t fw_eq_start; /* Start ID of EQ for this fn*/ | ||
446 | struct csio_q *intr_map[CSIO_MAX_IQ]; | ||
447 | /* IQ-id to IQ map table. */ | ||
448 | int free_qidx; /* queue idx of free queue */ | ||
449 | struct csio_sge sge; /* SGE params */ | ||
450 | }; | ||
451 | |||
452 | #define csio_get_q(__hw, __idx) ((__hw)->wrm.q_arr[__idx]) | ||
453 | #define csio_q_type(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->type) | ||
454 | #define csio_q_pidx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->pidx) | ||
455 | #define csio_q_cidx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->cidx) | ||
456 | #define csio_q_inc_idx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->inc_idx) | ||
457 | #define csio_q_vstart(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->vstart) | ||
458 | #define csio_q_pstart(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->pstart) | ||
459 | #define csio_q_size(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->size) | ||
460 | #define csio_q_credits(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->credits) | ||
461 | #define csio_q_portid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->portid) | ||
462 | #define csio_q_wr_sz(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->wr_sz) | ||
463 | #define csio_q_iqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.iq.iqid) | ||
464 | #define csio_q_physiqid(__hw, __idx) \ | ||
465 | ((__hw)->wrm.q_arr[(__idx)]->un.iq.physiqid) | ||
466 | #define csio_q_iq_flq_idx(__hw, __idx) \ | ||
467 | ((__hw)->wrm.q_arr[(__idx)]->un.iq.flq_idx) | ||
468 | #define csio_q_eqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.eq.eqid) | ||
469 | #define csio_q_flid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.fl.flid) | ||
470 | |||
471 | #define csio_q_physeqid(__hw, __idx) \ | ||
472 | ((__hw)->wrm.q_arr[(__idx)]->un.eq.physeqid) | ||
473 | #define csio_iq_has_fl(__iq) ((__iq)->un.iq.flq_idx != -1) | ||
474 | |||
475 | #define csio_q_iq_to_flid(__hw, __iq_idx) \ | ||
476 | csio_q_flid((__hw), (__hw)->wrm.q_arr[(__iq_idx)]->un.iq.flq_idx) | ||
477 | #define csio_q_set_intr_map(__hw, __iq_idx, __rel_iq_id) \ | ||
478 | (__hw)->wrm.intr_map[__rel_iq_id] = csio_get_q(__hw, __iq_idx) | ||
479 | #define csio_q_eq_wrap(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.eq.wrap) | ||
480 | |||
481 | struct csio_mb; | ||
482 | |||
483 | int csio_wr_alloc_q(struct csio_hw *, uint32_t, uint32_t, | ||
484 | uint16_t, void *, uint32_t, int, iq_handler_t); | ||
485 | int csio_wr_iq_create(struct csio_hw *, void *, int, | ||
486 | uint32_t, uint8_t, bool, | ||
487 | void (*)(struct csio_hw *, struct csio_mb *)); | ||
488 | int csio_wr_eq_create(struct csio_hw *, void *, int, int, uint8_t, | ||
489 | void (*)(struct csio_hw *, struct csio_mb *)); | ||
490 | int csio_wr_destroy_queues(struct csio_hw *, bool cmd); | ||
491 | |||
492 | |||
493 | int csio_wr_get(struct csio_hw *, int, uint32_t, | ||
494 | struct csio_wr_pair *); | ||
495 | void csio_wr_copy_to_wrp(void *, struct csio_wr_pair *, uint32_t, uint32_t); | ||
496 | int csio_wr_issue(struct csio_hw *, int, bool); | ||
497 | int csio_wr_process_iq(struct csio_hw *, struct csio_q *, | ||
498 | void (*)(struct csio_hw *, void *, | ||
499 | uint32_t, struct csio_fl_dma_buf *, | ||
500 | void *), | ||
501 | void *); | ||
502 | int csio_wr_process_iq_idx(struct csio_hw *, int, | ||
503 | void (*)(struct csio_hw *, void *, | ||
504 | uint32_t, struct csio_fl_dma_buf *, | ||
505 | void *), | ||
506 | void *); | ||
507 | |||
508 | void csio_wr_sge_init(struct csio_hw *); | ||
509 | int csio_wrm_init(struct csio_wrm *, struct csio_hw *); | ||
510 | void csio_wrm_exit(struct csio_wrm *, struct csio_hw *); | ||
511 | |||
512 | #endif /* ifndef __CSIO_WR_H__ */ | ||
diff --git a/drivers/scsi/csiostor/t4fw_api_stor.h b/drivers/scsi/csiostor/t4fw_api_stor.h new file mode 100644 index 000000000000..b96903a24e63 --- /dev/null +++ b/drivers/scsi/csiostor/t4fw_api_stor.h | |||
@@ -0,0 +1,578 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio FCoE driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #ifndef _T4FW_API_STOR_H_ | ||
36 | #define _T4FW_API_STOR_H_ | ||
37 | |||
38 | |||
39 | /****************************************************************************** | ||
40 | * R E T U R N V A L U E S | ||
41 | ********************************/ | ||
42 | |||
43 | enum fw_retval { | ||
44 | FW_SUCCESS = 0, /* completed successfully */ | ||
45 | FW_EPERM = 1, /* operation not permitted */ | ||
46 | FW_ENOENT = 2, /* no such file or directory */ | ||
47 | FW_EIO = 5, /* input/output error; hw bad */ | ||
48 | FW_ENOEXEC = 8, /* exec format error; inv microcode */ | ||
49 | FW_EAGAIN = 11, /* try again */ | ||
50 | FW_ENOMEM = 12, /* out of memory */ | ||
51 | FW_EFAULT = 14, /* bad address; fw bad */ | ||
52 | FW_EBUSY = 16, /* resource busy */ | ||
53 | FW_EEXIST = 17, /* file exists */ | ||
54 | FW_EINVAL = 22, /* invalid argument */ | ||
55 | FW_ENOSPC = 28, /* no space left on device */ | ||
56 | FW_ENOSYS = 38, /* functionality not implemented */ | ||
57 | FW_EPROTO = 71, /* protocol error */ | ||
58 | FW_EADDRINUSE = 98, /* address already in use */ | ||
59 | FW_EADDRNOTAVAIL = 99, /* cannot assign requested address */ | ||
60 | FW_ENETDOWN = 100, /* network is down */ | ||
61 | FW_ENETUNREACH = 101, /* network is unreachable */ | ||
62 | FW_ENOBUFS = 105, /* no buffer space available */ | ||
63 | FW_ETIMEDOUT = 110, /* timeout */ | ||
64 | FW_EINPROGRESS = 115, /* fw internal */ | ||
65 | FW_SCSI_ABORT_REQUESTED = 128, /* */ | ||
66 | FW_SCSI_ABORT_TIMEDOUT = 129, /* */ | ||
67 | FW_SCSI_ABORTED = 130, /* */ | ||
68 | FW_SCSI_CLOSE_REQUESTED = 131, /* */ | ||
69 | FW_ERR_LINK_DOWN = 132, /* */ | ||
70 | FW_RDEV_NOT_READY = 133, /* */ | ||
71 | FW_ERR_RDEV_LOST = 134, /* */ | ||
72 | FW_ERR_RDEV_LOGO = 135, /* */ | ||
73 | FW_FCOE_NO_XCHG = 136, /* */ | ||
74 | FW_SCSI_RSP_ERR = 137, /* */ | ||
75 | FW_ERR_RDEV_IMPL_LOGO = 138, /* */ | ||
76 | FW_SCSI_UNDER_FLOW_ERR = 139, /* */ | ||
77 | FW_SCSI_OVER_FLOW_ERR = 140, /* */ | ||
78 | FW_SCSI_DDP_ERR = 141, /* DDP error */ | ||
79 | FW_SCSI_TASK_ERR = 142, /* No SCSI tasks available */ | ||
80 | }; | ||
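Values below 128 deliberately track the standard errno numbering, while the SCSI/FCoE-specific statuses start at 128. A minimal sketch of a translation helper in kernel context; fwretval_to_errno is a hypothetical name, not a driver function:

    #include <linux/errno.h>

    /* Hypothetical helper: fold a firmware status into a negative errno.
     * Statuses below FW_SCSI_ABORT_REQUESTED (128) mirror errno directly;
     * the driver-private SCSI/FCoE statuses need caller-specific handling
     * and are collapsed to -EIO in this sketch.
     */
    static inline int fwretval_to_errno(enum fw_retval ret)
    {
    	if (ret == FW_SUCCESS)
    		return 0;
    	if (ret < FW_SCSI_ABORT_REQUESTED)
    		return -(int)ret;	/* e.g. FW_EINVAL (22) -> -EINVAL */
    	return -EIO;
    }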
81 | |||
82 | enum fw_fcoe_link_sub_op { | ||
83 | FCOE_LINK_DOWN = 0x0, | ||
84 | FCOE_LINK_UP = 0x1, | ||
85 | FCOE_LINK_COND = 0x2, | ||
86 | }; | ||
87 | |||
88 | enum fw_fcoe_link_status { | ||
89 | FCOE_LINKDOWN = 0x0, | ||
90 | FCOE_LINKUP = 0x1, | ||
91 | }; | ||
92 | |||
93 | enum fw_ofld_prot { | ||
94 | PROT_FCOE = 0x1, | ||
95 | PROT_ISCSI = 0x2, | ||
96 | }; | ||
97 | |||
98 | enum rport_type_fcoe { | ||
99 | FLOGI_VFPORT = 0x1, /* 0xfffffe */ | ||
100 | FDISC_VFPORT = 0x2, /* 0xfffffe */ | ||
101 | NS_VNPORT = 0x3, /* 0xfffffc */ | ||
102 | REG_FC4_VNPORT = 0x4, /* any FC4 type VN_PORT */ | ||
103 | REG_VNPORT = 0x5, /* 0xfffxxx - non-FC4 port in switch */ | ||
104 | FDMI_VNPORT = 0x6, /* 0xfffffa */ | ||
105 | FAB_CTLR_VNPORT = 0x7, /* 0xfffffd */ | ||
106 | }; | ||
107 | |||
108 | enum event_cause_fcoe { | ||
109 | PLOGI_ACC_RCVD = 0x01, | ||
110 | PLOGI_RJT_RCVD = 0x02, | ||
111 | PLOGI_RCVD = 0x03, | ||
112 | PLOGO_RCVD = 0x04, | ||
113 | PRLI_ACC_RCVD = 0x05, | ||
114 | PRLI_RJT_RCVD = 0x06, | ||
115 | PRLI_RCVD = 0x07, | ||
116 | PRLO_RCVD = 0x08, | ||
117 | NPORT_ID_CHGD = 0x09, | ||
118 | FLOGO_RCVD = 0x0a, | ||
119 | CLR_VIRT_LNK_RCVD = 0x0b, | ||
120 | FLOGI_ACC_RCVD = 0x0c, | ||
121 | FLOGI_RJT_RCVD = 0x0d, | ||
122 | FDISC_ACC_RCVD = 0x0e, | ||
123 | FDISC_RJT_RCVD = 0x0f, | ||
124 | FLOGI_TMO_MAX_RETRY = 0x10, | ||
125 | IMPL_LOGO_ADISC_ACC = 0x11, | ||
126 | IMPL_LOGO_ADISC_RJT = 0x12, | ||
127 | IMPL_LOGO_ADISC_CNFLT = 0x13, | ||
128 | PRLI_TMO = 0x14, | ||
129 | ADISC_TMO = 0x15, | ||
130 | RSCN_DEV_LOST = 0x16, | ||
131 | SCR_ACC_RCVD = 0x17, | ||
132 | ADISC_RJT_RCVD = 0x18, | ||
133 | LOGO_SNT = 0x19, | ||
134 | PROTO_ERR_IMPL_LOGO = 0x1a, | ||
135 | }; | ||
136 | |||
137 | enum fcoe_cmn_type { | ||
138 | FCOE_ELS, | ||
139 | FCOE_CT, | ||
140 | FCOE_SCSI_CMD, | ||
141 | FCOE_UNSOL_ELS, | ||
142 | }; | ||
143 | |||
144 | enum fw_wr_stor_opcodes { | ||
145 | FW_RDEV_WR = 0x38, | ||
146 | FW_FCOE_ELS_CT_WR = 0x30, | ||
147 | FW_SCSI_WRITE_WR = 0x31, | ||
148 | FW_SCSI_READ_WR = 0x32, | ||
149 | FW_SCSI_CMD_WR = 0x33, | ||
150 | FW_SCSI_ABRT_CLS_WR = 0x34, | ||
151 | }; | ||
152 | |||
153 | struct fw_rdev_wr { | ||
154 | __be32 op_to_immdlen; | ||
155 | __be32 alloc_to_len16; | ||
156 | __be64 cookie; | ||
157 | u8 protocol; | ||
158 | u8 event_cause; | ||
159 | u8 cur_state; | ||
160 | u8 prev_state; | ||
161 | __be32 flags_to_assoc_flowid; | ||
162 | union rdev_entry { | ||
163 | struct fcoe_rdev_entry { | ||
164 | __be32 flowid; | ||
165 | u8 protocol; | ||
166 | u8 event_cause; | ||
167 | u8 flags; | ||
168 | u8 rjt_reason; | ||
169 | u8 cur_login_st; | ||
170 | u8 prev_login_st; | ||
171 | __be16 rcv_fr_sz; | ||
172 | u8 rd_xfer_rdy_to_rport_type; | ||
173 | u8 vft_to_qos; | ||
174 | u8 org_proc_assoc_to_acc_rsp_code; | ||
175 | u8 enh_disc_to_tgt; | ||
176 | u8 wwnn[8]; | ||
177 | u8 wwpn[8]; | ||
178 | __be16 iqid; | ||
179 | u8 fc_oui[3]; | ||
180 | u8 r_id[3]; | ||
181 | } fcoe_rdev; | ||
182 | struct iscsi_rdev_entry { | ||
183 | __be32 flowid; | ||
184 | u8 protocol; | ||
185 | u8 event_cause; | ||
186 | u8 flags; | ||
187 | u8 r3; | ||
188 | __be16 iscsi_opts; | ||
189 | __be16 tcp_opts; | ||
190 | __be16 ip_opts; | ||
191 | __be16 max_rcv_len; | ||
192 | __be16 max_snd_len; | ||
193 | __be16 first_brst_len; | ||
194 | __be16 max_brst_len; | ||
195 | __be16 r4; | ||
196 | __be16 def_time2wait; | ||
197 | __be16 def_time2ret; | ||
198 | __be16 nop_out_intrvl; | ||
199 | __be16 non_scsi_to; | ||
200 | __be16 isid; | ||
201 | __be16 tsid; | ||
202 | __be16 port; | ||
203 | __be16 tpgt; | ||
204 | u8 r5[6]; | ||
205 | __be16 iqid; | ||
206 | } iscsi_rdev; | ||
207 | } u; | ||
208 | }; | ||
209 | |||
210 | #define FW_RDEV_WR_FLOWID_GET(x) (((x) >> 8) & 0xfffff) | ||
211 | #define FW_RDEV_WR_ASSOC_FLOWID_GET(x) (((x) >> 0) & 0xfffff) | ||
212 | #define FW_RDEV_WR_RPORT_TYPE_GET(x) (((x) >> 0) & 0x1f) | ||
213 | #define FW_RDEV_WR_NPIV_GET(x) (((x) >> 6) & 0x1) | ||
214 | #define FW_RDEV_WR_CLASS_GET(x) (((x) >> 4) & 0x3) | ||
215 | #define FW_RDEV_WR_TASK_RETRY_ID_GET(x) (((x) >> 5) & 0x1) | ||
216 | #define FW_RDEV_WR_RETRY_GET(x) (((x) >> 4) & 0x1) | ||
217 | #define FW_RDEV_WR_CONF_CMPL_GET(x) (((x) >> 3) & 0x1) | ||
218 | #define FW_RDEV_WR_INI_GET(x) (((x) >> 1) & 0x1) | ||
219 | #define FW_RDEV_WR_TGT_GET(x) (((x) >> 0) & 0x1) | ||
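These GET macros are plain shift-and-mask decoders over the words of struct fw_rdev_wr. A hedged decode sketch in kernel context; the pairing of each macro with its source field is inferred from the field names (e.g. rd_xfer_rdy_to_rport_type carries RPORT_TYPE in its low five bits), so confirm against csio_rnode.c before relying on it:

    /* Sketch: pull the flow IDs and rport type out of a received rdev WR.
     * Field/macro pairings are inferred from the names above.
     */
    static void decode_rdev_wr(const struct fw_rdev_wr *wr)
    {
    	u32 flowid = FW_RDEV_WR_FLOWID_GET(be32_to_cpu(wr->alloc_to_len16));
    	u32 assoc  = FW_RDEV_WR_ASSOC_FLOWID_GET(be32_to_cpu(wr->flags_to_assoc_flowid));
    	u8  rtype  = FW_RDEV_WR_RPORT_TYPE_GET(wr->u.fcoe_rdev.rd_xfer_rdy_to_rport_type);

    	pr_debug("rdev: flowid %u assoc %u rport type %u\n", flowid, assoc, rtype);
    }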
220 | |||
221 | struct fw_fcoe_els_ct_wr { | ||
222 | __be32 op_immdlen; | ||
223 | __be32 flowid_len16; | ||
224 | __be64 cookie; | ||
225 | __be16 iqid; | ||
226 | u8 tmo_val; | ||
227 | u8 els_ct_type; | ||
228 | u8 ctl_pri; | ||
229 | u8 cp_en_class; | ||
230 | __be16 xfer_cnt; | ||
231 | u8 fl_to_sp; | ||
232 | u8 l_id[3]; | ||
233 | u8 r5; | ||
234 | u8 r_id[3]; | ||
235 | __be64 rsp_dmaaddr; | ||
236 | __be32 rsp_dmalen; | ||
237 | __be32 r6; | ||
238 | }; | ||
239 | |||
240 | #define FW_FCOE_ELS_CT_WR_OPCODE(x) ((x) << 24) | ||
241 | #define FW_FCOE_ELS_CT_WR_OPCODE_GET(x) (((x) >> 24) & 0xff) | ||
242 | #define FW_FCOE_ELS_CT_WR_IMMDLEN(x) ((x) << 0) | ||
243 | #define FW_FCOE_ELS_CT_WR_IMMDLEN_GET(x) (((x) >> 0) & 0xff) | ||
244 | #define FW_FCOE_ELS_CT_WR_SP(x) ((x) << 0) | ||
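The write-side macros compose the first word of the WR: the opcode (FW_FCOE_ELS_CT_WR, 0x30, from enum fw_wr_stor_opcodes above) in the top byte and the immediate-data length in the bottom byte. A minimal packing sketch; the helper name and immd_len value are illustrative:

    /* Sketch: build op_immdlen for an ELS/CT WR that carries immd_len
     * bytes of immediate payload (helper name is hypothetical).
     */
    static void els_ct_wr_set_op(struct fw_fcoe_els_ct_wr *wr, u8 immd_len)
    {
    	wr->op_immdlen = cpu_to_be32(FW_FCOE_ELS_CT_WR_OPCODE(FW_FCOE_ELS_CT_WR) |
    				     FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len));
    }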
245 | |||
246 | struct fw_scsi_write_wr { | ||
247 | __be32 op_immdlen; | ||
248 | __be32 flowid_len16; | ||
249 | __be64 cookie; | ||
250 | __be16 iqid; | ||
251 | u8 tmo_val; | ||
252 | u8 use_xfer_cnt; | ||
253 | union fw_scsi_write_priv { | ||
254 | struct fcoe_write_priv { | ||
255 | u8 ctl_pri; | ||
256 | u8 cp_en_class; | ||
257 | u8 r3_lo[2]; | ||
258 | } fcoe; | ||
259 | struct iscsi_write_priv { | ||
260 | u8 r3[4]; | ||
261 | } iscsi; | ||
262 | } u; | ||
263 | __be32 xfer_cnt; | ||
264 | __be32 ini_xfer_cnt; | ||
265 | __be64 rsp_dmaaddr; | ||
266 | __be32 rsp_dmalen; | ||
267 | __be32 r4; | ||
268 | }; | ||
269 | |||
270 | #define FW_SCSI_WRITE_WR_IMMDLEN(x) ((x) << 0) | ||
271 | |||
272 | struct fw_scsi_read_wr { | ||
273 | __be32 op_immdlen; | ||
274 | __be32 flowid_len16; | ||
275 | __be64 cookie; | ||
276 | __be16 iqid; | ||
277 | u8 tmo_val; | ||
278 | u8 use_xfer_cnt; | ||
279 | union fw_scsi_read_priv { | ||
280 | struct fcoe_read_priv { | ||
281 | u8 ctl_pri; | ||
282 | u8 cp_en_class; | ||
283 | u8 r3_lo[2]; | ||
284 | } fcoe; | ||
285 | struct iscsi_read_priv { | ||
286 | u8 r3[4]; | ||
287 | } iscsi; | ||
288 | } u; | ||
289 | __be32 xfer_cnt; | ||
290 | __be32 ini_xfer_cnt; | ||
291 | __be64 rsp_dmaaddr; | ||
292 | __be32 rsp_dmalen; | ||
293 | __be32 r4; | ||
294 | }; | ||
295 | |||
296 | #define FW_SCSI_READ_WR_IMMDLEN(x) ((x) << 0) | ||
297 | |||
298 | struct fw_scsi_cmd_wr { | ||
299 | __be32 op_immdlen; | ||
300 | __be32 flowid_len16; | ||
301 | __be64 cookie; | ||
302 | __be16 iqid; | ||
303 | u8 tmo_val; | ||
304 | u8 r3; | ||
305 | union fw_scsi_cmd_priv { | ||
306 | struct fcoe_cmd_priv { | ||
307 | u8 ctl_pri; | ||
308 | u8 cp_en_class; | ||
309 | u8 r4_lo[2]; | ||
310 | } fcoe; | ||
311 | struct iscsi_cmd_priv { | ||
312 | u8 r4[4]; | ||
313 | } iscsi; | ||
314 | } u; | ||
315 | u8 r5[8]; | ||
316 | __be64 rsp_dmaaddr; | ||
317 | __be32 rsp_dmalen; | ||
318 | __be32 r6; | ||
319 | }; | ||
320 | |||
321 | #define FW_SCSI_CMD_WR_IMMDLEN(x) ((x) << 0) | ||
322 | |||
323 | #define SCSI_ABORT 0 | ||
324 | #define SCSI_CLOSE 1 | ||
325 | |||
326 | struct fw_scsi_abrt_cls_wr { | ||
327 | __be32 op_immdlen; | ||
328 | __be32 flowid_len16; | ||
329 | __be64 cookie; | ||
330 | __be16 iqid; | ||
331 | u8 tmo_val; | ||
332 | u8 sub_opcode_to_chk_all_io; | ||
333 | u8 r3[4]; | ||
334 | __be64 t_cookie; | ||
335 | }; | ||
336 | |||
337 | #define FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(x) ((x) << 2) | ||
338 | #define FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(x) (((x) >> 2) & 0x3f) | ||
339 | #define FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(x) ((x) << 0) | ||
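SCSI_ABORT and SCSI_CLOSE select the sub-operation, packed at bit 2 of sub_opcode_to_chk_all_io, with the check-all-I/O flag in bit 0. A hedged sketch of filling that byte (helper name is hypothetical):

    /* Sketch: request an abort (or a close, if abort is false) of a single
     * exchange; passing 1 to CHK_ALL_IO would instead apply the operation
     * to all outstanding I/Os on the flow.
     */
    static void abrt_cls_wr_set_sub_op(struct fw_scsi_abrt_cls_wr *wr, bool abort)
    {
    	wr->sub_opcode_to_chk_all_io =
    		FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(abort ? SCSI_ABORT : SCSI_CLOSE) |
    		FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(0);
    }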
340 | |||
341 | enum fw_cmd_stor_opcodes { | ||
342 | FW_FCOE_RES_INFO_CMD = 0x31, | ||
343 | FW_FCOE_LINK_CMD = 0x32, | ||
344 | FW_FCOE_VNP_CMD = 0x33, | ||
345 | FW_FCOE_SPARAMS_CMD = 0x35, | ||
346 | FW_FCOE_STATS_CMD = 0x37, | ||
347 | FW_FCOE_FCF_CMD = 0x38, | ||
348 | }; | ||
349 | |||
350 | struct fw_fcoe_res_info_cmd { | ||
351 | __be32 op_to_read; | ||
352 | __be32 retval_len16; | ||
353 | __be16 e_d_tov; | ||
354 | __be16 r_a_tov_seq; | ||
355 | __be16 r_a_tov_els; | ||
356 | __be16 r_r_tov; | ||
357 | __be32 max_xchgs; | ||
358 | __be32 max_ssns; | ||
359 | __be32 used_xchgs; | ||
360 | __be32 used_ssns; | ||
361 | __be32 max_fcfs; | ||
362 | __be32 max_vnps; | ||
363 | __be32 used_fcfs; | ||
364 | __be32 used_vnps; | ||
365 | }; | ||
366 | |||
367 | struct fw_fcoe_link_cmd { | ||
368 | __be32 op_to_portid; | ||
369 | __be32 retval_len16; | ||
370 | __be32 sub_opcode_fcfi; | ||
371 | u8 r3; | ||
372 | u8 lstatus; | ||
373 | __be16 flags; | ||
374 | u8 r4; | ||
375 | u8 set_vlan; | ||
376 | __be16 vlan_id; | ||
377 | __be32 vnpi_pkd; | ||
378 | __be16 r6; | ||
379 | u8 phy_mac[6]; | ||
380 | u8 vnport_wwnn[8]; | ||
381 | u8 vnport_wwpn[8]; | ||
382 | }; | ||
383 | |||
384 | #define FW_FCOE_LINK_CMD_PORTID(x) ((x) << 0) | ||
385 | #define FW_FCOE_LINK_CMD_PORTID_GET(x) (((x) >> 0) & 0xf) | ||
386 | #define FW_FCOE_LINK_CMD_SUB_OPCODE(x) ((x) << 24U) | ||
387 | #define FW_FCOE_LINK_CMD_FCFI(x) ((x) << 0) | ||
388 | #define FW_FCOE_LINK_CMD_FCFI_GET(x) (((x) >> 0) & 0xffffff) | ||
389 | #define FW_FCOE_LINK_CMD_VNPI_GET(x) (((x) >> 0) & 0xfffff) | ||
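The link command's third word multiplexes the sub-opcode (top byte, from enum fw_fcoe_link_sub_op above) with the FCF index in the low 24 bits. A minimal composition sketch; the helper name and fcfi value are illustrative:

    /* Sketch: report link-up against FCF instance fcfi. */
    static void link_cmd_set_sub_op(struct fw_fcoe_link_cmd *cmdp, u32 fcfi)
    {
    	cmdp->sub_opcode_fcfi =
    		cpu_to_be32(FW_FCOE_LINK_CMD_SUB_OPCODE(FCOE_LINK_UP) |
    			    FW_FCOE_LINK_CMD_FCFI(fcfi));
    }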
390 | |||
391 | struct fw_fcoe_vnp_cmd { | ||
392 | __be32 op_to_fcfi; | ||
393 | __be32 alloc_to_len16; | ||
394 | __be32 gen_wwn_to_vnpi; | ||
395 | __be32 vf_id; | ||
396 | __be16 iqid; | ||
397 | u8 vnport_mac[6]; | ||
398 | u8 vnport_wwnn[8]; | ||
399 | u8 vnport_wwpn[8]; | ||
400 | u8 cmn_srv_parms[16]; | ||
401 | u8 clsp_word_0_1[8]; | ||
402 | }; | ||
403 | |||
404 | #define FW_FCOE_VNP_CMD_FCFI(x) ((x) << 0) | ||
405 | #define FW_FCOE_VNP_CMD_ALLOC (1U << 31) | ||
406 | #define FW_FCOE_VNP_CMD_FREE (1U << 30) | ||
407 | #define FW_FCOE_VNP_CMD_MODIFY (1U << 29) | ||
408 | #define FW_FCOE_VNP_CMD_GEN_WWN (1U << 22) | ||
409 | #define FW_FCOE_VNP_CMD_VFID_EN (1U << 20) | ||
410 | #define FW_FCOE_VNP_CMD_VNPI(x) ((x) << 0) | ||
411 | #define FW_FCOE_VNP_CMD_VNPI_GET(x) (((x) >> 0) & 0xfffff) | ||
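ALLOC, FREE, and MODIFY occupy the top bits of alloc_to_len16 alongside the command length, while gen_wwn_to_vnpi carries the GEN_WWN flag and the VN_Port index. A hedged sketch, assuming the generic FW_CMD_LEN16() helper from the shared t4fw_api.h:

    /* Sketch: mark a VNP command as an allocation and ask firmware to
     * generate the WWNs (FW_CMD_LEN16() is assumed from t4fw_api.h).
     */
    static void vnp_cmd_mark_alloc(struct fw_fcoe_vnp_cmd *cmdp)
    {
    	cmdp->alloc_to_len16 = cpu_to_be32(FW_FCOE_VNP_CMD_ALLOC |
    					   FW_CMD_LEN16(sizeof(*cmdp) / 16));
    	cmdp->gen_wwn_to_vnpi = cpu_to_be32(FW_FCOE_VNP_CMD_GEN_WWN);
    }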
412 | |||
413 | struct fw_fcoe_sparams_cmd { | ||
414 | __be32 op_to_portid; | ||
415 | __be32 retval_len16; | ||
416 | u8 r3[7]; | ||
417 | u8 cos; | ||
418 | u8 lport_wwnn[8]; | ||
419 | u8 lport_wwpn[8]; | ||
420 | u8 cmn_srv_parms[16]; | ||
421 | u8 cls_srv_parms[16]; | ||
422 | }; | ||
423 | |||
424 | #define FW_FCOE_SPARAMS_CMD_PORTID(x) ((x) << 0) | ||
425 | |||
426 | struct fw_fcoe_stats_cmd { | ||
427 | __be32 op_to_flowid; | ||
428 | __be32 free_to_len16; | ||
429 | union fw_fcoe_stats { | ||
430 | struct fw_fcoe_stats_ctl { | ||
431 | u8 nstats_port; | ||
432 | u8 port_valid_ix; | ||
433 | __be16 r6; | ||
434 | __be32 r7; | ||
435 | __be64 stat0; | ||
436 | __be64 stat1; | ||
437 | __be64 stat2; | ||
438 | __be64 stat3; | ||
439 | __be64 stat4; | ||
440 | __be64 stat5; | ||
441 | } ctl; | ||
442 | struct fw_fcoe_port_stats { | ||
443 | __be64 tx_bcast_bytes; | ||
444 | __be64 tx_bcast_frames; | ||
445 | __be64 tx_mcast_bytes; | ||
446 | __be64 tx_mcast_frames; | ||
447 | __be64 tx_ucast_bytes; | ||
448 | __be64 tx_ucast_frames; | ||
449 | __be64 tx_drop_frames; | ||
450 | __be64 tx_offload_bytes; | ||
451 | __be64 tx_offload_frames; | ||
452 | __be64 rx_bcast_bytes; | ||
453 | __be64 rx_bcast_frames; | ||
454 | __be64 rx_mcast_bytes; | ||
455 | __be64 rx_mcast_frames; | ||
456 | __be64 rx_ucast_bytes; | ||
457 | __be64 rx_ucast_frames; | ||
458 | __be64 rx_err_frames; | ||
459 | } port_stats; | ||
460 | struct fw_fcoe_fcf_stats { | ||
461 | __be32 fip_tx_bytes; | ||
462 | __be32 fip_tx_fr; | ||
463 | __be64 fcf_ka; | ||
464 | __be64 mcast_adv_rcvd; | ||
465 | __be16 ucast_adv_rcvd; | ||
466 | __be16 sol_sent; | ||
467 | __be16 vlan_req; | ||
468 | __be16 vlan_rpl; | ||
469 | __be16 clr_vlink; | ||
470 | __be16 link_down; | ||
471 | __be16 link_up; | ||
472 | __be16 logo; | ||
473 | __be16 flogi_req; | ||
474 | __be16 flogi_rpl; | ||
475 | __be16 fdisc_req; | ||
476 | __be16 fdisc_rpl; | ||
477 | __be16 fka_prd_chg; | ||
478 | __be16 fc_map_chg; | ||
479 | __be16 vfid_chg; | ||
480 | u8 no_fka_req; | ||
481 | u8 no_vnp; | ||
482 | } fcf_stats; | ||
483 | struct fw_fcoe_pcb_stats { | ||
484 | __be64 tx_bytes; | ||
485 | __be64 tx_frames; | ||
486 | __be64 rx_bytes; | ||
487 | __be64 rx_frames; | ||
488 | __be32 vnp_ka; | ||
489 | __be32 unsol_els_rcvd; | ||
490 | __be64 unsol_cmd_rcvd; | ||
491 | __be16 implicit_logo; | ||
492 | __be16 flogi_inv_sparm; | ||
493 | __be16 fdisc_inv_sparm; | ||
494 | __be16 flogi_rjt; | ||
495 | __be16 fdisc_rjt; | ||
496 | __be16 no_ssn; | ||
497 | __be16 mac_flt_fail; | ||
498 | __be16 inv_fr_rcvd; | ||
499 | } pcb_stats; | ||
500 | struct fw_fcoe_scb_stats { | ||
501 | __be64 tx_bytes; | ||
502 | __be64 tx_frames; | ||
503 | __be64 rx_bytes; | ||
504 | __be64 rx_frames; | ||
505 | __be32 host_abrt_req; | ||
506 | __be32 adap_auto_abrt; | ||
507 | __be32 adap_abrt_rsp; | ||
508 | __be32 host_ios_req; | ||
509 | __be16 ssn_offl_ios; | ||
510 | __be16 ssn_not_rdy_ios; | ||
511 | u8 rx_data_ddp_err; | ||
512 | u8 ddp_flt_set_err; | ||
513 | __be16 rx_data_fr_err; | ||
514 | u8 bad_st_abrt_req; | ||
515 | u8 no_io_abrt_req; | ||
516 | u8 abort_tmo; | ||
517 | u8 abort_tmo_2; | ||
518 | __be32 abort_req; | ||
519 | u8 no_ppod_res_tmo; | ||
520 | u8 bp_tmo; | ||
521 | u8 adap_auto_cls; | ||
522 | u8 no_io_cls_req; | ||
523 | __be32 host_cls_req; | ||
524 | __be64 unsol_cmd_rcvd; | ||
525 | __be32 plogi_req_rcvd; | ||
526 | __be32 prli_req_rcvd; | ||
527 | __be16 logo_req_rcvd; | ||
528 | __be16 prlo_req_rcvd; | ||
529 | __be16 plogi_rjt_rcvd; | ||
530 | __be16 prli_rjt_rcvd; | ||
531 | __be32 adisc_req_rcvd; | ||
532 | __be32 rscn_rcvd; | ||
533 | __be32 rrq_req_rcvd; | ||
534 | __be32 unsol_els_rcvd; | ||
535 | u8 adisc_rjt_rcvd; | ||
536 | u8 scr_rjt; | ||
537 | u8 ct_rjt; | ||
538 | u8 inval_bls_rcvd; | ||
539 | __be32 ba_rjt_rcvd; | ||
540 | } scb_stats; | ||
541 | } u; | ||
542 | }; | ||
543 | |||
544 | #define FW_FCOE_STATS_CMD_FLOWID(x) ((x) << 0) | ||
545 | #define FW_FCOE_STATS_CMD_FREE (1U << 30) | ||
546 | #define FW_FCOE_STATS_CMD_NSTATS(x) ((x) << 4) | ||
547 | #define FW_FCOE_STATS_CMD_PORT(x) ((x) << 0) | ||
548 | #define FW_FCOE_STATS_CMD_PORT_VALID (1U << 7) | ||
549 | #define FW_FCOE_STATS_CMD_IX(x) ((x) << 0) | ||
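Per the shifts above, NSTATS and PORT share the ctl.nstats_port byte while PORT_VALID and IX share ctl.port_valid_ix. A hedged sketch of requesting a window of port counters; the helper name and parameter values are illustrative:

    /* Sketch: ask firmware for nstats counters of port portid starting
     * at counter index idx (field/macro pairing inferred from the names).
     */
    static void stats_cmd_set_ctl(struct fw_fcoe_stats_cmd *cmdp,
    			      u8 nstats, u8 portid, u8 idx)
    {
    	cmdp->u.ctl.nstats_port   = FW_FCOE_STATS_CMD_NSTATS(nstats) |
    				    FW_FCOE_STATS_CMD_PORT(portid);
    	cmdp->u.ctl.port_valid_ix = FW_FCOE_STATS_CMD_PORT_VALID |
    				    FW_FCOE_STATS_CMD_IX(idx);
    }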
550 | |||
551 | struct fw_fcoe_fcf_cmd { | ||
552 | __be32 op_to_fcfi; | ||
553 | __be32 retval_len16; | ||
554 | __be16 priority_pkd; | ||
555 | u8 mac[6]; | ||
556 | u8 name_id[8]; | ||
557 | u8 fabric[8]; | ||
558 | __be16 vf_id; | ||
559 | __be16 max_fcoe_size; | ||
560 | u8 vlan_id; | ||
561 | u8 fc_map[3]; | ||
562 | __be32 fka_adv; | ||
563 | __be32 r6; | ||
564 | u8 r7_hi; | ||
565 | u8 fpma_to_portid; | ||
566 | u8 spma_mac[6]; | ||
567 | __be64 r8; | ||
568 | }; | ||
569 | |||
570 | #define FW_FCOE_FCF_CMD_FCFI(x) ((x) << 0) | ||
571 | #define FW_FCOE_FCF_CMD_FCFI_GET(x) (((x) >> 0) & 0xfffff) | ||
572 | #define FW_FCOE_FCF_CMD_PRIORITY_GET(x) (((x) >> 0) & 0xff) | ||
573 | #define FW_FCOE_FCF_CMD_FPMA_GET(x) (((x) >> 6) & 0x1) | ||
574 | #define FW_FCOE_FCF_CMD_SPMA_GET(x) (((x) >> 5) & 0x1) | ||
575 | #define FW_FCOE_FCF_CMD_LOGIN_GET(x) (((x) >> 4) & 0x1) | ||
576 | #define FW_FCOE_FCF_CMD_PORTID_GET(x) (((x) >> 0) & 0xf) | ||
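The FPMA/SPMA/LOGIN flags and the port ID share the fpma_to_portid byte, per the GET shifts above. A small decode sketch (pairings inferred from the field name; helper name is hypothetical):

    /* Sketch: decode the addressing-mode flags from an FCF command reply. */
    static void fcf_cmd_decode(const struct fw_fcoe_fcf_cmd *rsp)
    {
    	u8 byte = rsp->fpma_to_portid;

    	pr_debug("fcf: fcfi %u fpma %u spma %u login %u port %u\n",
    		 FW_FCOE_FCF_CMD_FCFI_GET(be32_to_cpu(rsp->op_to_fcfi)),
    		 FW_FCOE_FCF_CMD_FPMA_GET(byte),
    		 FW_FCOE_FCF_CMD_SPMA_GET(byte),
    		 FW_FCOE_FCF_CMD_LOGIN_GET(byte),
    		 FW_FCOE_FCF_CMD_PORTID_GET(byte));
    }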
577 | |||
578 | #endif /* _T4FW_API_STOR_H_ */ | ||