Diffstat (limited to 'drivers/scsi/csiostor/csio_rnode.c')
-rw-r--r--  drivers/scsi/csiostor/csio_rnode.c | 912
1 file changed, 912 insertions(+), 0 deletions(-)
diff --git a/drivers/scsi/csiostor/csio_rnode.c b/drivers/scsi/csiostor/csio_rnode.c
new file mode 100644
index 000000000000..b0ae430e436a
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_rnode.c
@@ -0,0 +1,912 @@
1/*
2 * This file is part of the Chelsio FCoE driver for Linux.
3 *
4 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/string.h>
36#include <scsi/scsi_device.h>
37#include <scsi/scsi_transport_fc.h>
38#include <scsi/fc/fc_els.h>
39#include <scsi/fc/fc_fs.h>
40
41#include "csio_hw.h"
42#include "csio_lnode.h"
43#include "csio_rnode.h"
44
45static int csio_rnode_init(struct csio_rnode *, struct csio_lnode *);
46static void csio_rnode_exit(struct csio_rnode *);
47
48/* State machine forward declarations */
49static void csio_rns_uninit(struct csio_rnode *, enum csio_rn_ev);
50static void csio_rns_ready(struct csio_rnode *, enum csio_rn_ev);
51static void csio_rns_offline(struct csio_rnode *, enum csio_rn_ev);
52static void csio_rns_disappeared(struct csio_rnode *, enum csio_rn_ev);
53
54/* RNF event mapping */
55static enum csio_rn_ev fwevt_to_rnevt[] = {
56 CSIO_RNFE_NONE, /* None */
57 CSIO_RNFE_LOGGED_IN, /* PLOGI_ACC_RCVD */
58 CSIO_RNFE_NONE, /* PLOGI_RJT_RCVD */
59 CSIO_RNFE_PLOGI_RECV, /* PLOGI_RCVD */
60 CSIO_RNFE_LOGO_RECV, /* PLOGO_RCVD */
61 CSIO_RNFE_PRLI_DONE, /* PRLI_ACC_RCVD */
62 CSIO_RNFE_NONE, /* PRLI_RJT_RCVD */
63 CSIO_RNFE_PRLI_RECV, /* PRLI_RCVD */
64 CSIO_RNFE_PRLO_RECV, /* PRLO_RCVD */
65 CSIO_RNFE_NONE, /* NPORT_ID_CHGD */
66 CSIO_RNFE_LOGO_RECV, /* FLOGO_RCVD */
67 CSIO_RNFE_NONE, /* CLR_VIRT_LNK_RCVD */
68 CSIO_RNFE_LOGGED_IN, /* FLOGI_ACC_RCVD */
69 CSIO_RNFE_NONE, /* FLOGI_RJT_RCVD */
70 CSIO_RNFE_LOGGED_IN, /* FDISC_ACC_RCVD */
71 CSIO_RNFE_NONE, /* FDISC_RJT_RCVD */
72 CSIO_RNFE_NONE, /* FLOGI_TMO_MAX_RETRY */
73 CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_ACC */
74 CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_RJT */
75 CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_CNFLT */
76 CSIO_RNFE_NONE, /* PRLI_TMO */
77 CSIO_RNFE_NONE, /* ADISC_TMO */
78 CSIO_RNFE_NAME_MISSING, /* RSCN_DEV_LOST */
79 CSIO_RNFE_NONE, /* SCR_ACC_RCVD */
80 CSIO_RNFE_NONE, /* ADISC_RJT_RCVD */
81 CSIO_RNFE_NONE, /* LOGO_SNT */
82 CSIO_RNFE_LOGO_RECV, /* PROTO_ERR_IMPL_LOGO */
83};
84
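/*
 * Maps a FW rdev event to the corresponding rnode SM event; events beyond
 * PROTO_ERR_IMPL_LOGO map to CSIO_RNFE_NONE.
 */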
85#define CSIO_FWE_TO_RNFE(_evt) ((_evt > PROTO_ERR_IMPL_LOGO) ? \
86 CSIO_RNFE_NONE : \
87 fwevt_to_rnevt[_evt])
88int
89csio_is_rnode_ready(struct csio_rnode *rn)
90{
91 return csio_match_state(rn, csio_rns_ready);
92}
93
94static int
95csio_is_rnode_uninit(struct csio_rnode *rn)
96{
97 return csio_match_state(rn, csio_rns_uninit);
98}
99
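/*
 * csio_is_rnode_wka - Returns 1 if the rport type is a well-known port
 * (FLOGI/FDISC fabric port, name server or FDMI port), 0 otherwise.
 */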
100static int
101csio_is_rnode_wka(uint8_t rport_type)
102{
103 if ((rport_type == FLOGI_VFPORT) ||
104 (rport_type == FDISC_VFPORT) ||
105 (rport_type == NS_VNPORT) ||
106 (rport_type == FDMI_VNPORT))
107 return 1;
108
109 return 0;
110}
111
112/*
113 * csio_rn_lookup - Finds the rnode with the given flowid
114 * @ln: lnode
115 * @flowid: flow id.
116 *
117 * Does the rnode lookup on the given lnode and flowid. If no matching entry
118 * is found, NULL is returned.
119 */
120static struct csio_rnode *
121csio_rn_lookup(struct csio_lnode *ln, uint32_t flowid)
122{
123 struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
124 struct list_head *tmp;
125 struct csio_rnode *rn;
126
127 list_for_each(tmp, &rnhead->sm.sm_list) {
128 rn = (struct csio_rnode *) tmp;
129 if (rn->flowid == flowid)
130 return rn;
131 }
132
133 return NULL;
134}
135
136/*
137 * csio_rn_lookup_wwpn - Finds the rnode with the given wwpn
138 * @ln: lnode
139 * @wwpn: wwpn
140 *
141 * Does the rnode lookup on the given lnode and wwpn. If no matching entry
142 * is found, NULL is returned.
143 */
144static struct csio_rnode *
145csio_rn_lookup_wwpn(struct csio_lnode *ln, uint8_t *wwpn)
146{
147 struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
148 struct list_head *tmp;
149 struct csio_rnode *rn;
150
151 list_for_each(tmp, &rnhead->sm.sm_list) {
152 rn = (struct csio_rnode *) tmp;
153 if (!memcmp(csio_rn_wwpn(rn), wwpn, 8))
154 return rn;
155 }
156
157 return NULL;
158}
159
160/**
161 * csio_rnode_lookup_portid - Finds the rnode with the given portid
162 * @ln: lnode
163 * @portid: port id
164 *
165 * Lookup the rnode list for a given portid. If no matching entry
166 * is found, NULL is returned.
167 */
168struct csio_rnode *
169csio_rnode_lookup_portid(struct csio_lnode *ln, uint32_t portid)
170{
171 struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
172 struct list_head *tmp;
173 struct csio_rnode *rn;
174
175 list_for_each(tmp, &rnhead->sm.sm_list) {
176 rn = (struct csio_rnode *) tmp;
177 if (rn->nport_id == portid)
178 return rn;
179 }
180
181 return NULL;
182}
183
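/*
 * csio_rn_dup_flowid - Checks whether @rdev_flowid is already in use by a
 * ready rnode on any other lnode of this hw. If a duplicate is found, that
 * lnode's flowid is returned via @vnp_flowid and 1 is returned; else 0.
 */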
184static int
185csio_rn_dup_flowid(struct csio_lnode *ln, uint32_t rdev_flowid,
186 uint32_t *vnp_flowid)
187{
188 struct csio_rnode *rnhead;
189 struct list_head *tmp, *tmp1;
190 struct csio_rnode *rn;
191 struct csio_lnode *ln_tmp;
192 struct csio_hw *hw = csio_lnode_to_hw(ln);
193
194 list_for_each(tmp1, &hw->sln_head) {
195 ln_tmp = (struct csio_lnode *) tmp1;
196 if (ln_tmp == ln)
197 continue;
198
199 rnhead = (struct csio_rnode *)&ln_tmp->rnhead;
200 list_for_each(tmp, &rnhead->sm.sm_list) {
201
202 rn = (struct csio_rnode *) tmp;
203 if (csio_is_rnode_ready(rn)) {
204 if (rn->flowid == rdev_flowid) {
205 *vnp_flowid = csio_ln_flowid(ln_tmp);
206 return 1;
207 }
208 }
209 }
210 }
211
212 return 0;
213}
214
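/*
 * csio_alloc_rnode - Allocates an rnode from the hw rnode mempool and
 * initializes it. Returns NULL if allocation or initialization fails.
 */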
215static struct csio_rnode *
216csio_alloc_rnode(struct csio_lnode *ln)
217{
218 struct csio_hw *hw = csio_lnode_to_hw(ln);
219
220 struct csio_rnode *rn = mempool_alloc(hw->rnode_mempool, GFP_ATOMIC);
221 if (!rn)
222 goto err;
223
224 memset(rn, 0, sizeof(struct csio_rnode));
225 if (csio_rnode_init(rn, ln))
226 goto err_free;
227
228 CSIO_INC_STATS(ln, n_rnode_alloc);
229
230 return rn;
231
232err_free:
233 mempool_free(rn, hw->rnode_mempool);
234err:
235 CSIO_INC_STATS(ln, n_rnode_nomem);
236 return NULL;
237}
238
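/*
 * csio_free_rnode - Uninitializes the given rnode and returns it to the
 * hw rnode mempool.
 */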
239static void
240csio_free_rnode(struct csio_rnode *rn)
241{
242 struct csio_hw *hw = csio_lnode_to_hw(csio_rnode_to_lnode(rn));
243
244 csio_rnode_exit(rn);
245 CSIO_INC_STATS(rn->lnp, n_rnode_free);
246 mempool_free(rn, hw->rnode_mempool);
247}
248
249/*
250 * csio_get_rnode - Gets rnode with the given flowid
251 * @ln - lnode
252 * @flowid - flow id.
253 *
254 * Does the rnode lookup on the given lnode and flowid. If no matching
255 * rnode is found, a new rnode with the given flowid is allocated and returned.
256 */
257static struct csio_rnode *
258csio_get_rnode(struct csio_lnode *ln, uint32_t flowid)
259{
260 struct csio_rnode *rn;
261
262 rn = csio_rn_lookup(ln, flowid);
263 if (!rn) {
264 rn = csio_alloc_rnode(ln);
265 if (!rn)
266 return NULL;
267
268 rn->flowid = flowid;
269 }
270
271 return rn;
272}
273
274/*
275 * csio_put_rnode - Frees the given rnode
276 * @ln: lnode
277 * @rn: rnode to be freed.
278 *
279 * The rnode must be in the uninit state when it is freed. Returns the
280 * rnode memory to the hw rnode mempool.
281 */
282void
283csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn)
284{
285 CSIO_DB_ASSERT(csio_is_rnode_uninit(rn) != 0);
286 csio_free_rnode(rn);
287}
288
289/*
290 * csio_confirm_rnode - confirms rnode based on wwpn.
291 * @ln: lnode
292 * @rdev_flowid: remote device flowid
293 * @rdevp: remote device params
294 * This routine searches the rnode list for an entry with the same wwpn as
295 * the new rnode. If a match is found, the matched rnode is returned;
296 * otherwise a new rnode is allocated and returned.
297 * Returns the confirmed rnode, or NULL on failure.
298 */
299struct csio_rnode *
300csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid,
301 struct fcoe_rdev_entry *rdevp)
302{
303 uint8_t rport_type;
304 struct csio_rnode *rn, *match_rn;
305 uint32_t vnp_flowid;
306 uint32_t *port_id;
307
308 port_id = (uint32_t *)&rdevp->r_id[0];
309 rport_type =
310 FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);
311
312 /* Drop rdev event for cntrl port */
313 if (rport_type == FAB_CTLR_VNPORT) {
314 csio_ln_dbg(ln,
315 "Unhandled rport_type:%d recv in rdev evt "
316 "ssni:x%x\n", rport_type, rdev_flowid);
317 return NULL;
318 }
319
320 /* Lookup on flowid */
321 rn = csio_rn_lookup(ln, rdev_flowid);
322 if (!rn) {
323
324 /* Drop events with duplicate flowid */
325 if (csio_rn_dup_flowid(ln, rdev_flowid, &vnp_flowid)) {
326 csio_ln_warn(ln,
327 "ssni:%x already active on vnpi:%x",
328 rdev_flowid, vnp_flowid);
329 return NULL;
330 }
331
332 /* Lookup on wwpn for NPORTs */
333 rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
334 if (!rn)
335 goto alloc_rnode;
336
337 } else {
338 /* Lookup well-known ports with nport id */
339 if (csio_is_rnode_wka(rport_type)) {
340 match_rn = csio_rnode_lookup_portid(ln,
341 ((ntohl(*port_id) >> 8) & CSIO_DID_MASK));
342 if (match_rn == NULL) {
343 csio_rn_flowid(rn) = CSIO_INVALID_IDX;
344 goto alloc_rnode;
345 }
346
347 /*
348 * Now compare the wwpn to confirm that the
349 * same port has relogged in. If so, update the matched rn.
350 * Else, go ahead and allocate a new rnode.
351 */
352 if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) {
353 if (csio_is_rnode_ready(rn)) {
354 csio_ln_warn(ln,
355 "rnode is already"
356 "active ssni:x%x\n",
357 rdev_flowid);
358 CSIO_ASSERT(0);
359 }
360 csio_rn_flowid(rn) = CSIO_INVALID_IDX;
361 rn = match_rn;
362
363 /* Update rn */
364 goto found_rnode;
365 }
366 csio_rn_flowid(rn) = CSIO_INVALID_IDX;
367 goto alloc_rnode;
368 }
369
370 /* wwpn match */
371 if (!memcmp(csio_rn_wwpn(rn), rdevp->wwpn, 8))
372 goto found_rnode;
373
374 /* Search for an rnode that has the same wwpn */
375 match_rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
376 if (match_rn != NULL) {
377 csio_ln_dbg(ln,
378 "ssni:x%x changed for rport name(wwpn):%llx "
379 "did:x%x\n", rdev_flowid,
380 wwn_to_u64(rdevp->wwpn),
381 match_rn->nport_id);
382 csio_rn_flowid(rn) = CSIO_INVALID_IDX;
383 rn = match_rn;
384 } else {
385 csio_ln_dbg(ln,
386 "rnode wwpn mismatch found ssni:x%x "
387 "name(wwpn):%llx\n",
388 rdev_flowid,
389 wwn_to_u64(csio_rn_wwpn(rn)));
390 if (csio_is_rnode_ready(rn)) {
391 csio_ln_warn(ln,
392 "rnode is already active "
393 "wwpn:%llx ssni:x%x\n",
394 wwn_to_u64(csio_rn_wwpn(rn)),
395 rdev_flowid);
396 CSIO_ASSERT(0);
397 }
398 csio_rn_flowid(rn) = CSIO_INVALID_IDX;
399 goto alloc_rnode;
400 }
401 }
402
403found_rnode:
404 csio_ln_dbg(ln, "found rnode:%p ssni:x%x name(wwpn):%llx\n",
405 rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));
406
407 /* Update flowid */
408 csio_rn_flowid(rn) = rdev_flowid;
409
410 /* update rdev entry */
411 rn->rdev_entry = rdevp;
412 CSIO_INC_STATS(ln, n_rnode_match);
413 return rn;
414
415alloc_rnode:
416 rn = csio_get_rnode(ln, rdev_flowid);
417 if (!rn)
418 return NULL;
419
420 csio_ln_dbg(ln, "alloc rnode:%p ssni:x%x name(wwpn):%llx\n",
421 rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));
422
423 /* update rdev entry */
424 rn->rdev_entry = rdevp;
425 return rn;
426}
427
428/*
429 * csio_rn_verify_rparams - verify rparams.
430 * @ln: lnode
431 * @rn: rnode
432 * @rdevp: remote device params
433 * Returns 0 if the rparams are verified successfully, -EINVAL otherwise.
434 */
435static int
436csio_rn_verify_rparams(struct csio_lnode *ln, struct csio_rnode *rn,
437 struct fcoe_rdev_entry *rdevp)
438{
439 uint8_t null[8];
440 uint8_t rport_type;
441 uint8_t fc_class;
442 uint32_t *did;
443
444 did = (uint32_t *) &rdevp->r_id[0];
445 rport_type =
446 FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);
447 switch (rport_type) {
448 case FLOGI_VFPORT:
449 rn->role = CSIO_RNFR_FABRIC;
450 if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_FLOGI) {
451 csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
452 csio_rn_flowid(rn));
453 return -EINVAL;
454 }
455 /* NPIV support */
456 if (FW_RDEV_WR_NPIV_GET(rdevp->vft_to_qos))
457 ln->flags |= CSIO_LNF_NPIVSUPP;
458
459 break;
460
461 case NS_VNPORT:
462 rn->role = CSIO_RNFR_NS;
463 if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_DIR_SERV) {
464 csio_ln_err(ln, "ssni:x%x invalid name server portid\n",
465 csio_rn_flowid(rn));
466 return -EINVAL;
467 }
468 break;
469
470 case REG_FC4_VNPORT:
471 case REG_VNPORT:
472 rn->role = CSIO_RNFR_NPORT;
473 if (rdevp->event_cause == PRLI_ACC_RCVD ||
474 rdevp->event_cause == PRLI_RCVD) {
475 if (FW_RDEV_WR_TASK_RETRY_ID_GET(
476 rdevp->enh_disc_to_tgt))
477 rn->fcp_flags |= FCP_SPPF_OVLY_ALLOW;
478
479 if (FW_RDEV_WR_RETRY_GET(rdevp->enh_disc_to_tgt))
480 rn->fcp_flags |= FCP_SPPF_RETRY;
481
482 if (FW_RDEV_WR_CONF_CMPL_GET(rdevp->enh_disc_to_tgt))
483 rn->fcp_flags |= FCP_SPPF_CONF_COMPL;
484
485 if (FW_RDEV_WR_TGT_GET(rdevp->enh_disc_to_tgt))
486 rn->role |= CSIO_RNFR_TARGET;
487
488 if (FW_RDEV_WR_INI_GET(rdevp->enh_disc_to_tgt))
489 rn->role |= CSIO_RNFR_INITIATOR;
490 }
491
492 break;
493
494 case FDMI_VNPORT:
495 case FAB_CTLR_VNPORT:
496 rn->role = 0;
497 break;
498
499 default:
500 csio_ln_err(ln, "ssni:x%x invalid rport type recv x%x\n",
501 csio_rn_flowid(rn), rport_type);
502 return -EINVAL;
503 }
504
505 /* validate wwpn/wwnn for Name server/remote port */
506 if (rport_type == REG_VNPORT || rport_type == NS_VNPORT) {
507 memset(null, 0, 8);
508 if (!memcmp(rdevp->wwnn, null, 8)) {
509 csio_ln_err(ln,
510 "ssni:x%x invalid wwnn received from"
511 " rport did:x%x\n",
512 csio_rn_flowid(rn),
513 (ntohl(*did) & CSIO_DID_MASK));
514 return -EINVAL;
515 }
516
517 if (!memcmp(rdevp->wwpn, null, 8)) {
518 csio_ln_err(ln,
519 "ssni:x%x invalid wwpn received from"
520 " rport did:x%x\n",
521 csio_rn_flowid(rn),
522 (ntohl(*did) & CSIO_DID_MASK));
523 return -EINVAL;
524 }
525
526 }
527
528 /* Copy wwnn, wwpn and nport id */
529 rn->nport_id = (ntohl(*did) >> 8) & CSIO_DID_MASK;
530 memcpy(csio_rn_wwnn(rn), rdevp->wwnn, 8);
531 memcpy(csio_rn_wwpn(rn), rdevp->wwpn, 8);
532 rn->rn_sparm.csp.sp_bb_data = ntohs(rdevp->rcv_fr_sz);
533 fc_class = FW_RDEV_WR_CLASS_GET(rdevp->vft_to_qos);
534 rn->rn_sparm.clsp[fc_class - 1].cp_class = htons(FC_CPC_VALID);
535 return 0;
536}
537
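/*
 * __csio_reg_rnode - Registers the rnode (the hw lock is dropped and
 * re-acquired around csio_reg_rnode()), updates the lnode's SCSI target
 * count and starts FDMI if this rnode is the management server.
 */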
538static void
539__csio_reg_rnode(struct csio_rnode *rn)
540{
541 struct csio_lnode *ln = csio_rnode_to_lnode(rn);
542 struct csio_hw *hw = csio_lnode_to_hw(ln);
543
544 spin_unlock_irq(&hw->lock);
545 csio_reg_rnode(rn);
546 spin_lock_irq(&hw->lock);
547
548 if (rn->role & CSIO_RNFR_TARGET)
549 ln->n_scsi_tgts++;
550
551 if (rn->nport_id == FC_FID_MGMT_SERV)
552 csio_ln_fdmi_start(ln, (void *) rn);
553}
554
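/*
 * __csio_unreg_rnode - Unregisters the rnode: splices off any I/Os pending
 * on its host completion queue, drops the hw lock around csio_unreg_rnode()
 * and then cleans up the spliced I/O queue.
 */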
555static void
556__csio_unreg_rnode(struct csio_rnode *rn)
557{
558 struct csio_lnode *ln = csio_rnode_to_lnode(rn);
559 struct csio_hw *hw = csio_lnode_to_hw(ln);
560 LIST_HEAD(tmp_q);
561 int cmpl = 0;
562
563 if (!list_empty(&rn->host_cmpl_q)) {
564 csio_dbg(hw, "Returning completion queue I/Os\n");
565 list_splice_tail_init(&rn->host_cmpl_q, &tmp_q);
566 cmpl = 1;
567 }
568
569 if (rn->role & CSIO_RNFR_TARGET) {
570 ln->n_scsi_tgts--;
571 ln->last_scan_ntgts--;
572 }
573
574 spin_unlock_irq(&hw->lock);
575 csio_unreg_rnode(rn);
576 spin_lock_irq(&hw->lock);
577
578 /* Cleanup I/Os that were waiting for rnode to unregister */
579 if (cmpl)
580 csio_scsi_cleanup_io_q(csio_hw_to_scsim(hw), &tmp_q);
581
582}
583
584/*****************************************************************************/
585/* START: Rnode SM */
586/*****************************************************************************/
587
588/*
589 * csio_rns_uninit - Handles events received in the rnode uninit state.
590 * @rn: rnode
591 * @evt: SM event.
592 *
593 */
594static void
595csio_rns_uninit(struct csio_rnode *rn, enum csio_rn_ev evt)
596{
597 struct csio_lnode *ln = csio_rnode_to_lnode(rn);
598 int ret = 0;
599
600 CSIO_INC_STATS(rn, n_evt_sm[evt]);
601
602 switch (evt) {
603 case CSIO_RNFE_LOGGED_IN:
604 case CSIO_RNFE_PLOGI_RECV:
605 ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
606 if (!ret) {
607 csio_set_state(&rn->sm, csio_rns_ready);
608 __csio_reg_rnode(rn);
609 } else {
610 CSIO_INC_STATS(rn, n_err_inval);
611 }
612 break;
613 case CSIO_RNFE_LOGO_RECV:
614 csio_ln_dbg(ln,
615 "ssni:x%x Ignoring event %d recv "
616 "in rn state[uninit]\n", csio_rn_flowid(rn), evt);
617 CSIO_INC_STATS(rn, n_evt_drop);
618 break;
619 default:
620 csio_ln_dbg(ln,
621 "ssni:x%x unexp event %d recv "
622 "in rn state[uninit]\n", csio_rn_flowid(rn), evt);
623 CSIO_INC_STATS(rn, n_evt_unexp);
624 break;
625 }
626}
627
628/*
629 * csio_rns_ready - Handles events received in the rnode ready state.
630 * @rn: rnode
631 * @evt: SM event.
632 *
633 */
634static void
635csio_rns_ready(struct csio_rnode *rn, enum csio_rn_ev evt)
636{
637 struct csio_lnode *ln = csio_rnode_to_lnode(rn);
638 int ret = 0;
639
640 CSIO_INC_STATS(rn, n_evt_sm[evt]);
641
642 switch (evt) {
643 case CSIO_RNFE_LOGGED_IN:
644 case CSIO_RNFE_PLOGI_RECV:
645 csio_ln_dbg(ln,
646 "ssni:x%x Ignoring event %d recv from did:x%x "
647 "in rn state[ready]\n", csio_rn_flowid(rn), evt,
648 rn->nport_id);
649 CSIO_INC_STATS(rn, n_evt_drop);
650 break;
651
652 case CSIO_RNFE_PRLI_DONE:
653 case CSIO_RNFE_PRLI_RECV:
654 ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
655 if (!ret)
656 __csio_reg_rnode(rn);
657 else
658 CSIO_INC_STATS(rn, n_err_inval);
659
660 break;
661 case CSIO_RNFE_DOWN:
662 csio_set_state(&rn->sm, csio_rns_offline);
663 __csio_unreg_rnode(rn);
664
665 /* FW is expected to internally abort outstanding SCSI WRs
666 * and return all SCSI WRs to the host with status "ABORTED".
667 */
668 break;
669
670 case CSIO_RNFE_LOGO_RECV:
671 csio_set_state(&rn->sm, csio_rns_offline);
672
673 __csio_unreg_rnode(rn);
674
675 /* FW is expected to internally abort outstanding SCSI WRs
676 * and return all SCSI WRs to the host with status "ABORTED".
677 */
678 break;
679
680 case CSIO_RNFE_CLOSE:
681 /*
682 * Each rnode receives the CLOSE event when the driver is
683 * removed or the device is reset.
684 * Note: All outstanding I/Os on the remote port need to be
685 * returned to the upper layer with an appropriate error
686 * before sending the CLOSE event.
687 */
688 csio_set_state(&rn->sm, csio_rns_uninit);
689 __csio_unreg_rnode(rn);
690 break;
691
692 case CSIO_RNFE_NAME_MISSING:
693 csio_set_state(&rn->sm, csio_rns_disappeared);
694 __csio_unreg_rnode(rn);
695
696 /*
697 * FW is expected to internally abort outstanding SCSI WRs
698 * and return all SCSI WRs to the host with status "ABORTED".
699 */
700
701 break;
702
703 default:
704 csio_ln_dbg(ln,
705 "ssni:x%x unexp event %d recv from did:x%x "
706 "in rn state[uninit]\n", csio_rn_flowid(rn), evt,
707 rn->nport_id);
708 CSIO_INC_STATS(rn, n_evt_unexp);
709 break;
710 }
711}
712
713/*
714 * csio_rns_offline - Handles events received in the rnode offline state.
715 * @rn: rnode
716 * @evt: SM event.
717 *
718 */
719static void
720csio_rns_offline(struct csio_rnode *rn, enum csio_rn_ev evt)
721{
722 struct csio_lnode *ln = csio_rnode_to_lnode(rn);
723 int ret = 0;
724
725 CSIO_INC_STATS(rn, n_evt_sm[evt]);
726
727 switch (evt) {
728 case CSIO_RNFE_LOGGED_IN:
729 case CSIO_RNFE_PLOGI_RECV:
730 ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
731 if (!ret) {
732 csio_set_state(&rn->sm, csio_rns_ready);
733 __csio_reg_rnode(rn);
734 } else {
735 CSIO_INC_STATS(rn, n_err_inval);
736 csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
737 }
738 break;
739
740 case CSIO_RNFE_DOWN:
741 csio_ln_dbg(ln,
742 "ssni:x%x Ignoring event %d recv from did:x%x "
743 "in rn state[offline]\n", csio_rn_flowid(rn), evt,
744 rn->nport_id);
745 CSIO_INC_STATS(rn, n_evt_drop);
746 break;
747
748 case CSIO_RNFE_CLOSE:
749 /* Each rnode receives the CLOSE event when the driver is
750 * removed or the device is reset.
751 * Note: All outstanding I/Os on the remote port need to be
752 * returned to the upper layer with an appropriate error
753 * before sending the CLOSE event.
754 */
755 csio_set_state(&rn->sm, csio_rns_uninit);
756 break;
757
758 case CSIO_RNFE_NAME_MISSING:
759 csio_set_state(&rn->sm, csio_rns_disappeared);
760 break;
761
762 default:
763 csio_ln_dbg(ln,
764 "ssni:x%x unexp event %d recv from did:x%x "
765 "in rn state[offline]\n", csio_rn_flowid(rn), evt,
766 rn->nport_id);
767 CSIO_INC_STATS(rn, n_evt_unexp);
768 break;
769 }
770}
771
772/*
773 * csio_rns_disappeared - Handles events received in the rnode disappeared state.
774 * @rn: rnode
775 * @evt: SM event.
776 *
777 */
778static void
779csio_rns_disappeared(struct csio_rnode *rn, enum csio_rn_ev evt)
780{
781 struct csio_lnode *ln = csio_rnode_to_lnode(rn);
782 int ret = 0;
783
784 CSIO_INC_STATS(rn, n_evt_sm[evt]);
785
786 switch (evt) {
787 case CSIO_RNFE_LOGGED_IN:
788 case CSIO_RNFE_PLOGI_RECV:
789 ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
790 if (!ret) {
791 csio_set_state(&rn->sm, csio_rns_ready);
792 __csio_reg_rnode(rn);
793 } else {
794 CSIO_INC_STATS(rn, n_err_inval);
795 csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
796 }
797 break;
798
799 case CSIO_RNFE_CLOSE:
800 /* Each rnode receives the CLOSE event when the driver is
801 * removed or the device is reset.
802 * Note: All outstanding I/Os on the remote port need to be
803 * returned to the upper layer with an appropriate error
804 * before sending the CLOSE event.
805 */
806 csio_set_state(&rn->sm, csio_rns_uninit);
807 break;
808
809 case CSIO_RNFE_DOWN:
810 case CSIO_RNFE_NAME_MISSING:
811 csio_ln_dbg(ln,
812 "ssni:x%x Ignoring event %d recv from did x%x"
813 "in rn state[disappeared]\n", csio_rn_flowid(rn),
814 evt, rn->nport_id);
815 break;
816
817 default:
818 csio_ln_dbg(ln,
819 "ssni:x%x unexp event %d recv from did x%x"
820 "in rn state[disappeared]\n", csio_rn_flowid(rn),
821 evt, rn->nport_id);
822 CSIO_INC_STATS(rn, n_evt_unexp);
823 break;
824 }
825}
826
827/*****************************************************************************/
828/* END: Rnode SM */
829/*****************************************************************************/
830
831/*
832 * csio_rnode_devloss_handler - Device loss event handler
833 * @rn: rnode
834 *
835 * Post event to close rnode SM and free rnode.
836 */
837void
838csio_rnode_devloss_handler(struct csio_rnode *rn)
839{
840 struct csio_lnode *ln = csio_rnode_to_lnode(rn);
841
842 /* Ignore if the same rnode came back online */
843 if (csio_is_rnode_ready(rn))
844 return;
845
846 csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
847
848 /* Free rn if in uninit state */
849 if (csio_is_rnode_uninit(rn))
850 csio_put_rnode(ln, rn);
851}
852
853/**
854 * csio_rnode_fwevt_handler - Event handler for firmware rnode events.
855 * @rn: rnode
856 * @fwevt: FW rdev event received for this rnode.
857 */
858void
859csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt)
860{
861 struct csio_lnode *ln = csio_rnode_to_lnode(rn);
862 enum csio_rn_ev evt;
863
864 evt = CSIO_FWE_TO_RNFE(fwevt);
865 if (!evt) {
866 csio_ln_err(ln, "ssni:x%x Unhandled FW Rdev event: %d\n",
867 csio_rn_flowid(rn), fwevt);
868 CSIO_INC_STATS(rn, n_evt_unexp);
869 return;
870 }
871 CSIO_INC_STATS(rn, n_evt_fw[fwevt]);
872
873 /* Track previous & current events for debugging */
874 rn->prev_evt = rn->cur_evt;
875 rn->cur_evt = fwevt;
876
877 /* Post event to rnode SM */
878 csio_post_event(&rn->sm, evt);
879
880 /* Free rn if in uninit state */
881 if (csio_is_rnode_uninit(rn))
882 csio_put_rnode(ln, rn);
883}
884
885/*
886 * csio_rnode_init - Initialize rnode.
887 * @rn: RNode
888 * @ln: Associated lnode
889 *
890 * Caller is responsible for holding the lock. The lock is required
891 * to be held for inserting the rnode in ln->rnhead list.
892 */
893static int
894csio_rnode_init(struct csio_rnode *rn, struct csio_lnode *ln)
895{
896 csio_rnode_to_lnode(rn) = ln;
897 csio_init_state(&rn->sm, csio_rns_uninit);
898 INIT_LIST_HEAD(&rn->host_cmpl_q);
899 csio_rn_flowid(rn) = CSIO_INVALID_IDX;
900
901 /* Add rnode to list of lnodes->rnhead */
902 list_add_tail(&rn->sm.sm_list, &ln->rnhead);
903
904 return 0;
905}
906
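/*
 * csio_rnode_exit - Removes the rnode from its lnode's rnode list; the
 * host completion queue is expected to be empty at this point.
 */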
907static void
908csio_rnode_exit(struct csio_rnode *rn)
909{
910 list_del_init(&rn->sm.sm_list);
911 CSIO_DB_ASSERT(list_empty(&rn->host_cmpl_q));
912}