author	Robert Love <robert.w.love@intel.com>	2008-12-09 18:10:17 -0500
committer	James Bottomley <James.Bottomley@HansenPartnership.com>	2008-12-29 12:24:33 -0500
commit	42e9a92fe6a9095bd68a379aaec7ad2be0337f7a (patch)
tree	344f8d9f72a3d926d652632abb8d319f8e32343a /drivers/scsi/libfc/fc_exch.c
parent	f032c2f7cdaae0e8907cd3b26426fc651dc5c275 (diff)
[SCSI] libfc: A modular Fibre Channel library
libFC is composed of 4 blocks supported by an exchange manager and a framing library. The upper 4 layers are fc_lport, fc_disc, fc_rport and fc_fcp. An LLD that uses libfc can either use libfc's blocks or, using the transport template defined in libfc.h, override one or more blocks with its own implementation.

The EM (Exchange Manager) manages exchanges/sequences for all commands: ELS, CT and FCP. The framing library frames ELS and CT commands.

The fc_lport block manages the library's representation of the host's FC enabled ports. The fc_disc block manages discovery of targets as well as handling changes that occur in the FC fabric (via RSCN events). The fc_rport block manages the library's representation of other entities in the FC fabric. Currently the library uses this block for targets, its peer when in point-to-point mode and the directory server, but it can be extended to other entities if needed. The fc_fcp block interacts with the scsi-ml and handles all I/O.

Signed-off-by: Robert Love <robert.w.love@intel.com>
[jejb: added include of delay.h to fix ppc64 compile prob spotted by sfr]
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
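To make the override model concrete, the following is a minimal sketch (illustrative only, not part of this patch) of how an LLD might supply its own frame_send routine and then let fc_exch_init() fill in the exchange-layer handlers it leaves NULL. The fcoe_example_* names are hypothetical; only the lp->tt template usage and the libfc calls are taken from the code below.

	#include <scsi/libfc.h>

	/* Hypothetical LLD transmit hook; a real LLD would queue fp to its hardware. */
	static int fcoe_example_frame_send(struct fc_lport *lp, struct fc_frame *fp)
	{
		fc_frame_free(fp);	/* placeholder: just consume the frame */
		return 0;
	}

	static int fcoe_example_libfc_config(struct fc_lport *lp)
	{
		/* override one block of the transport template */
		lp->tt.frame_send = fcoe_example_frame_send;

		/* fc_exch_init() only fills in the handlers still left NULL */
		return fc_exch_init(lp);
	}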
Diffstat (limited to 'drivers/scsi/libfc/fc_exch.c')
-rw-r--r--	drivers/scsi/libfc/fc_exch.c	1970
1 file changed, 1970 insertions, 0 deletions
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
new file mode 100644
index 000000000000..66db08a5f27f
--- /dev/null
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -0,0 +1,1970 @@
1/*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
4 * Copyright(c) 2008 Mike Christie
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Maintained at www.Open-FCoE.org
20 */
21
22/*
23 * Fibre Channel exchange and sequence handling.
24 */
25
26#include <linux/timer.h>
27#include <linux/gfp.h>
28#include <linux/err.h>
29
30#include <scsi/fc/fc_fc2.h>
31
32#include <scsi/libfc.h>
33#include <scsi/fc_encode.h>
34
35#define FC_DEF_R_A_TOV (10 * 1000) /* resource allocation timeout */
36
37/*
38 * fc_exch_debug can be set in debugger or at compile time to get more logs.
39 */
40static int fc_exch_debug;
41
42#define FC_DEBUG_EXCH(fmt...) \
43 do { \
44 if (fc_exch_debug) \
45 FC_DBG(fmt); \
46 } while (0)
47
48static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
49
50/*
51 * Structure and function definitions for managing Fibre Channel Exchanges
52 * and Sequences.
53 *
54 * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
55 *
56 * fc_exch_mgr holds the exchange state for an N port
57 *
58 * fc_exch holds state for one exchange and links to its active sequence.
59 *
60 * fc_seq holds the state for an individual sequence.
61 */
62
63/*
64 * Exchange manager.
65 *
66 * This structure is the center for creating exchanges and sequences.
67 * It manages the allocation of exchange IDs.
68 */
69struct fc_exch_mgr {
70 enum fc_class class; /* default class for sequences */
71 spinlock_t em_lock; /* exchange manager lock,
72 must be taken before ex_lock */
73 u16 last_xid; /* last allocated exchange ID */
74 u16 min_xid; /* min exchange ID */
75 u16 max_xid; /* max exchange ID */
76 u16 max_read; /* max exchange ID for read */
77 u16 last_read; /* last xid allocated for read */
78 u32 total_exches; /* total allocated exchanges */
79 struct list_head ex_list; /* allocated exchanges list */
80 struct fc_lport *lp; /* fc device instance */
81 mempool_t *ep_pool; /* reserve ep's */
82
83 /*
 84	 * Currently exchange mgr stats are updated but not used.
 85	 * Either expose the stats via sysfs or remove them
 86	 * altogether if they stay unused. XXX
87 */
88 struct {
89 atomic_t no_free_exch;
90 atomic_t no_free_exch_xid;
91 atomic_t xid_not_found;
92 atomic_t xid_busy;
93 atomic_t seq_not_found;
94 atomic_t non_bls_resp;
95 } stats;
96 struct fc_exch **exches; /* for exch pointers indexed by xid */
97};
98#define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
99
100static void fc_exch_rrq(struct fc_exch *);
101static void fc_seq_ls_acc(struct fc_seq *);
102static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason,
103 enum fc_els_rjt_explan);
104static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *);
105static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *);
106static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp);
107
108/*
109 * Internal implementation notes.
110 *
 111 * There is one exchange manager by default in libfc, but an LLD may
 112 * choose to have one per CPU. There is one sequence manager per exchange
 113 * manager and it is currently never separated.
114 *
115 * Section 9.8 in FC-FS-2 specifies: "The SEQ_ID is a one-byte field
116 * assigned by the Sequence Initiator that shall be unique for a specific
117 * D_ID and S_ID pair while the Sequence is open." Note that it isn't
118 * qualified by exchange ID, which one might think it would be.
119 * In practice this limits the number of open sequences and exchanges to 256
120 * per session. For most targets we could treat this limit as per exchange.
121 *
122 * The exchange and its sequence are freed when the last sequence is received.
123 * It's possible for the remote port to leave an exchange open without
124 * sending any sequences.
125 *
126 * Notes on reference counts:
127 *
 128 * Exchanges are reference counted and an exchange gets freed when its
 129 * reference count becomes zero.
130 *
131 * Timeouts:
132 * Sequences are timed out for E_D_TOV and R_A_TOV.
133 *
134 * Sequence event handling:
135 *
136 * The following events may occur on initiator sequences:
137 *
138 * Send.
139 * For now, the whole thing is sent.
140 * Receive ACK
141 * This applies only to class F.
142 * The sequence is marked complete.
143 * ULP completion.
144 * The upper layer calls fc_exch_done() when done
145 * with exchange and sequence tuple.
146 * RX-inferred completion.
147 * When we receive the next sequence on the same exchange, we can
148 * retire the previous sequence ID. (XXX not implemented).
149 * Timeout.
150 * R_A_TOV frees the sequence ID. If we're waiting for ACK,
151 * E_D_TOV causes abort and calls upper layer response handler
152 * with FC_EX_TIMEOUT error.
153 * Receive RJT
154 * XXX defer.
155 * Send ABTS
156 * On timeout.
157 *
158 * The following events may occur on recipient sequences:
159 *
160 * Receive
161 * Allocate sequence for first frame received.
162 * Hold during receive handler.
163 * Release when final frame received.
164 * Keep status of last N of these for the ELS RES command. XXX TBD.
165 * Receive ABTS
166 * Deallocate sequence
167 * Send RJT
168 * Deallocate
169 *
170 * For now, we neglect conditions where only part of a sequence was
171 * received or transmitted, or where out-of-order receipt is detected.
172 */
173
174/*
175 * Locking notes:
176 *
 177 * The EM code runs in a per-CPU worker thread.
 178 *
 179 * To protect against concurrency between worker thread code and timers,
 180 * sequence allocation and deallocation must be locked.
 181 * - exchange refcnt can be manipulated atomically without locks.
182 * - sequence allocation must be locked by exch lock.
183 * - If the em_lock and ex_lock must be taken at the same time, then the
184 * em_lock must be taken before the ex_lock.
185 */
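/*
 * Illustrative sketch only: the acquisition order required when both
 * locks are needed, as done in fc_exch_alloc() below:
 *
 *	spin_lock_bh(&mp->em_lock);	(manager lock first)
 *	spin_lock_bh(&ep->ex_lock);	(then the exchange lock)
 *	... update the manager's tables and the exchange ...
 *	spin_unlock_bh(&mp->em_lock);	(ex_lock may stay held for the caller)
 */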
186
187/*
188 * opcode names for debugging.
189 */
190static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
191
192#define FC_TABLE_SIZE(x) (sizeof(x) / sizeof(x[0]))
193
194static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
195 unsigned int max_index)
196{
197 const char *name = NULL;
198
199 if (op < max_index)
200 name = table[op];
201 if (!name)
202 name = "unknown";
203 return name;
204}
205
206static const char *fc_exch_rctl_name(unsigned int op)
207{
208 return fc_exch_name_lookup(op, fc_exch_rctl_names,
209 FC_TABLE_SIZE(fc_exch_rctl_names));
210}
211
212/*
213 * Hold an exchange - keep it from being freed.
214 */
215static void fc_exch_hold(struct fc_exch *ep)
216{
217 atomic_inc(&ep->ex_refcnt);
218}
219
220/*
 221 * Set up the FC header by initializing a few more FC header fields and sof/eof.
 222 * Fields initialized by this function:
223 * - fh_ox_id, fh_rx_id, fh_seq_id, fh_seq_cnt
224 * - sof and eof
225 */
226static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
227 u32 f_ctl)
228{
229 struct fc_frame_header *fh = fc_frame_header_get(fp);
230 u16 fill;
231
232 fr_sof(fp) = ep->class;
233 if (ep->seq.cnt)
234 fr_sof(fp) = fc_sof_normal(ep->class);
235
236 if (f_ctl & FC_FC_END_SEQ) {
237 fr_eof(fp) = FC_EOF_T;
238 if (fc_sof_needs_ack(ep->class))
239 fr_eof(fp) = FC_EOF_N;
240 /*
241 * Form f_ctl.
242 * The number of fill bytes to make the length a 4-byte
243 * multiple is the low order 2-bits of the f_ctl.
244 * The fill itself will have been cleared by the frame
245 * allocation.
246 * After this, the length will be even, as expected by
247 * the transport.
248 */
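		/*
		 * Worked example (illustrative): a 61-byte frame gives
		 * fill = 4 - (61 & 3) = 3, the skb is padded to 64 bytes
		 * and the low 2 bits of F_CTL carry the value 3 so the
		 * receiver can strip the pad again.
		 */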
249 fill = fr_len(fp) & 3;
250 if (fill) {
251 fill = 4 - fill;
252 /* TODO, this may be a problem with fragmented skb */
253 skb_put(fp_skb(fp), fill);
254 hton24(fh->fh_f_ctl, f_ctl | fill);
255 }
256 } else {
 257 WARN_ON(fr_len(fp) % 4 != 0); /* no pad on non-last frames */
258 fr_eof(fp) = FC_EOF_N;
259 }
260
261 /*
 262 * Initialize the remaining fh fields
 263 * from fc_fill_fc_hdr
264 */
265 fh->fh_ox_id = htons(ep->oxid);
266 fh->fh_rx_id = htons(ep->rxid);
267 fh->fh_seq_id = ep->seq.id;
268 fh->fh_seq_cnt = htons(ep->seq.cnt);
269}
270
271
272/*
273 * Release a reference to an exchange.
274 * If the refcnt goes to zero and the exchange is complete, it is freed.
275 */
276static void fc_exch_release(struct fc_exch *ep)
277{
278 struct fc_exch_mgr *mp;
279
280 if (atomic_dec_and_test(&ep->ex_refcnt)) {
281 mp = ep->em;
282 if (ep->destructor)
283 ep->destructor(&ep->seq, ep->arg);
284 if (ep->lp->tt.exch_put)
285 ep->lp->tt.exch_put(ep->lp, mp, ep->xid);
 286 WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
287 mempool_free(ep, mp->ep_pool);
288 }
289}
290
291static int fc_exch_done_locked(struct fc_exch *ep)
292{
293 int rc = 1;
294
295 /*
 296 * We must check for completion in case there are two threads
 297 * trying to complete this. But the rrq code will reuse the
298 * ep, and in that case we only clear the resp and set it as
299 * complete, so it can be reused by the timer to send the rrq.
300 */
301 ep->resp = NULL;
302 if (ep->state & FC_EX_DONE)
303 return rc;
304 ep->esb_stat |= ESB_ST_COMPLETE;
305
306 if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
307 ep->state |= FC_EX_DONE;
308 if (cancel_delayed_work(&ep->timeout_work))
309 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
310 rc = 0;
311 }
312 return rc;
313}
314
315static void fc_exch_mgr_delete_ep(struct fc_exch *ep)
316{
317 struct fc_exch_mgr *mp;
318
319 mp = ep->em;
320 spin_lock_bh(&mp->em_lock);
321 WARN_ON(mp->total_exches <= 0);
322 mp->total_exches--;
323 mp->exches[ep->xid - mp->min_xid] = NULL;
324 list_del(&ep->ex_list);
325 spin_unlock_bh(&mp->em_lock);
326 fc_exch_release(ep); /* drop hold for exch in mp */
327}
328
329/*
330 * Internal version of fc_exch_timer_set - used with lock held.
331 */
332static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
333 unsigned int timer_msec)
334{
335 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
336 return;
337
338 FC_DEBUG_EXCH("Exchange (%4x) timed out, notifying the upper layer\n",
339 ep->xid);
340 if (schedule_delayed_work(&ep->timeout_work,
341 msecs_to_jiffies(timer_msec)))
342 fc_exch_hold(ep); /* hold for timer */
343}
344
345/*
346 * Set timer for an exchange.
347 * The time is a minimum delay in milliseconds until the timer fires.
348 * Used for upper level protocols to time out the exchange.
349 * The timer is cancelled when it fires or when the exchange completes.
350 * Returns non-zero if a timer couldn't be allocated.
351 */
352static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
353{
354 spin_lock_bh(&ep->ex_lock);
355 fc_exch_timer_set_locked(ep, timer_msec);
356 spin_unlock_bh(&ep->ex_lock);
357}
358
359int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
360{
361 struct fc_seq *sp;
362 struct fc_exch *ep;
363 struct fc_frame *fp;
364 int error;
365
366 ep = fc_seq_exch(req_sp);
367
368 spin_lock_bh(&ep->ex_lock);
369 if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
370 ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
371 spin_unlock_bh(&ep->ex_lock);
372 return -ENXIO;
373 }
374
375 /*
376 * Send the abort on a new sequence if possible.
377 */
378 sp = fc_seq_start_next_locked(&ep->seq);
379 if (!sp) {
380 spin_unlock_bh(&ep->ex_lock);
381 return -ENOMEM;
382 }
383
384 ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
385 if (timer_msec)
386 fc_exch_timer_set_locked(ep, timer_msec);
387 spin_unlock_bh(&ep->ex_lock);
388
389 /*
390 * If not logged into the fabric, don't send ABTS but leave
391 * sequence active until next timeout.
392 */
393 if (!ep->sid)
394 return 0;
395
396 /*
397 * Send an abort for the sequence that timed out.
398 */
399 fp = fc_frame_alloc(ep->lp, 0);
400 if (fp) {
401 fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
402 FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
403 error = fc_seq_send(ep->lp, sp, fp);
404 } else
405 error = -ENOBUFS;
406 return error;
407}
408EXPORT_SYMBOL(fc_seq_exch_abort);
409
410/*
411 * Exchange timeout - handle exchange timer expiration.
412 * The timer will have been cancelled before this is called.
413 */
414static void fc_exch_timeout(struct work_struct *work)
415{
416 struct fc_exch *ep = container_of(work, struct fc_exch,
417 timeout_work.work);
418 struct fc_seq *sp = &ep->seq;
419 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
420 void *arg;
421 u32 e_stat;
422 int rc = 1;
423
424 spin_lock_bh(&ep->ex_lock);
425 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
426 goto unlock;
427
428 e_stat = ep->esb_stat;
429 if (e_stat & ESB_ST_COMPLETE) {
430 ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
431 if (e_stat & ESB_ST_REC_QUAL)
432 fc_exch_rrq(ep);
433 spin_unlock_bh(&ep->ex_lock);
434 goto done;
435 } else {
436 resp = ep->resp;
437 arg = ep->arg;
438 ep->resp = NULL;
439 if (e_stat & ESB_ST_ABNORMAL)
440 rc = fc_exch_done_locked(ep);
441 spin_unlock_bh(&ep->ex_lock);
442 if (!rc)
443 fc_exch_mgr_delete_ep(ep);
444 if (resp)
445 resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
446 fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
447 goto done;
448 }
449unlock:
450 spin_unlock_bh(&ep->ex_lock);
451done:
452 /*
453 * This release matches the hold taken when the timer was set.
454 */
455 fc_exch_release(ep);
456}
457
458/*
459 * Allocate a sequence.
460 *
461 * We don't support multiple originated sequences on the same exchange.
462 * By implication, any previously originated sequence on this exchange
463 * is complete, and we reallocate the same sequence.
464 */
465static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
466{
467 struct fc_seq *sp;
468
469 sp = &ep->seq;
470 sp->ssb_stat = 0;
471 sp->cnt = 0;
472 sp->id = seq_id;
473 return sp;
474}
475
476/*
477 * fc_em_alloc_xid - returns an xid based on request type
 478 * @mp : ptr to the exchange manager
 479 * @fp : ptr to the associated frame
 480 *
 481 * Check the associated fc_fcp_pkt to get the SCSI command type and
 482 * command direction, which decide the xid range this exchange id
 483 * is allocated from.
 484 *
 485 * Returns: 0 or a valid xid
486 */
487static u16 fc_em_alloc_xid(struct fc_exch_mgr *mp, const struct fc_frame *fp)
488{
489 u16 xid, min, max;
490 u16 *plast;
491 struct fc_exch *ep = NULL;
492
493 if (mp->max_read) {
494 if (fc_frame_is_read(fp)) {
495 min = mp->min_xid;
496 max = mp->max_read;
497 plast = &mp->last_read;
498 } else {
499 min = mp->max_read + 1;
500 max = mp->max_xid;
501 plast = &mp->last_xid;
502 }
503 } else {
504 min = mp->min_xid;
505 max = mp->max_xid;
506 plast = &mp->last_xid;
507 }
508 xid = *plast;
509 do {
510 xid = (xid == max) ? min : xid + 1;
511 ep = mp->exches[xid - mp->min_xid];
512 } while ((ep != NULL) && (xid != *plast));
513
514 if (unlikely(ep))
515 xid = 0;
516 else
517 *plast = xid;
518
519 return xid;
520}
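/*
 * Illustrative example (assumes a non-zero max_read, i.e. read xids are
 * offloaded): with min_xid 1, max_read 0x07ff and max_xid 0x0fff, SCSI
 * reads cycle through xids 1..0x07ff via last_read while all other
 * requests cycle through 0x0800..0x0fff via last_xid.
 */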
521
522/*
523 * fc_exch_alloc - allocate an exchange.
524 * @mp : ptr to the exchange manager
525 * @xid: input xid
526 *
 527 * If the supplied xid is zero, assign the next free exchange ID
 528 * from the exchange manager; otherwise use the supplied xid.
529 * Returns with exch lock held.
530 */
531struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
532 struct fc_frame *fp, u16 xid)
533{
534 struct fc_exch *ep;
535
536 /* allocate memory for exchange */
537 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
538 if (!ep) {
539 atomic_inc(&mp->stats.no_free_exch);
540 goto out;
541 }
542 memset(ep, 0, sizeof(*ep));
543
544 spin_lock_bh(&mp->em_lock);
545 /* alloc xid if input xid 0 */
546 if (!xid) {
547 /* alloc a new xid */
548 xid = fc_em_alloc_xid(mp, fp);
549 if (!xid) {
550 printk(KERN_ERR "fc_em_alloc_xid() failed\n");
551 goto err;
552 }
553 }
554
555 fc_exch_hold(ep); /* hold for exch in mp */
556 spin_lock_init(&ep->ex_lock);
557 /*
558 * Hold exch lock for caller to prevent fc_exch_reset()
559 * from releasing exch while fc_exch_alloc() caller is
560 * still working on exch.
561 */
562 spin_lock_bh(&ep->ex_lock);
563
564 mp->exches[xid - mp->min_xid] = ep;
565 list_add_tail(&ep->ex_list, &mp->ex_list);
566 fc_seq_alloc(ep, ep->seq_id++);
567 mp->total_exches++;
568 spin_unlock_bh(&mp->em_lock);
569
570 /*
571 * update exchange
572 */
573 ep->oxid = ep->xid = xid;
574 ep->em = mp;
575 ep->lp = mp->lp;
576 ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */
577 ep->rxid = FC_XID_UNKNOWN;
578 ep->class = mp->class;
579 INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
580out:
581 return ep;
582err:
583 spin_unlock_bh(&mp->em_lock);
584 atomic_inc(&mp->stats.no_free_exch_xid);
585 mempool_free(ep, mp->ep_pool);
586 return NULL;
587}
588EXPORT_SYMBOL(fc_exch_alloc);
589
590/*
591 * Lookup and hold an exchange.
592 */
593static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
594{
595 struct fc_exch *ep = NULL;
596
597 if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
598 spin_lock_bh(&mp->em_lock);
599 ep = mp->exches[xid - mp->min_xid];
600 if (ep) {
601 fc_exch_hold(ep);
602 WARN_ON(ep->xid != xid);
603 }
604 spin_unlock_bh(&mp->em_lock);
605 }
606 return ep;
607}
608
609void fc_exch_done(struct fc_seq *sp)
610{
611 struct fc_exch *ep = fc_seq_exch(sp);
612 int rc;
613
614 spin_lock_bh(&ep->ex_lock);
615 rc = fc_exch_done_locked(ep);
616 spin_unlock_bh(&ep->ex_lock);
617 if (!rc)
618 fc_exch_mgr_delete_ep(ep);
619}
620EXPORT_SYMBOL(fc_exch_done);
621
622/*
623 * Allocate a new exchange as responder.
624 * Sets the responder ID in the frame header.
625 */
626static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
627{
628 struct fc_exch *ep;
629 struct fc_frame_header *fh;
630 u16 rxid;
631
632 ep = mp->lp->tt.exch_get(mp->lp, fp);
633 if (ep) {
634 ep->class = fc_frame_class(fp);
635
636 /*
637 * Set EX_CTX indicating we're responding on this exchange.
638 */
639 ep->f_ctl |= FC_FC_EX_CTX; /* we're responding */
640 ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not new */
641 fh = fc_frame_header_get(fp);
642 ep->sid = ntoh24(fh->fh_d_id);
643 ep->did = ntoh24(fh->fh_s_id);
644 ep->oid = ep->did;
645
646 /*
647 * Allocated exchange has placed the XID in the
648 * originator field. Move it to the responder field,
649 * and set the originator XID from the frame.
650 */
651 ep->rxid = ep->xid;
652 ep->oxid = ntohs(fh->fh_ox_id);
653 ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
654 if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
655 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
656
657 /*
658 * Set the responder ID in the frame header.
659 * The old one should've been 0xffff.
660 * If it isn't, don't assign one.
661 * Incoming basic link service frames may specify
662 * a referenced RX_ID.
663 */
664 if (fh->fh_type != FC_TYPE_BLS) {
665 rxid = ntohs(fh->fh_rx_id);
666 WARN_ON(rxid != FC_XID_UNKNOWN);
667 fh->fh_rx_id = htons(ep->rxid);
668 }
669 fc_exch_hold(ep); /* hold for caller */
670 spin_unlock_bh(&ep->ex_lock); /* lock from exch_get */
671 }
672 return ep;
673}
674
675/*
676 * Find a sequence for receive where the other end is originating the sequence.
677 * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
678 * on the ep that should be released by the caller.
679 */
680static enum fc_pf_rjt_reason
681fc_seq_lookup_recip(struct fc_exch_mgr *mp, struct fc_frame *fp)
682{
683 struct fc_frame_header *fh = fc_frame_header_get(fp);
684 struct fc_exch *ep = NULL;
685 struct fc_seq *sp = NULL;
686 enum fc_pf_rjt_reason reject = FC_RJT_NONE;
687 u32 f_ctl;
688 u16 xid;
689
690 f_ctl = ntoh24(fh->fh_f_ctl);
691 WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);
692
693 /*
694 * Lookup or create the exchange if we will be creating the sequence.
695 */
696 if (f_ctl & FC_FC_EX_CTX) {
697 xid = ntohs(fh->fh_ox_id); /* we originated exch */
698 ep = fc_exch_find(mp, xid);
699 if (!ep) {
700 atomic_inc(&mp->stats.xid_not_found);
701 reject = FC_RJT_OX_ID;
702 goto out;
703 }
704 if (ep->rxid == FC_XID_UNKNOWN)
705 ep->rxid = ntohs(fh->fh_rx_id);
706 else if (ep->rxid != ntohs(fh->fh_rx_id)) {
707 reject = FC_RJT_OX_ID;
708 goto rel;
709 }
710 } else {
711 xid = ntohs(fh->fh_rx_id); /* we are the responder */
712
713 /*
714 * Special case for MDS issuing an ELS TEST with a
715 * bad rxid of 0.
716 * XXX take this out once we do the proper reject.
717 */
718 if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
719 fc_frame_payload_op(fp) == ELS_TEST) {
720 fh->fh_rx_id = htons(FC_XID_UNKNOWN);
721 xid = FC_XID_UNKNOWN;
722 }
723
724 /*
725 * new sequence - find the exchange
726 */
727 ep = fc_exch_find(mp, xid);
728 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
729 if (ep) {
730 atomic_inc(&mp->stats.xid_busy);
731 reject = FC_RJT_RX_ID;
732 goto rel;
733 }
734 ep = fc_exch_resp(mp, fp);
735 if (!ep) {
736 reject = FC_RJT_EXCH_EST; /* XXX */
737 goto out;
738 }
739 xid = ep->xid; /* get our XID */
740 } else if (!ep) {
741 atomic_inc(&mp->stats.xid_not_found);
742 reject = FC_RJT_RX_ID; /* XID not found */
743 goto out;
744 }
745 }
746
747 /*
748 * At this point, we have the exchange held.
749 * Find or create the sequence.
750 */
751 if (fc_sof_is_init(fr_sof(fp))) {
752 sp = fc_seq_start_next(&ep->seq);
753 if (!sp) {
754 reject = FC_RJT_SEQ_XS; /* exchange shortage */
755 goto rel;
756 }
757 sp->id = fh->fh_seq_id;
758 sp->ssb_stat |= SSB_ST_RESP;
759 } else {
760 sp = &ep->seq;
761 if (sp->id != fh->fh_seq_id) {
762 atomic_inc(&mp->stats.seq_not_found);
763 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
764 goto rel;
765 }
766 }
767 WARN_ON(ep != fc_seq_exch(sp));
768
769 if (f_ctl & FC_FC_SEQ_INIT)
770 ep->esb_stat |= ESB_ST_SEQ_INIT;
771
772 fr_seq(fp) = sp;
773out:
774 return reject;
775rel:
776 fc_exch_done(&ep->seq);
777 fc_exch_release(ep); /* hold from fc_exch_find/fc_exch_resp */
778 return reject;
779}
780
781/*
782 * Find the sequence for a frame being received.
783 * We originated the sequence, so it should be found.
784 * We may or may not have originated the exchange.
785 * Does not hold the sequence for the caller.
786 */
787static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
788 struct fc_frame *fp)
789{
790 struct fc_frame_header *fh = fc_frame_header_get(fp);
791 struct fc_exch *ep;
792 struct fc_seq *sp = NULL;
793 u32 f_ctl;
794 u16 xid;
795
796 f_ctl = ntoh24(fh->fh_f_ctl);
797 WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
798 xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
799 ep = fc_exch_find(mp, xid);
800 if (!ep)
801 return NULL;
802 if (ep->seq.id == fh->fh_seq_id) {
803 /*
804 * Save the RX_ID if we didn't previously know it.
805 */
806 sp = &ep->seq;
807 if ((f_ctl & FC_FC_EX_CTX) != 0 &&
808 ep->rxid == FC_XID_UNKNOWN) {
809 ep->rxid = ntohs(fh->fh_rx_id);
810 }
811 }
812 fc_exch_release(ep);
813 return sp;
814}
815
816/*
817 * Set addresses for an exchange.
818 * Note this must be done before the first sequence of the exchange is sent.
819 */
820static void fc_exch_set_addr(struct fc_exch *ep,
821 u32 orig_id, u32 resp_id)
822{
823 ep->oid = orig_id;
824 if (ep->esb_stat & ESB_ST_RESP) {
825 ep->sid = resp_id;
826 ep->did = orig_id;
827 } else {
828 ep->sid = orig_id;
829 ep->did = resp_id;
830 }
831}
832
833static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
834{
835 struct fc_exch *ep = fc_seq_exch(sp);
836
837 sp = fc_seq_alloc(ep, ep->seq_id++);
838 FC_DEBUG_EXCH("exch %4x f_ctl %6x seq %2x\n",
839 ep->xid, ep->f_ctl, sp->id);
840 return sp;
841}
842/*
843 * Allocate a new sequence on the same exchange as the supplied sequence.
844 * This will never return NULL.
845 */
846struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
847{
848 struct fc_exch *ep = fc_seq_exch(sp);
849
850 spin_lock_bh(&ep->ex_lock);
851 WARN_ON((ep->esb_stat & ESB_ST_COMPLETE) != 0);
852 sp = fc_seq_start_next_locked(sp);
853 spin_unlock_bh(&ep->ex_lock);
854
855 return sp;
856}
857EXPORT_SYMBOL(fc_seq_start_next);
858
859int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp)
860{
861 struct fc_exch *ep;
862 struct fc_frame_header *fh = fc_frame_header_get(fp);
863 int error;
864 u32 f_ctl;
865
866 ep = fc_seq_exch(sp);
867 WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
868
869 f_ctl = ntoh24(fh->fh_f_ctl);
870 fc_exch_setup_hdr(ep, fp, f_ctl);
871
872 /*
873 * update sequence count if this frame is carrying
874 * multiple FC frames when sequence offload is enabled
875 * by LLD.
876 */
877 if (fr_max_payload(fp))
878 sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
879 fr_max_payload(fp));
880 else
881 sp->cnt++;
882
883 /*
884 * Send the frame.
885 */
886 error = lp->tt.frame_send(lp, fp);
887
888 /*
889 * Update the exchange and sequence flags,
890 * assuming all frames for the sequence have been sent.
891 * We can only be called to send once for each sequence.
892 */
893 spin_lock_bh(&ep->ex_lock);
894 ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */
895 if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT))
896 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
897 spin_unlock_bh(&ep->ex_lock);
898 return error;
899}
900EXPORT_SYMBOL(fc_seq_send);
901
902void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
903 struct fc_seq_els_data *els_data)
904{
905 switch (els_cmd) {
906 case ELS_LS_RJT:
907 fc_seq_ls_rjt(sp, els_data->reason, els_data->explan);
908 break;
909 case ELS_LS_ACC:
910 fc_seq_ls_acc(sp);
911 break;
912 case ELS_RRQ:
913 fc_exch_els_rrq(sp, els_data->fp);
914 break;
915 case ELS_REC:
916 fc_exch_els_rec(sp, els_data->fp);
917 break;
918 default:
919 FC_DBG("Invalid ELS CMD:%x\n", els_cmd);
920 }
921}
922EXPORT_SYMBOL(fc_seq_els_rsp_send);
923
924/*
925 * Send a sequence, which is also the last sequence in the exchange.
926 */
927static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
928 enum fc_rctl rctl, enum fc_fh_type fh_type)
929{
930 u32 f_ctl;
931 struct fc_exch *ep = fc_seq_exch(sp);
932
933 f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
934 f_ctl |= ep->f_ctl;
935 fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
936 fc_seq_send(ep->lp, sp, fp);
937}
938
939/*
940 * Send ACK_1 (or equiv.) indicating we received something.
941 * The frame we're acking is supplied.
942 */
943static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
944{
945 struct fc_frame *fp;
946 struct fc_frame_header *rx_fh;
947 struct fc_frame_header *fh;
948 struct fc_exch *ep = fc_seq_exch(sp);
949 struct fc_lport *lp = ep->lp;
950 unsigned int f_ctl;
951
952 /*
953 * Don't send ACKs for class 3.
954 */
955 if (fc_sof_needs_ack(fr_sof(rx_fp))) {
956 fp = fc_frame_alloc(lp, 0);
957 if (!fp)
958 return;
959
960 fh = fc_frame_header_get(fp);
961 fh->fh_r_ctl = FC_RCTL_ACK_1;
962 fh->fh_type = FC_TYPE_BLS;
963
964 /*
965 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
966 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
967 * Bits 9-8 are meaningful (retransmitted or unidirectional).
968 * Last ACK uses bits 7-6 (continue sequence),
969 * bits 5-4 are meaningful (what kind of ACK to use).
970 */
971 rx_fh = fc_frame_header_get(rx_fp);
972 f_ctl = ntoh24(rx_fh->fh_f_ctl);
973 f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
974 FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
975 FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
976 FC_FC_RETX_SEQ | FC_FC_UNI_TX;
977 f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
978 hton24(fh->fh_f_ctl, f_ctl);
979
980 fc_exch_setup_hdr(ep, fp, f_ctl);
981 fh->fh_seq_id = rx_fh->fh_seq_id;
982 fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
983 fh->fh_parm_offset = htonl(1); /* ack single frame */
984
985 fr_sof(fp) = fr_sof(rx_fp);
986 if (f_ctl & FC_FC_END_SEQ)
987 fr_eof(fp) = FC_EOF_T;
988 else
989 fr_eof(fp) = FC_EOF_N;
990
991 (void) lp->tt.frame_send(lp, fp);
992 }
993}
994
995/*
996 * Send BLS Reject.
997 * This is for rejecting BA_ABTS only.
998 */
999static void
1000fc_exch_send_ba_rjt(struct fc_frame *rx_fp, enum fc_ba_rjt_reason reason,
1001 enum fc_ba_rjt_explan explan)
1002{
1003 struct fc_frame *fp;
1004 struct fc_frame_header *rx_fh;
1005 struct fc_frame_header *fh;
1006 struct fc_ba_rjt *rp;
1007 struct fc_lport *lp;
1008 unsigned int f_ctl;
1009
1010 lp = fr_dev(rx_fp);
1011 fp = fc_frame_alloc(lp, sizeof(*rp));
1012 if (!fp)
1013 return;
1014 fh = fc_frame_header_get(fp);
1015 rx_fh = fc_frame_header_get(rx_fp);
1016
1017 memset(fh, 0, sizeof(*fh) + sizeof(*rp));
1018
1019 rp = fc_frame_payload_get(fp, sizeof(*rp));
1020 rp->br_reason = reason;
1021 rp->br_explan = explan;
1022
1023 /*
1024 * seq_id, cs_ctl, df_ctl and param/offset are zero.
1025 */
1026 memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
1027 memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
1028 fh->fh_ox_id = rx_fh->fh_rx_id;
1029 fh->fh_rx_id = rx_fh->fh_ox_id;
1030 fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
1031 fh->fh_r_ctl = FC_RCTL_BA_RJT;
1032 fh->fh_type = FC_TYPE_BLS;
1033
1034 /*
1035 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
1036 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
1037 * Bits 9-8 are meaningful (retransmitted or unidirectional).
1038 * Last ACK uses bits 7-6 (continue sequence),
1039 * bits 5-4 are meaningful (what kind of ACK to use).
1040 * Always set LAST_SEQ, END_SEQ.
1041 */
1042 f_ctl = ntoh24(rx_fh->fh_f_ctl);
1043 f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
1044 FC_FC_END_CONN | FC_FC_SEQ_INIT |
1045 FC_FC_RETX_SEQ | FC_FC_UNI_TX;
1046 f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
1047 f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
1048 f_ctl &= ~FC_FC_FIRST_SEQ;
1049 hton24(fh->fh_f_ctl, f_ctl);
1050
1051 fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
1052 fr_eof(fp) = FC_EOF_T;
1053 if (fc_sof_needs_ack(fr_sof(fp)))
1054 fr_eof(fp) = FC_EOF_N;
1055
1056 (void) lp->tt.frame_send(lp, fp);
1057}
1058
1059/*
1060 * Handle an incoming ABTS. This would be for target mode usually,
1061 * but could be due to lost FCP transfer ready, confirm or RRQ.
1062 * We always handle this as an exchange abort, ignoring the parameter.
1063 */
1064static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
1065{
1066 struct fc_frame *fp;
1067 struct fc_ba_acc *ap;
1068 struct fc_frame_header *fh;
1069 struct fc_seq *sp;
1070
1071 if (!ep)
1072 goto reject;
1073 spin_lock_bh(&ep->ex_lock);
1074 if (ep->esb_stat & ESB_ST_COMPLETE) {
1075 spin_unlock_bh(&ep->ex_lock);
1076 goto reject;
1077 }
1078 if (!(ep->esb_stat & ESB_ST_REC_QUAL))
1079 fc_exch_hold(ep); /* hold for REC_QUAL */
1080 ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
1081 fc_exch_timer_set_locked(ep, ep->r_a_tov);
1082
1083 fp = fc_frame_alloc(ep->lp, sizeof(*ap));
1084 if (!fp) {
1085 spin_unlock_bh(&ep->ex_lock);
1086 goto free;
1087 }
1088 fh = fc_frame_header_get(fp);
1089 ap = fc_frame_payload_get(fp, sizeof(*ap));
1090 memset(ap, 0, sizeof(*ap));
1091 sp = &ep->seq;
1092 ap->ba_high_seq_cnt = htons(0xffff);
1093 if (sp->ssb_stat & SSB_ST_RESP) {
1094 ap->ba_seq_id = sp->id;
1095 ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
1096 ap->ba_high_seq_cnt = fh->fh_seq_cnt;
1097 ap->ba_low_seq_cnt = htons(sp->cnt);
1098 }
1099 sp = fc_seq_start_next(sp);
1100 spin_unlock_bh(&ep->ex_lock);
1101 fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
1102 fc_frame_free(rx_fp);
1103 return;
1104
1105reject:
1106 fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
1107free:
1108 fc_frame_free(rx_fp);
1109}
1110
1111/*
1112 * Handle receive where the other end is originating the sequence.
1113 */
1114static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
1115 struct fc_frame *fp)
1116{
1117 struct fc_frame_header *fh = fc_frame_header_get(fp);
1118 struct fc_seq *sp = NULL;
1119 struct fc_exch *ep = NULL;
1120 enum fc_sof sof;
1121 enum fc_eof eof;
1122 u32 f_ctl;
1123 enum fc_pf_rjt_reason reject;
1124
1125 fr_seq(fp) = NULL;
1126 reject = fc_seq_lookup_recip(mp, fp);
1127 if (reject == FC_RJT_NONE) {
1128 sp = fr_seq(fp); /* sequence will be held */
1129 ep = fc_seq_exch(sp);
1130 sof = fr_sof(fp);
1131 eof = fr_eof(fp);
1132 f_ctl = ntoh24(fh->fh_f_ctl);
1133 fc_seq_send_ack(sp, fp);
1134
1135 /*
1136 * Call the receive function.
1137 *
1138 * The receive function may allocate a new sequence
1139 * over the old one, so we shouldn't change the
1140 * sequence after this.
1141 *
1142 * The frame will be freed by the receive function.
1143 * If new exch resp handler is valid then call that
1144 * first.
1145 */
1146 if (ep->resp)
1147 ep->resp(sp, fp, ep->arg);
1148 else
1149 lp->tt.lport_recv(lp, sp, fp);
1150 fc_exch_release(ep); /* release from lookup */
1151 } else {
1152 FC_DEBUG_EXCH("exch/seq lookup failed: reject %x\n", reject);
1153 fc_frame_free(fp);
1154 }
1155}
1156
1157/*
1158 * Handle receive where the other end is originating the sequence in
1159 * response to our exchange.
1160 */
1161static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1162{
1163 struct fc_frame_header *fh = fc_frame_header_get(fp);
1164 struct fc_seq *sp;
1165 struct fc_exch *ep;
1166 enum fc_sof sof;
1167 u32 f_ctl;
1168 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
1169 void *ex_resp_arg;
1170 int rc;
1171
1172 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
1173 if (!ep) {
1174 atomic_inc(&mp->stats.xid_not_found);
1175 goto out;
1176 }
1177 if (ep->rxid == FC_XID_UNKNOWN)
1178 ep->rxid = ntohs(fh->fh_rx_id);
1179 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
1180 atomic_inc(&mp->stats.xid_not_found);
1181 goto rel;
1182 }
1183 if (ep->did != ntoh24(fh->fh_s_id) &&
1184 ep->did != FC_FID_FLOGI) {
1185 atomic_inc(&mp->stats.xid_not_found);
1186 goto rel;
1187 }
1188 sof = fr_sof(fp);
1189 if (fc_sof_is_init(sof)) {
1190 sp = fc_seq_start_next(&ep->seq);
1191 sp->id = fh->fh_seq_id;
1192 sp->ssb_stat |= SSB_ST_RESP;
1193 } else {
1194 sp = &ep->seq;
1195 if (sp->id != fh->fh_seq_id) {
1196 atomic_inc(&mp->stats.seq_not_found);
1197 goto rel;
1198 }
1199 }
1200 f_ctl = ntoh24(fh->fh_f_ctl);
1201 fr_seq(fp) = sp;
1202 if (f_ctl & FC_FC_SEQ_INIT)
1203 ep->esb_stat |= ESB_ST_SEQ_INIT;
1204
1205 if (fc_sof_needs_ack(sof))
1206 fc_seq_send_ack(sp, fp);
1207 resp = ep->resp;
1208 ex_resp_arg = ep->arg;
1209
1210 if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
1211 (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1212 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1213 spin_lock_bh(&ep->ex_lock);
1214 rc = fc_exch_done_locked(ep);
1215 WARN_ON(fc_seq_exch(sp) != ep);
1216 spin_unlock_bh(&ep->ex_lock);
1217 if (!rc)
1218 fc_exch_mgr_delete_ep(ep);
1219 }
1220
1221 /*
1222 * Call the receive function.
1223 * The sequence is held (has a refcnt) for us,
1224 * but not for the receive function.
1225 *
1226 * The receive function may allocate a new sequence
1227 * over the old one, so we shouldn't change the
1228 * sequence after this.
1229 *
1230 * The frame will be freed by the receive function.
1231 * If new exch resp handler is valid then call that
1232 * first.
1233 */
1234 if (resp)
1235 resp(sp, fp, ex_resp_arg);
1236 else
1237 fc_frame_free(fp);
1238 fc_exch_release(ep);
1239 return;
1240rel:
1241 fc_exch_release(ep);
1242out:
1243 fc_frame_free(fp);
1244}
1245
1246/*
1247 * Handle receive for a sequence where other end is responding to our sequence.
1248 */
1249static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1250{
1251 struct fc_seq *sp;
1252
1253 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
1254 if (!sp) {
1255 atomic_inc(&mp->stats.xid_not_found);
1256 FC_DEBUG_EXCH("seq lookup failed\n");
1257 } else {
1258 atomic_inc(&mp->stats.non_bls_resp);
1259 FC_DEBUG_EXCH("non-BLS response to sequence");
1260 }
1261 fc_frame_free(fp);
1262}
1263
1264/*
1265 * Handle the response to an ABTS for exchange or sequence.
1266 * This can be BA_ACC or BA_RJT.
1267 */
1268static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
1269{
1270 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
1271 void *ex_resp_arg;
1272 struct fc_frame_header *fh;
1273 struct fc_ba_acc *ap;
1274 struct fc_seq *sp;
1275 u16 low;
1276 u16 high;
1277 int rc = 1, has_rec = 0;
1278
1279 fh = fc_frame_header_get(fp);
1280 FC_DEBUG_EXCH("exch: BLS rctl %x - %s\n",
1281 fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl));
1282
1283 if (cancel_delayed_work_sync(&ep->timeout_work))
1284 fc_exch_release(ep); /* release from pending timer hold */
1285
1286 spin_lock_bh(&ep->ex_lock);
1287 switch (fh->fh_r_ctl) {
1288 case FC_RCTL_BA_ACC:
1289 ap = fc_frame_payload_get(fp, sizeof(*ap));
1290 if (!ap)
1291 break;
1292
1293 /*
1294 * Decide whether to establish a Recovery Qualifier.
1295 * We do this if there is a non-empty SEQ_CNT range and
1296 * SEQ_ID is the same as the one we aborted.
1297 */
1298 low = ntohs(ap->ba_low_seq_cnt);
1299 high = ntohs(ap->ba_high_seq_cnt);
1300 if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
1301 (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
1302 ap->ba_seq_id == ep->seq_id) && low != high) {
1303 ep->esb_stat |= ESB_ST_REC_QUAL;
1304 fc_exch_hold(ep); /* hold for recovery qualifier */
1305 has_rec = 1;
1306 }
1307 break;
1308 case FC_RCTL_BA_RJT:
1309 break;
1310 default:
1311 break;
1312 }
1313
1314 resp = ep->resp;
1315 ex_resp_arg = ep->arg;
1316
 1317 /* Do we need to do some other checks here? Can we reuse more of
 1318 * fc_exch_recv_seq_resp?
1319 */
1320 sp = &ep->seq;
1321 /*
1322 * do we want to check END_SEQ as well as LAST_SEQ here?
1323 */
1324 if (ep->fh_type != FC_TYPE_FCP &&
1325 ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
1326 rc = fc_exch_done_locked(ep);
1327 spin_unlock_bh(&ep->ex_lock);
1328 if (!rc)
1329 fc_exch_mgr_delete_ep(ep);
1330
1331 if (resp)
1332 resp(sp, fp, ex_resp_arg);
1333 else
1334 fc_frame_free(fp);
1335
1336 if (has_rec)
1337 fc_exch_timer_set(ep, ep->r_a_tov);
1338
1339}
1340
1341/*
1342 * Receive BLS sequence.
1343 * This is always a sequence initiated by the remote side.
1344 * We may be either the originator or recipient of the exchange.
1345 */
1346static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
1347{
1348 struct fc_frame_header *fh;
1349 struct fc_exch *ep;
1350 u32 f_ctl;
1351
1352 fh = fc_frame_header_get(fp);
1353 f_ctl = ntoh24(fh->fh_f_ctl);
1354 fr_seq(fp) = NULL;
1355
1356 ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
1357 ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
1358 if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
1359 spin_lock_bh(&ep->ex_lock);
1360 ep->esb_stat |= ESB_ST_SEQ_INIT;
1361 spin_unlock_bh(&ep->ex_lock);
1362 }
1363 if (f_ctl & FC_FC_SEQ_CTX) {
1364 /*
1365 * A response to a sequence we initiated.
1366 * This should only be ACKs for class 2 or F.
1367 */
1368 switch (fh->fh_r_ctl) {
1369 case FC_RCTL_ACK_1:
1370 case FC_RCTL_ACK_0:
1371 break;
1372 default:
1373 FC_DEBUG_EXCH("BLS rctl %x - %s received",
1374 fh->fh_r_ctl,
1375 fc_exch_rctl_name(fh->fh_r_ctl));
1376 break;
1377 }
1378 fc_frame_free(fp);
1379 } else {
1380 switch (fh->fh_r_ctl) {
1381 case FC_RCTL_BA_RJT:
1382 case FC_RCTL_BA_ACC:
1383 if (ep)
1384 fc_exch_abts_resp(ep, fp);
1385 else
1386 fc_frame_free(fp);
1387 break;
1388 case FC_RCTL_BA_ABTS:
1389 fc_exch_recv_abts(ep, fp);
1390 break;
1391 default: /* ignore junk */
1392 fc_frame_free(fp);
1393 break;
1394 }
1395 }
1396 if (ep)
1397 fc_exch_release(ep); /* release hold taken by fc_exch_find */
1398}
1399
1400/*
1401 * Accept sequence with LS_ACC.
1402 * If this fails due to allocation or transmit congestion, assume the
1403 * originator will repeat the sequence.
1404 */
1405static void fc_seq_ls_acc(struct fc_seq *req_sp)
1406{
1407 struct fc_seq *sp;
1408 struct fc_els_ls_acc *acc;
1409 struct fc_frame *fp;
1410
1411 sp = fc_seq_start_next(req_sp);
1412 fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
1413 if (fp) {
1414 acc = fc_frame_payload_get(fp, sizeof(*acc));
1415 memset(acc, 0, sizeof(*acc));
1416 acc->la_cmd = ELS_LS_ACC;
1417 fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
1418 }
1419}
1420
1421/*
1422 * Reject sequence with ELS LS_RJT.
1423 * If this fails due to allocation or transmit congestion, assume the
1424 * originator will repeat the sequence.
1425 */
1426static void fc_seq_ls_rjt(struct fc_seq *req_sp, enum fc_els_rjt_reason reason,
1427 enum fc_els_rjt_explan explan)
1428{
1429 struct fc_seq *sp;
1430 struct fc_els_ls_rjt *rjt;
1431 struct fc_frame *fp;
1432
1433 sp = fc_seq_start_next(req_sp);
1434 fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*rjt));
1435 if (fp) {
1436 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
1437 memset(rjt, 0, sizeof(*rjt));
1438 rjt->er_cmd = ELS_LS_RJT;
1439 rjt->er_reason = reason;
1440 rjt->er_explan = explan;
1441 fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
1442 }
1443}
1444
1445static void fc_exch_reset(struct fc_exch *ep)
1446{
1447 struct fc_seq *sp;
1448 void (*resp)(struct fc_seq *, struct fc_frame *, void *);
1449 void *arg;
1450 int rc = 1;
1451
1452 spin_lock_bh(&ep->ex_lock);
1453 ep->state |= FC_EX_RST_CLEANUP;
1454 /*
 1455 * We really want to call del_timer_sync, but cannot because
 1456 * the lport calls us with the lport lock held (some resp
1457 * functions can also grab the lport lock which could cause
1458 * a deadlock).
1459 */
1460 if (cancel_delayed_work(&ep->timeout_work))
1461 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
1462 resp = ep->resp;
1463 ep->resp = NULL;
1464 if (ep->esb_stat & ESB_ST_REC_QUAL)
1465 atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */
1466 ep->esb_stat &= ~ESB_ST_REC_QUAL;
1467 arg = ep->arg;
1468 sp = &ep->seq;
1469 rc = fc_exch_done_locked(ep);
1470 spin_unlock_bh(&ep->ex_lock);
1471 if (!rc)
1472 fc_exch_mgr_delete_ep(ep);
1473
1474 if (resp)
1475 resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
1476}
1477
1478/*
1479 * Reset an exchange manager, releasing all sequences and exchanges.
1480 * If sid is non-zero, reset only exchanges we source from that FID.
1481 * If did is non-zero, reset only exchanges destined to that FID.
1482 */
1483void fc_exch_mgr_reset(struct fc_exch_mgr *mp, u32 sid, u32 did)
1484{
1485 struct fc_exch *ep;
1486 struct fc_exch *next;
1487
1488 spin_lock_bh(&mp->em_lock);
1489restart:
1490 list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) {
1491 if ((sid == 0 || sid == ep->sid) &&
1492 (did == 0 || did == ep->did)) {
1493 fc_exch_hold(ep);
1494 spin_unlock_bh(&mp->em_lock);
1495
1496 fc_exch_reset(ep);
1497
1498 fc_exch_release(ep);
1499 spin_lock_bh(&mp->em_lock);
1500
1501 /*
 1502 * must restart the loop in case multiple eps
 1503 * were released while the lock was dropped.
1504 */
1505 goto restart;
1506 }
1507 }
1508 spin_unlock_bh(&mp->em_lock);
1509}
1510EXPORT_SYMBOL(fc_exch_mgr_reset);
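/*
 * Illustrative usage (hypothetical caller): passing zero for both FIDs
 * resets every exchange in the manager, e.g.
 *
 *	fc_exch_mgr_reset(lp->emp, 0, 0);
 */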
1511
1512/*
1513 * Handle incoming ELS REC - Read Exchange Concise.
1514 * Note that the requesting port may be different than the S_ID in the request.
1515 */
1516static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
1517{
1518 struct fc_frame *fp;
1519 struct fc_exch *ep;
1520 struct fc_exch_mgr *em;
1521 struct fc_els_rec *rp;
1522 struct fc_els_rec_acc *acc;
1523 enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
1524 enum fc_els_rjt_explan explan;
1525 u32 sid;
1526 u16 rxid;
1527 u16 oxid;
1528
1529 rp = fc_frame_payload_get(rfp, sizeof(*rp));
1530 explan = ELS_EXPL_INV_LEN;
1531 if (!rp)
1532 goto reject;
1533 sid = ntoh24(rp->rec_s_id);
1534 rxid = ntohs(rp->rec_rx_id);
1535 oxid = ntohs(rp->rec_ox_id);
1536
1537 /*
1538 * Currently it's hard to find the local S_ID from the exchange
1539 * manager. This will eventually be fixed, but for now it's easier
 1540 * to look up the subject exchange twice, once as if we were
1541 * the initiator, and then again if we weren't.
1542 */
1543 em = fc_seq_exch(sp)->em;
1544 ep = fc_exch_find(em, oxid);
1545 explan = ELS_EXPL_OXID_RXID;
1546 if (ep && ep->oid == sid) {
1547 if (ep->rxid != FC_XID_UNKNOWN &&
1548 rxid != FC_XID_UNKNOWN &&
1549 ep->rxid != rxid)
1550 goto rel;
1551 } else {
1552 if (ep)
1553 fc_exch_release(ep);
1554 ep = NULL;
1555 if (rxid != FC_XID_UNKNOWN)
1556 ep = fc_exch_find(em, rxid);
1557 if (!ep)
1558 goto reject;
1559 }
1560
1561 fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
1562 if (!fp) {
1563 fc_exch_done(sp);
1564 goto out;
1565 }
1566 sp = fc_seq_start_next(sp);
1567 acc = fc_frame_payload_get(fp, sizeof(*acc));
1568 memset(acc, 0, sizeof(*acc));
1569 acc->reca_cmd = ELS_LS_ACC;
1570 acc->reca_ox_id = rp->rec_ox_id;
1571 memcpy(acc->reca_ofid, rp->rec_s_id, 3);
1572 acc->reca_rx_id = htons(ep->rxid);
1573 if (ep->sid == ep->oid)
1574 hton24(acc->reca_rfid, ep->did);
1575 else
1576 hton24(acc->reca_rfid, ep->sid);
1577 acc->reca_fc4value = htonl(ep->seq.rec_data);
1578 acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
1579 ESB_ST_SEQ_INIT |
1580 ESB_ST_COMPLETE));
1581 sp = fc_seq_start_next(sp);
1582 fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
1583out:
1584 fc_exch_release(ep);
1585 fc_frame_free(rfp);
1586 return;
1587
1588rel:
1589 fc_exch_release(ep);
1590reject:
1591 fc_seq_ls_rjt(sp, reason, explan);
1592 fc_frame_free(rfp);
1593}
1594
1595/*
1596 * Handle response from RRQ.
1597 * Not much to do here, really.
1598 * Should report errors.
1599 *
1600 * TODO: fix error handler.
1601 */
1602static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
1603{
1604 struct fc_exch *aborted_ep = arg;
1605 unsigned int op;
1606
1607 if (IS_ERR(fp)) {
1608 int err = PTR_ERR(fp);
1609
1610 if (err == -FC_EX_CLOSED)
1611 goto cleanup;
1612 FC_DBG("Cannot process RRQ, because of frame error %d\n", err);
1613 return;
1614 }
1615
1616 op = fc_frame_payload_op(fp);
1617 fc_frame_free(fp);
1618
1619 switch (op) {
1620 case ELS_LS_RJT:
1621 FC_DBG("LS_RJT for RRQ");
1622 /* fall through */
1623 case ELS_LS_ACC:
1624 goto cleanup;
1625 default:
1626 FC_DBG("unexpected response op %x for RRQ", op);
1627 return;
1628 }
1629
1630cleanup:
1631 fc_exch_done(&aborted_ep->seq);
1632 /* drop hold for rec qual */
1633 fc_exch_release(aborted_ep);
1634}
1635
1636/*
1637 * Send ELS RRQ - Reinstate Recovery Qualifier.
1638 * This tells the remote port to stop blocking the use of
1639 * the exchange and the seq_cnt range.
1640 */
1641static void fc_exch_rrq(struct fc_exch *ep)
1642{
1643 struct fc_lport *lp;
1644 struct fc_els_rrq *rrq;
1645 struct fc_frame *fp;
1646 struct fc_seq *rrq_sp;
1647 u32 did;
1648
1649 lp = ep->lp;
1650
1651 fp = fc_frame_alloc(lp, sizeof(*rrq));
1652 if (!fp)
1653 return;
1654 rrq = fc_frame_payload_get(fp, sizeof(*rrq));
1655 memset(rrq, 0, sizeof(*rrq));
1656 rrq->rrq_cmd = ELS_RRQ;
1657 hton24(rrq->rrq_s_id, ep->sid);
1658 rrq->rrq_ox_id = htons(ep->oxid);
1659 rrq->rrq_rx_id = htons(ep->rxid);
1660
1661 did = ep->did;
1662 if (ep->esb_stat & ESB_ST_RESP)
1663 did = ep->sid;
1664
1665 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
1666 fc_host_port_id(lp->host), FC_TYPE_ELS,
1667 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1668
1669 rrq_sp = fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep,
1670 lp->e_d_tov);
1671 if (!rrq_sp) {
1672 ep->esb_stat |= ESB_ST_REC_QUAL;
1673 fc_exch_timer_set_locked(ep, ep->r_a_tov);
1674 return;
1675 }
1676}
1677
1678
1679/*
 1680 * Handle incoming ELS RRQ - Reinstate Recovery Qualifier.
1681 */
1682static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
1683{
1684 struct fc_exch *ep; /* request or subject exchange */
1685 struct fc_els_rrq *rp;
1686 u32 sid;
1687 u16 xid;
1688 enum fc_els_rjt_explan explan;
1689
1690 rp = fc_frame_payload_get(fp, sizeof(*rp));
1691 explan = ELS_EXPL_INV_LEN;
1692 if (!rp)
1693 goto reject;
1694
1695 /*
1696 * lookup subject exchange.
1697 */
1698 ep = fc_seq_exch(sp);
1699 sid = ntoh24(rp->rrq_s_id); /* subject source */
1700 xid = ep->did == sid ? ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
1701 ep = fc_exch_find(ep->em, xid);
1702
1703 explan = ELS_EXPL_OXID_RXID;
1704 if (!ep)
1705 goto reject;
1706 spin_lock_bh(&ep->ex_lock);
1707 if (ep->oxid != ntohs(rp->rrq_ox_id))
1708 goto unlock_reject;
1709 if (ep->rxid != ntohs(rp->rrq_rx_id) &&
1710 ep->rxid != FC_XID_UNKNOWN)
1711 goto unlock_reject;
1712 explan = ELS_EXPL_SID;
1713 if (ep->sid != sid)
1714 goto unlock_reject;
1715
1716 /*
1717 * Clear Recovery Qualifier state, and cancel timer if complete.
1718 */
1719 if (ep->esb_stat & ESB_ST_REC_QUAL) {
1720 ep->esb_stat &= ~ESB_ST_REC_QUAL;
1721 atomic_dec(&ep->ex_refcnt); /* drop hold for rec qual */
1722 }
1723 if (ep->esb_stat & ESB_ST_COMPLETE) {
1724 if (cancel_delayed_work(&ep->timeout_work))
1725 atomic_dec(&ep->ex_refcnt); /* drop timer hold */
1726 }
1727
1728 spin_unlock_bh(&ep->ex_lock);
1729
1730 /*
1731 * Send LS_ACC.
1732 */
1733 fc_seq_ls_acc(sp);
1734 fc_frame_free(fp);
1735 return;
1736
1737unlock_reject:
1738 spin_unlock_bh(&ep->ex_lock);
1739 fc_exch_release(ep); /* drop hold from fc_exch_find */
1740reject:
1741 fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan);
1742 fc_frame_free(fp);
1743}
1744
1745struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
1746 enum fc_class class,
1747 u16 min_xid, u16 max_xid)
1748{
1749 struct fc_exch_mgr *mp;
1750 size_t len;
1751
1752 if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) {
 1753 FC_DBG("Invalid min_xid 0x%x and max_xid 0x%x\n",
1754 min_xid, max_xid);
1755 return NULL;
1756 }
1757
1758 /*
1759 * Memory need for EM
1760 */
1761#define xid_ok(i, m1, m2) (((i) >= (m1)) && ((i) <= (m2)))
1762 len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *));
1763 len += sizeof(struct fc_exch_mgr);
1764
1765 mp = kzalloc(len, GFP_ATOMIC);
1766 if (!mp)
1767 return NULL;
1768
1769 mp->class = class;
1770 mp->total_exches = 0;
1771 mp->exches = (struct fc_exch **)(mp + 1);
1772 mp->lp = lp;
1773 /* adjust em exch xid range for offload */
1774 mp->min_xid = min_xid;
1775 mp->max_xid = max_xid;
1776 mp->last_xid = min_xid - 1;
1777 mp->max_read = 0;
1778 mp->last_read = 0;
1779 if (lp->lro_enabled && xid_ok(lp->lro_xid, min_xid, max_xid)) {
1780 mp->max_read = lp->lro_xid;
1781 mp->last_read = min_xid - 1;
1782 mp->last_xid = mp->max_read;
1783 } else {
1784 /* disable lro if no xid control over read */
1785 lp->lro_enabled = 0;
1786 }
1787
1788 INIT_LIST_HEAD(&mp->ex_list);
1789 spin_lock_init(&mp->em_lock);
1790
1791 mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
1792 if (!mp->ep_pool)
1793 goto free_mp;
1794
1795 return mp;
1796
1797free_mp:
1798 kfree(mp);
1799 return NULL;
1800}
1801EXPORT_SYMBOL(fc_exch_mgr_alloc);
1802
1803void fc_exch_mgr_free(struct fc_exch_mgr *mp)
1804{
1805 WARN_ON(!mp);
1806 /*
1807 * The total exch count must be zero
1808 * before freeing exchange manager.
1809 */
1810 WARN_ON(mp->total_exches != 0);
1811 mempool_destroy(mp->ep_pool);
1812 kfree(mp);
1813}
1814EXPORT_SYMBOL(fc_exch_mgr_free);
1815
1816struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp)
1817{
1818 if (!lp || !lp->emp)
1819 return NULL;
1820
1821 return fc_exch_alloc(lp->emp, fp, 0);
1822}
1823EXPORT_SYMBOL(fc_exch_get);
1824
1825struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
1826 struct fc_frame *fp,
1827 void (*resp)(struct fc_seq *,
1828 struct fc_frame *fp,
1829 void *arg),
1830 void (*destructor)(struct fc_seq *, void *),
1831 void *arg, u32 timer_msec)
1832{
1833 struct fc_exch *ep;
1834 struct fc_seq *sp = NULL;
1835 struct fc_frame_header *fh;
1836 int rc = 1;
1837
1838 ep = lp->tt.exch_get(lp, fp);
1839 if (!ep) {
1840 fc_frame_free(fp);
1841 return NULL;
1842 }
1843 ep->esb_stat |= ESB_ST_SEQ_INIT;
1844 fh = fc_frame_header_get(fp);
1845 fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
1846 ep->resp = resp;
1847 ep->destructor = destructor;
1848 ep->arg = arg;
1849 ep->r_a_tov = FC_DEF_R_A_TOV;
1850 ep->lp = lp;
1851 sp = &ep->seq;
1852
 1853 ep->fh_type = fh->fh_type; /* save for possible timeout handling */
1854 ep->f_ctl = ntoh24(fh->fh_f_ctl);
1855 fc_exch_setup_hdr(ep, fp, ep->f_ctl);
1856 sp->cnt++;
1857
1858 if (unlikely(lp->tt.frame_send(lp, fp)))
1859 goto err;
1860
1861 if (timer_msec)
1862 fc_exch_timer_set_locked(ep, timer_msec);
1863 ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */
1864
1865 if (ep->f_ctl & FC_FC_SEQ_INIT)
1866 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
1867 spin_unlock_bh(&ep->ex_lock);
1868 return sp;
1869err:
1870 rc = fc_exch_done_locked(ep);
1871 spin_unlock_bh(&ep->ex_lock);
1872 if (!rc)
1873 fc_exch_mgr_delete_ep(ep);
1874 return NULL;
1875}
1876EXPORT_SYMBOL(fc_exch_seq_send);
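/*
 * Illustrative usage sketch (mirrors how fc_exch_rrq() above uses this
 * routine): fill in the FC header, then hand the frame off with a
 * response handler; the handler is later called with either the reply
 * frame or an ERR_PTR-encoded error such as -FC_EX_TIMEOUT.
 *
 *	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did, sid, FC_TYPE_ELS,
 *		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
 *	sp = fc_exch_seq_send(lp, fp, my_resp, NULL, my_arg, lp->e_d_tov);
 *	if (!sp)
 *		... the send failed; no response handler will be called ...
 *
 * my_resp and my_arg are hypothetical caller-supplied names.
 */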
1877
1878/*
1879 * Receive a frame
1880 */
1881void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
1882 struct fc_frame *fp)
1883{
1884 struct fc_frame_header *fh = fc_frame_header_get(fp);
1885 u32 f_ctl;
1886
1887 /* lport lock ? */
1888 if (!lp || !mp || (lp->state == LPORT_ST_NONE)) {
1889 FC_DBG("fc_lport or EM is not allocated and configured");
1890 fc_frame_free(fp);
1891 return;
1892 }
1893
1894 /*
1895 * If frame is marked invalid, just drop it.
1896 */
1897 f_ctl = ntoh24(fh->fh_f_ctl);
1898 switch (fr_eof(fp)) {
1899 case FC_EOF_T:
1900 if (f_ctl & FC_FC_END_SEQ)
1901 skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
1902 /* fall through */
1903 case FC_EOF_N:
1904 if (fh->fh_type == FC_TYPE_BLS)
1905 fc_exch_recv_bls(mp, fp);
1906 else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
1907 FC_FC_EX_CTX)
1908 fc_exch_recv_seq_resp(mp, fp);
1909 else if (f_ctl & FC_FC_SEQ_CTX)
1910 fc_exch_recv_resp(mp, fp);
1911 else
1912 fc_exch_recv_req(lp, mp, fp);
1913 break;
1914 default:
1915 FC_DBG("dropping invalid frame (eof %x)", fr_eof(fp));
1916 fc_frame_free(fp);
1917 break;
1918 }
1919}
1920EXPORT_SYMBOL(fc_exch_recv);
1921
1922int fc_exch_init(struct fc_lport *lp)
1923{
1924 if (!lp->tt.exch_get) {
1925 /*
1926 * exch_put() should be NULL if
1927 * exch_get() is NULL
1928 */
1929 WARN_ON(lp->tt.exch_put);
1930 lp->tt.exch_get = fc_exch_get;
1931 }
1932
1933 if (!lp->tt.seq_start_next)
1934 lp->tt.seq_start_next = fc_seq_start_next;
1935
1936 if (!lp->tt.exch_seq_send)
1937 lp->tt.exch_seq_send = fc_exch_seq_send;
1938
1939 if (!lp->tt.seq_send)
1940 lp->tt.seq_send = fc_seq_send;
1941
1942 if (!lp->tt.seq_els_rsp_send)
1943 lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
1944
1945 if (!lp->tt.exch_done)
1946 lp->tt.exch_done = fc_exch_done;
1947
1948 if (!lp->tt.exch_mgr_reset)
1949 lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
1950
1951 if (!lp->tt.seq_exch_abort)
1952 lp->tt.seq_exch_abort = fc_seq_exch_abort;
1953
1954 return 0;
1955}
1956EXPORT_SYMBOL(fc_exch_init);
1957
1958int fc_setup_exch_mgr(void)
1959{
1960 fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
1961 0, SLAB_HWCACHE_ALIGN, NULL);
1962 if (!fc_em_cachep)
1963 return -ENOMEM;
1964 return 0;
1965}
1966
1967void fc_destroy_exch_mgr(void)
1968{
1969 kmem_cache_destroy(fc_em_cachep);
1970}
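/*
 * Illustrative setup/teardown order (a sketch, not enforced here):
 *
 *	fc_setup_exch_mgr();				library/module init
 *	mp = fc_exch_mgr_alloc(lp, FC_CLASS_3, min, max);
 *	fc_exch_init(lp);				fill unset lp->tt handlers
 *	...
 *	fc_exch_mgr_free(mp);				after all exchanges complete
 *	fc_destroy_exch_mgr();				library/module exit
 *
 * FC_CLASS_3, min and max stand in for values the LLD chooses.
 */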